Commit d72a0d36 authored by Geoff Simmons

WIP: initial, limited implementation of TLS onload.

This uses haproxy for TLS connections to IngressBackends, and the
via feature of the klarlack implementation of Varnish. See:

https://github.com/varnishcache/varnish-cache/pull/3128

Adds the spec.tls object to the BackendConfig CRD, to configure TLS
onload for a backend.

Limitations: currently only the verify:false and maxConn settings
are implemented. Specification of CA certificates and of the stick
table configuration for haproxy is not yet implemented. Currently
TLS onload may only be specified for one backend (no more than one
BackendConfig).
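
For example, a minimal BackendConfig enabling TLS onload (following
the examples/tls/onload example added in this commit; the maxConn
value shown here is just the haproxy default, added for
illustration):

    apiVersion: "ingress.varnish-cache.org/v1alpha1"
    kind: BackendConfig
    metadata:
      name: cafe-onload-cfg
    spec:
      services:
        - coffee-external-svc
      tls:
        verify: false
        maxConn: 2000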

Adds the CLI option -varnishImpl to the controller. TLS onload is
only supported if this option is set to "klarlack". Otherwise, the
presence of the tls object in a BackendConfig leads to a
SyncFatalError with a message that TLS onload is only supported for
klarlack, and the BackendConfig is not synced.
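
When the controller is deployed with the Helm chart, the option can
be set in values.yaml (see the chart changes below):

    vikingController:
      varnishImpl: klarlack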

If the backend Service specified for TLS onload has type ExternalName,
then 3 server instances are configured for the haproxy backend. This
value is currently hard-wired, and may be made configurable in a future
iteration. For any other Service type, there are as many haproxy server
instances as there are Endpoints (Pods) in the k8s cluster.

If maxConn is not specified in the BackendConfig, it defaults to
2000 (the haproxy default).
parent 1bb78faa
......@@ -120,6 +120,30 @@ spec:
rampup:
type: string
pattern: '^\d+(\.\d+)?(ms|[smhdwy])$'
tls:
type: object
properties:
verify:
type: boolean
authority:
type: string
caSecret:
type: string
minLength: 1
caCrt:
type: string
minLength: 1
caIssuer:
type: string
minLength: 1
maxConn:
type: integer
minimum: 1
maximum: 2147483647
stickTableSize:
type: integer
minimum: 1
maximum: 2147483647
status:
acceptedNames:
kind: BackendConfig
......
......@@ -56,6 +56,9 @@ spec:
{{- if .Values.vikingController.namespace }}
- -namespace={{ .Values.vikingController.namespace }}
{{- end }}
{{- if .Values.vikingController.varnishImpl }}
- -varnishImpl={{ .Values.vikingController.varnishImpl }}
{{- end }}
{{- if .Values.vikingController.extraArgs }}
{{ toYaml .Values.vikingController.extraArgs | nindent 12 }}
{{- end }}
......
......@@ -15,6 +15,11 @@ vikingController:
## Only listen for resources in this namespace (default all)
# namespace:
## Set this value to klarlack to enable features that are only
## supported by the klarlack implementation of Varnish (default any
## implementation).
# varnishImpl:
# labels to add to the pod container metadata
podLabels: {}
# key: value
......
......@@ -89,7 +89,11 @@ var (
"re-queue delay when the controller does not have all of the\n"+
"information required for a necessary cluster change\n"+
"must be > 0s")
devModeF = flag.Bool("devmode", false, "enable development mode")
devModeF = flag.Bool("devmode", false, "enable development mode")
varnishImplF = flag.String("varnishImpl", "",
"set to 'klarlack' to enable features only implemented by\n"+
"the klarlack image for Varnish Ingress")
logFormat = logrus.TextFormatter{
DisableColors: true,
FullTimestamp: true,
......@@ -232,9 +236,10 @@ func main() {
informers.WithTweakListOptions(ingressTLSSecrets))
ingController, err := controller.NewIngressController(log,
*ingressClassF, *namespaceF, *devModeF, kubeClient, vController,
hController, informerFactory, vcrInformerFactory,
vsecrInformerFactory, tsecrInformerFactory, *incomplRetryDelayF)
*ingressClassF, *namespaceF, *devModeF, *varnishImplF,
kubeClient, vController, hController, informerFactory,
vcrInformerFactory, vsecrInformerFactory, tsecrInformerFactory,
*incomplRetryDelayF)
if err != nil {
log.Fatalf("Could not initialize controller: %v", err)
os.Exit(-1)
......
......@@ -87,7 +87,8 @@ deploy-controller-helm:
@helm install viking-controller $(mkdir)/../charts/viking-controller \
--values values-controller.yaml --namespace kube-system \
--set vikingController.image.repository=$(CONTROLLER_IMAGE) \
--set vikingController.image.tag=$(CONTROLLER_TAG)
--set vikingController.image.tag=$(CONTROLLER_TAG) \
--set vikingController.varnishImpl=$(VARNISH)
deploy-controller-kubectl:
@kubectl apply -f serviceaccount-controller.yaml
......@@ -95,7 +96,11 @@ deploy-controller-kubectl:
@kubectl apply -f varnishcfg-crd.yaml
@kubectl apply -f backendcfg-crd.yaml
@kubectl apply -f templatecfg-crd.yaml
ifeq ($(VARNISH),klarlack)
@kubectl apply -f controller_klarlack.yaml
else
@kubectl apply -f controller.yaml
endif
deploy-controller:
......@@ -119,7 +124,11 @@ deploy-service-kubectl:
@kubectl apply -f serviceaccount-varnish.yaml
@kubectl apply -f rbac-varnish.yaml
@kubectl apply -f adm-secret.yaml
ifeq ($(VARNISH),klarlack)
@kubectl apply -f varnish_klarlack.yaml
else
@kubectl apply -f varnish.yaml
endif
@kubectl apply -f admin-svc.yaml
@kubectl apply -f service.yaml
......@@ -146,7 +155,11 @@ undeploy-service-kubectl:
@kubectl delete -f serviceaccount-varnish.yaml
@kubectl delete -f service.yaml
@kubectl delete -f admin-svc.yaml
ifeq ($(VARNISH),klarlack)
@kubectl delete -f varnish_klarlack.yaml
else
@kubectl delete -f varnish.yaml
endif
@kubectl delete -f adm-secret.yaml
@echo Waiting for viking-service Pods to be deleted
@kubectl wait pod --timeout=$(WAIT_TIMEOUT) \
......@@ -170,7 +183,11 @@ undeploy-controller-helm:
-l app.kubernetes.io/name=viking-controller --for=delete
undeploy-controller-kubectl:
ifeq ($(VARNISH),klarlack)
@kubectl delete -f controller_klarlack.yaml
else
@kubectl delete -f controller.yaml
endif
@kubectl delete -f templatecfg-crd.yaml
@kubectl delete -f backendcfg-crd.yaml
@kubectl delete -f varnishcfg-crd.yaml
......
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: backendconfigs.ingress.varnish-cache.org
spec:
group: ingress.varnish-cache.org
names:
kind: BackendConfig
listKind: BackendConfigList
plural: backendconfigs
singular: backendconfig
shortNames:
- becfg
scope: Namespaced
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
validation:
openAPIV3Schema:
required:
- spec
properties:
spec:
required:
- services
properties:
services:
type: array
minItems: 1
items:
type: string
minLength: 1
host-header:
type: string
minLength: 1
connect-timeout:
type: string
pattern: '^\d+(\.\d+)?(ms|[smhdwy])$'
first-byte-timeout:
type: string
pattern: '^\d+(\.\d+)?(ms|[smhdwy])$'
between-bytes-timeout:
type: string
pattern: '^\d+(\.\d+)?(ms|[smhdwy])$'
dnsRetryDelay:
type: string
pattern: '^\d+(\.\d+)?(ms|[smhdwy])$'
domainUsageTimeout:
type: string
pattern: '^\d+(\.\d+)?(ms|[smhdwy])$'
firstLookupTimeout:
type: string
pattern: '^\d+(\.\d+)?(ms|[smhdwy])$'
resolverIdleTimeout:
type: string
pattern: '^\d+(\.\d+)?(ms|[smhdwy])$'
resolverTimeout:
type: string
pattern: '^\d+(\.\d+)?(ms|[smhdwy])$'
proxy-header:
type: integer
minimum: 1
maximum: 2
max-connections:
type: integer
minimum: 1
maxDNSQueries:
type: integer
minimum: 0
maximum: 65535
followDNSRedirects:
type: boolean
probe:
type: object
properties:
url:
type: string
pattern: ^/
request:
type: array
minItems: 1
items:
type: string
expected-response:
type: integer
minimum: 100
maximum: 599
timeout:
type: string
pattern: '^\d+(\.\d+)?(ms|[smhdwy])$'
interval:
type: string
pattern: '^\d+(\.\d+)?(ms|[smhdwy])$'
initial:
type: integer
minimum: 0
window:
type: integer
minimum: 0
maximum: 64
threshold:
type: integer
minimum: 0
maximum: 64
director:
type: object
properties:
type:
enum:
- round-robin
- random
- shard
type: string
warmup:
type: integer
minimum: 0
maximum: 100
rampup:
type: string
pattern: '^\d+(\.\d+)?(ms|[smhdwy])$'
status:
acceptedNames:
kind: BackendConfig
listKind: BackendConfigList
plural: backendconfigs
singular: backendconfig
shortNames:
- becfg
storedVersions:
- v1alpha1
conditions: []
../charts/viking-controller/crds/backendcfg-crd.yaml
\ No newline at end of file
apiVersion: apps/v1
kind: Deployment
metadata:
name: varnish-ingress-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app: varnish-ingress-controller
template:
metadata:
labels:
app: varnish-ingress-controller
spec:
serviceAccountName: varnish-ingress-controller
containers:
- image: varnish-ingress/controller
imagePullPolicy: IfNotPresent
name: varnish-ingress-controller
ports:
- name: http
containerPort: 8080
volumeMounts:
- name: run
mountPath: "/run"
livenessProbe:
exec:
command:
- /usr/bin/pgrep
- -P
- "0"
- k8s-ingress
readinessProbe:
exec:
command:
- /usr/bin/test
- -e
- /run/controller-ready
args:
- -readyfile=/run/controller-ready
- -varnishImpl=klarlack
volumes:
- name: run
emptyDir:
medium: "Memory"
apiVersion: apps/v1
kind: Deployment
metadata:
name: varnish
spec:
replicas: 2
selector:
matchLabels:
app: varnish-ingress
template:
metadata:
labels:
app: varnish-ingress
spec:
serviceAccountName: varnish-ingress
securityContext:
# fsGroup 998 is the group "varnish" in both the varnish and haproxy
# containers; the varnish and haproxy users belong to this group.
fsGroup: 998
containers:
- image: varnish-ingress/klarlack
imagePullPolicy: IfNotPresent
name: varnish-ingress
ports:
- name: http
containerPort: 80
- name: k8s
containerPort: 8080
volumeMounts:
- name: adm-secret
mountPath: "/var/run/varnish"
readOnly: true
- name: varnish-home
mountPath: "/var/run/varnish-home"
- name: offload
mountPath: "/var/run/offload"
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
livenessProbe:
exec:
command:
- /usr/bin/pgrep
- -P
- "0"
- varnishd
readinessProbe:
httpGet:
path: /ready
port: k8s
args:
- -n
- /var/run/varnish-home
- image: varnish-ingress/haproxy
imagePullPolicy: IfNotPresent
name: varnish-ingress-offloader
ports:
- name: tls
containerPort: 443
- name: k8s
containerPort: 8443
volumeMounts:
- name: tls-cert
mountPath: "/etc/ssl/private"
- name: offload
mountPath: "/var/run/offload"
env:
- name: SECRET_DATAPLANEAPI
valueFrom:
secretKeyRef:
name: adm-secret
key: dataplaneapi
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
livenessProbe:
exec:
command:
- /usr/bin/pgrep
- -P
- "0"
- haproxy
readinessProbe:
httpGet:
path: /healthz
port: k8s
volumes:
- name: adm-secret
secret:
secretName: adm-secret
items:
- key: admin
path: _.secret
- name: tls-cert
emptyDir: {}
- name: varnish-home
emptyDir:
medium: "Memory"
- name: offload
emptyDir: {}
# Copyright (c) 2021 UPLEX Nils Goroll Systemoptimierung
# All rights reserved
#
# Author: Geoffrey Simmons <geoffrey.simmons@uplex.de>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
# GNU make is required.
mkpath := $(abspath $(lastword $(MAKEFILE_LIST)))
mkdir := $(dir $(mkpath))
CHARTDIR=$(mkdir)/../../../charts
TESTDIR=$(mkdir)/../../../test
ifneq ($(VARNISH),klarlack)
all deploy deploy-helm deploy-kubectl verify wait uninstall-kubectl \
uninstall-helm undeploy-kubectl undeploy-helm:
@echo TLS onload only supported for the klarlack implementation
else
all: deploy
deploy-helm:
@helm install viking-ingress-tls-onload $(CHARTDIR)/viking-test-app \
--values values.yaml
deploy-kubectl:
@kubectl apply -f cafe.yaml
@kubectl apply -f ext-svcs.yaml
@kubectl apply -f cafe-ingress.yaml
@kubectl apply -f backend-cfg.yaml
# TESTOPTS are passed to varnishtest, e.g.: make TESTOPTS=-v verify
verify:
$(mkdir)/verify.sh
wait:
$(TESTDIR)/wait.sh app=varnish-ingress
uninstall-kubectl:
@kubectl delete -f backend-cfg.yaml
@kubectl delete -f cafe-ingress.yaml
@kubectl delete -f ext-svcs.yaml
@kubectl delete -f cafe.yaml
uninstall-helm:
@helm uninstall viking-ingress-tls-onload
undeploy-kubectl: uninstall-kubectl wait
undeploy-helm: uninstall-helm wait
endif
ifeq ($(DEPLOY),kubectl)
deploy: deploy-kubectl
undeploy: undeploy-kubectl
else
deploy: deploy-helm
undeploy: undeploy-helm
endif
.PHONY: all $(MAKECMDGOALS)
# Sample backend configurations
apiVersion: "ingress.varnish-cache.org/v1alpha1"
kind: BackendConfig
metadata:
name: cafe-onload-cfg
spec:
services:
- coffee-external-svc
tls:
verify: false
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: cafe-ingress-varnish
annotations:
kubernetes.io/ingress.class: "varnish"
spec:
rules:
- host: cafe.example.com
http:
paths:
- path: /tea
backend:
serviceName: tea-svc
servicePort: 80
- path: /coffee
backend:
serviceName: coffee-external-svc
servicePort: 4443
# looks like -*- vcl -*-
varnishtest "cafe example with TLS onload for a backend"
# Expectations for the https-echo test backend
client c1 -connect "${localhost} ${localport}" {
txreq -url /coffee/foo/bar -hdr "Host: cafe.example.com"
rxresp
expect resp.status == 200
expect resp.http.X-Host ~ "^coffee-[a-z0-9]+-[a-z0-9]+$"
expect resp.http.X-URI == "/coffee/foo/bar"
expect resp.body == "GET /coffee/foo/bar HTTP/1.1"
} -run
apiVersion: apps/v1
kind: Deployment
metadata:
name: coffee
spec:
replicas: 2
selector:
matchLabels:
app: coffee
example: onload
template:
metadata:
labels:
app: coffee
example: onload
spec:
containers:
- name: coffee
image: uplex/https-echo
ports:
- containerPort: 4443
---
apiVersion: v1
kind: Service
metadata:
name: coffee-svc
spec:
ports:
- port: 443
targetPort: 4443
protocol: TCP
name: https
selector:
app: coffee
example: onload
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: tea
spec:
replicas: 3
selector:
matchLabels:
app: tea
example: onload
template:
metadata:
labels:
app: tea
example: onload
spec:
containers:
- name: tea
image: uplex/http-echo
ports:
- containerPort: 7357
---
apiVersion: v1
kind: Service
metadata:
name: tea-svc
spec:
ports:
- port: 80
targetPort: 7357
protocol: TCP
name: http
selector:
app: tea
example: onload
apiVersion: v1
kind: Service
metadata:
name: coffee-external-svc
spec:
type: ExternalName
externalName: coffee-svc.default.svc.cluster.local
ports:
- port: 443
protocol: TCP
name: https
apps:
coffee-external:
externalName: coffee-svc.default.svc.cluster.local
labels:
app: coffee-external
example: onload
config:
tls:
verify: false
coffee:
image: uplex/https-echo
replicas: 2
servicePort: 443
containerPort: 4443
targetPort: 4443
labels:
app: coffee
example: onload
tea:
image: uplex/http-echo
replicas: 3
servicePort: 80
containerPort: 7357
targetPort: 7357
labels:
app: tea
example: onload
ingress:
name: cafe-ingress
rules:
- host: cafe.example.com
paths:
- path: /tea
app: tea
servicePort: 80
- path: /coffee
app: coffee-external
servicePort: 4443
#! /bin/bash -ex
MYDIR=$(dirname ${BASH_SOURCE[0]})
source ${MYDIR}/../../../test/utils.sh
LOCALPORT=${LOCALPORT:-8888}
wait_until_ready app=varnish-ingress
wait_until_configured app=varnish-ingress
kubectl port-forward svc/varnish-ingress ${LOCALPORT}:80 >/dev/null &
trap 'kill $(jobs -p)' EXIT
wait_for_port ${LOCALPORT}
varnishtest ${TESTOPTS} -Dlocalport=${LOCALPORT} cafe.vtc
......@@ -5,21 +5,27 @@ go 1.15
require (
code.uplex.de/uplex-varnish/varnishapi v0.0.0-20191205154529-31e610a4139d
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883
github.com/go-openapi/errors v0.19.4 // indirect
github.com/go-openapi/strfmt v0.19.5
github.com/go-openapi/swag v0.19.9 // indirect
github.com/go-openapi/validate v0.19.8 // indirect
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
github.com/go-openapi/analysis v0.20.1 // indirect
github.com/go-openapi/errors v0.20.0 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/runtime v0.19.29 // indirect
github.com/go-openapi/strfmt v0.20.1
github.com/go-openapi/swag v0.19.15 // indirect
github.com/go-openapi/validate v0.20.2 // indirect
github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff // indirect
github.com/google/go-cmp v0.3.0
github.com/google/go-cmp v0.5.2
github.com/googleapis/gnostic v0.2.0 // indirect
github.com/haproxytech/models v1.2.4
github.com/haproxytech/models/v2 v2.2.0
github.com/imdario/mergo v0.3.6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/prometheus/client_golang v0.9.2
github.com/sergi/go-diff v1.1.0 // indirect
github.com/sirupsen/logrus v1.2.0
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/api v0.16.4
k8s.io/apimachinery v0.16.4
k8s.io/client-go v0.16.4
github.com/sirupsen/logrus v1.6.0
go.mongodb.org/mongo-driver v1.5.3 // indirect
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 // indirect
k8s.io/api v0.16.15
k8s.io/apimachinery v0.16.15
k8s.io/client-go v0.16.15
k8s.io/code-generator v0.16.5-beta.1 // indirect
)
This diff is collapsed.
......@@ -449,6 +449,7 @@ type BackendConfigSpec struct {
Services []string `json:"services,omitempty"`
Probe *ProbeSpec `json:"probe,omitempty"`
Director *DirectorSpec `json:"director,omitempty"`
TLS *TLSSpec `json:"tls,omitempty"`
HostHeader string `json:"host-header,omitempty"`
ConnectTimeout string `json:"connect-timeout,omitempty"`
FirstByteTimeout string `json:"first-byte-timeout,omitempty"`
......@@ -485,6 +486,20 @@ type DirectorSpec struct {
Rampup string `json:"rampup,omitempty"`
}
// TLSSpec corresponds to spec.tls in a BackendConfig, to configure
// TLS onload -- TLS connections to IngressBackends using haproxy and
// the via feature of the klarlack implementation of Varnish. Only
// supported for klarlack.
type TLSSpec struct {
Authority *string `json:"authority,omitempty"`
MaxConn *int32 `json:"maxConn,omitempty"`
StickTblSz *int32 `json:"stickTableSize,omitempty"`
Verify *bool `json:"verify,omitempty"`
CACrt string `json:"caCrt,omitempty"`
CAIssuer string `json:"caIssuer,omitempty"`
CASecret string `json:"caSecret,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// BackendConfigList is a list of BackendConfig Custom Resources.
......
......@@ -192,6 +192,11 @@ func (in *BackendConfigSpec) DeepCopyInto(out *BackendConfigSpec) {
*out = new(DirectorSpec)
(*in).DeepCopyInto(*out)
}
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
*out = new(TLSSpec)
(*in).DeepCopyInto(*out)
}
if in.MaxConnections != nil {
in, out := &in.MaxConnections, &out.MaxConnections
*out = new(int32)
......@@ -531,6 +536,42 @@ func (in *ShardSpec) DeepCopy() *ShardSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TLSSpec) DeepCopyInto(out *TLSSpec) {
*out = *in
if in.Authority != nil {
in, out := &in.Authority, &out.Authority
*out = new(string)
**out = **in
}
if in.MaxConn != nil {
in, out := &in.MaxConn, &out.MaxConn
*out = new(int32)
**out = **in
}
if in.StickTblSz != nil {
in, out := &in.StickTblSz, &out.StickTblSz
*out = new(int32)
**out = **in
}
if in.Verify != nil {
in, out := &in.Verify, &out.Verify
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSpec.
func (in *TLSSpec) DeepCopy() *TLSSpec {
if in == nil {
return nil
}
out := new(TLSSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TemplateConfig) DeepCopyInto(out *TemplateConfig) {
*out = *in
......
......@@ -148,6 +148,7 @@ func NewIngressController(
ingClass string,
namespace string,
devMode bool,
varnishImpl string,
kubeClient kubernetes.Interface,
vc *varnish.Controller,
hc *haproxy.Controller,
......@@ -243,7 +244,8 @@ func NewIngressController(
}
ingc.nsQs = NewNamespaceQueues(ingc.log, ingClass, vc, hc, ingc.listers,
ingc.client, ingc.recorder, incomplRetryDelay, devMode)
ingc.client, ingc.recorder, incomplRetryDelay, devMode,
varnishImpl)
return &ingc, nil
}
......
......@@ -59,6 +59,9 @@ const (
defACLfailStatus = uint16(403)
defDNSRetryDelay = "30s"
defMax2ndTTL = "5m"
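// TLS onload defaults; extOnldInstances is the (currently
// hard-wired) number of haproxy server instances configured for an
// ExternalName Service.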
defStickTblSz = 128
defMaxConn = 2000
extOnldInstances = 3
vikingPubSvcKey = vikingLabelPfx + "svc"
vikingPubSvcVal = "public"
)
......@@ -335,7 +338,12 @@ func (worker *NamespaceWorker) getVCLSvc(
addrs []vcl.Address,
extName string,
extPort string,
) (vcl.Service, *vcr_v1alpha1.BackendConfig, update.Status) {
) (
vcl.Service,
*vcr_v1alpha1.BackendConfig,
*haproxy.OnloadSpec,
update.Status,
) {
if svcNamespace == "" {
svcNamespace = "default"
}
......@@ -354,9 +362,9 @@ func (worker *NamespaceWorker) getVCLSvc(
status := update.MakeNoop(
"No BackendConfig in namespace %s",
svcNamespace)
return vclSvc, nil, status
return vclSvc, nil, nil, status
}
return vclSvc, nil, update.MakeRecoverable("%v", err)
return vclSvc, nil, nil, update.MakeRecoverable("%v", err)
}
var bcfg *vcr_v1alpha1.BackendConfig
BCfgs:
......@@ -370,13 +378,13 @@ BCfgs:
}
}
if bcfg == nil {
return vclSvc, nil,
return vclSvc, nil, nil,
update.MakeNoop("Service %s/%s: no BackendConfig "+
"specified", svcNamespace, svcName)
}
if bcfg.Spec.Director != nil {
if extName != "" {
return vclSvc, nil,
return vclSvc, nil, nil,
update.MakeFatal(
"Service %s/%s, BackendConfig %s/%s: "+
"director may not be set for "+
......@@ -420,16 +428,88 @@ BCfgs:
if bcfg.Spec.FollowDNSRedirects != nil {
vclSvc.FollowDNSRedirects = *bcfg.Spec.FollowDNSRedirects
}
return vclSvc, bcfg, update.MakeSuccess("")
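// If the BackendConfig requests TLS onload, build the haproxy
// OnloadSpec, applying defaults; only supported for klarlack.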
var onload *haproxy.OnloadSpec
if bcfg.Spec.TLS != nil {
if worker.varnishImpl != "klarlack" {
return vclSvc, nil, nil, update.MakeFatal(
"Service %s/%s, BackendConfig %s/%s: "+
"TLS onload only enabled for the "+
"klarlack implementation "+
"(varnishImpl: %s)",
svcNamespace, svcName, bcfg.Namespace,
bcfg.Name, worker.varnishImpl)
}
vclSvc.Via = true
onload = &haproxy.OnloadSpec{
Verify: true,
StickTblSz: defStickTblSz,
MaxConn: defMaxConn,
}
if bcfg.Spec.TLS.MaxConn != nil {
onload.MaxConn = int(*bcfg.Spec.TLS.MaxConn)
}
if bcfg.Spec.TLS.StickTblSz != nil {
onload.StickTblSz = int(*bcfg.Spec.TLS.StickTblSz)
}
if bcfg.Spec.TLS.Authority != nil {
authority := *bcfg.Spec.TLS.Authority
vclSvc.Authority = &authority
onload.Authority = true
} else {
vclSvc.Authority = nil
onload.Authority = false
}
if bcfg.Spec.TLS.Verify != nil && !*bcfg.Spec.TLS.Verify {
onload.Verify = false
} else if bcfg.Spec.TLS.CACrt != "" {
// XXX
// - find CACrt in cert-manager.io/v1, same ns
// - Sync Incomplete if not found
// - else check isCA field
// - SyncFatal if false
// - else find Secret in secretName field, same ns
// - Sync Incomplete if not found
// - else add to haproxySpec
// will have to send to crt-dnldr
if bcfg.Spec.TLS.CAIssuer != "" {
// XXX
// 1. find in cert-manager, may be a cluster
// issuer if ns/name is specified
// 2. SyncIncomplete if not found
// 3. otherwise attempt verification
// 4. SyncFatal if verification fails
// - not added to any config
}
} else if bcfg.Spec.TLS.CASecret != "" {
// XXX
// 1. Find in the same ns
// 2. SyncIncomplete if not found
// 3. otherwise update haproxy spec
// will have to send to crt-dnldr
} else {
// SyncFatal, Verify true but no crt
}
}
return vclSvc, bcfg, onload, update.MakeSuccess("")
}
func (worker *NamespaceWorker) ings2VCLSpec(
ings []*extensions.Ingress) (vcl.Spec,
map[string]*vcr_v1alpha1.BackendConfig, update.Status) {
func (worker *NamespaceWorker) ings2VCLSpec(ings []*extensions.Ingress) (
vcl.Spec,
map[string]*vcr_v1alpha1.BackendConfig,
map[string]*haproxy.OnloadSpec,
update.Status,
) {
vclSpec := vcl.Spec{}
vclSpec.IntSvcs = make(map[string]vcl.Service)
vclSpec.ExtSvcs = make(map[string]vcl.Service)
bcfgs := make(map[string]*vcr_v1alpha1.BackendConfig)
onlds := make(map[string]*haproxy.OnloadSpec)
for _, ing := range ings {
namespace := ing.Namespace
if namespace == "" {
......@@ -443,23 +523,33 @@ func (worker *NamespaceWorker) ings2VCLSpec(
addrs, extName, extPort, status := worker.
ingBackend2Addrs(namespace, *backend)
if status.IsError() {
return vclSpec, bcfgs, status
return vclSpec, bcfgs, onlds, status
}
vclSvc, bcfg, status := worker.getVCLSvc(namespace,
backend.ServiceName, addrs, extName, extPort)
vclSvc, bcfg, onload, status := worker.
getVCLSvc(namespace, backend.ServiceName, addrs,
extName, extPort)
if status.IsError() {
return vclSpec, bcfgs, status
return vclSpec, bcfgs, onlds, status
}
vclSpec.DefaultService = vclSvc
key := namespace + "/" + backend.ServiceName
if extName == "" {
vclSpec.IntSvcs[key] = vclSvc
if onload != nil {
onload.Instances = len(addrs)
}
} else {
vclSpec.ExtSvcs[key] = vclSvc
if onload != nil {
onload.Instances = extOnldInstances
}
}
if bcfg != nil {
bcfgs[vclSvc.Name] = bcfg
}
if onload != nil {
onlds[vclSvc.Name] = onload
}
}
for _, rule := range ing.Spec.Rules {
vclRule := vcl.Rule{Host: rule.Host}
......@@ -473,7 +563,7 @@ func (worker *NamespaceWorker) ings2VCLSpec(
ingBackend2Addrs(namespace,
path.Backend)
if status.IsError() {
return vclSpec, bcfgs, status
return vclSpec, bcfgs, onlds, status
}
if extName == "" &&
(addrs == nil || len(addrs) == 0) {
......@@ -481,29 +571,39 @@ func (worker *NamespaceWorker) ings2VCLSpec(
namespace + " IngressBackend=" +
path.Backend.String())
}
vclSvc, bcfg, status := worker.
vclSvc, bcfg, onload, status := worker.
getVCLSvc(namespace,
path.Backend.ServiceName, addrs,
extName, extPort)
if status.IsError() {
return vclSpec, bcfgs, status
return vclSpec, bcfgs, onlds, status
}
vclRule.PathMap[path.Path] = vclSvc
key := namespace + "/" +
path.Backend.ServiceName
if extName == "" {
vclSpec.IntSvcs[key] = vclSvc
if onload != nil {
onload.Instances = len(addrs)
}
} else {
vclSpec.ExtSvcs[key] = vclSvc
if onload != nil {
onload.Instances =
extOnldInstances
}
}
if bcfg != nil {
bcfgs[vclSvc.Name] = bcfg
}
if onload != nil {
onlds[vclSvc.Name] = onload
}
}
vclSpec.Rules = append(vclSpec.Rules, vclRule)
}
}
return vclSpec, bcfgs, update.MakeSuccess("")
return vclSpec, bcfgs, onlds, update.MakeSuccess("")
}
func configConditions(vclConds []vcl.MatchTerm,
......@@ -995,7 +1095,7 @@ func (worker *NamespaceWorker) addOrUpdateIng(
}
worker.log.Infof("Ingresses implemented by Varnish Service %s: %v",
svcKey, ingNames)
vclSpec, bcfgs, status := worker.ings2VCLSpec(ings)
vclSpec, bcfgs, onlds, status := worker.ings2VCLSpec(ings)
if status.IsError() {
return status
}
......@@ -1051,7 +1151,18 @@ func (worker *NamespaceWorker) addOrUpdateIng(
if err != nil {
return update.MakeFatal("%v", err)
}
if offldrSpec.Name == "" {
if len(onlds) > 0 {
if len(onlds) > 1 {
// XXX
return update.MakeFatal(
"Multiple TLS onload configs currently not " +
"supported")
}
for _, v := range onlds {
offldrSpec.Onload = v
}
}
if offldrSpec.Name == "" && len(onlds) == 0 {
worker.log.Infof("No TLS config found for Ingresses: %v",
ingNames)
} else {
......@@ -1073,10 +1184,7 @@ func (worker *NamespaceWorker) addOrUpdateIng(
return status
}
if len(offldrSpec.Secrets) == 0 {
worker.log.Infof("Service %s: no TLS certificates specified",
svcKey)
} else {
if len(offldrSpec.Secrets) != 0 || len(onlds) != 0 {
if status := worker.hController.Update(svcKey, offldAddrs,
offldrSpec); status.IsError() {
return status
......
......@@ -62,6 +62,7 @@ import (
type NamespaceWorker struct {
namespace string
ingClass string
varnishImpl string
log *logrus.Logger
vController *varnish.Controller
hController *haproxy.Controller
......@@ -296,6 +297,7 @@ func (worker *NamespaceWorker) work() {
type NamespaceQueues struct {
Queue workqueue.RateLimitingInterface
ingClass string
varnishImpl string
log *logrus.Logger
vController *varnish.Controller
hController *haproxy.Controller
......@@ -326,6 +328,7 @@ func NewNamespaceQueues(
recorder record.EventRecorder,
incomplRetryDelay time.Duration,
devMode bool,
varnishImpl string,
) *NamespaceQueues {
q := workqueue.NewNamedRateLimitingQueue(
......@@ -334,6 +337,7 @@ func NewNamespaceQueues(
Queue: q,
log: log,
ingClass: ingClass,
varnishImpl: varnishImpl,
vController: vController,
hController: hController,
workers: make(map[string]*NamespaceWorker),
......@@ -379,6 +383,7 @@ func (qs *NamespaceQueues) next() {
worker = &NamespaceWorker{
namespace: ns,
ingClass: qs.ingClass,
varnishImpl: qs.varnishImpl,
log: qs.log,
vController: qs.vController,
hController: qs.hController,
......
......@@ -31,6 +31,7 @@ package haproxy
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
......@@ -40,7 +41,7 @@ import (
"time"
"github.com/go-openapi/strfmt"
"github.com/haproxytech/models"
models "github.com/haproxytech/models/v2"
)
var fmts = strfmt.NewFormats()
......@@ -53,14 +54,23 @@ const (
versionHdr = "Configuration-Version"
reloadIDHdr = "Reload-ID"
varnishSock = "unix@/varnish.sock"
backendSock = "unix@/run/offload/onload.sock"
frontend = "offloader"
backend = "varnish"
server = backend
onloader = "onloader"
varnish = "varnish"
server = varnish
ingBackends = "ingressBackends"
frontendSitePath = sitesPath + "/" + frontend
backendSitePath = sitesPath + "/" + onloader
crtPath = "/etc/ssl/private"
caBundlePath = "/run/haproxy/ca-bundle.crt"
)
var port = int64(4443)
var (
port = int64(4443)
sslPort = int64(443)
roundRobin = "roundrobin"
)
// ReloadStatus classifies the current state of a dataplane reload.
type ReloadStatus uint8
......@@ -197,7 +207,7 @@ var offldSite = &models.Site{
},
Farms: []*models.SiteFarm{
{
Name: backend,
Name: varnish,
UseAs: "default",
Servers: []*models.Server{
{
......@@ -215,6 +225,58 @@ func getSite() ([]byte, error) {
return offldSite.MarshalBinary()
}
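// getOnldSite marshals the haproxy "onloader" site configuration for
// TLS onload: a TCP-mode listener on the unix socket at backendSock
// (accepting the PROXY protocol), and a farm with spec.Instances
// servers for the IngressBackends.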
func getOnldSite(spec *OnloadSpec) ([]byte, error) {
// XXX: stick-table configuration
maxConn := int64(spec.MaxConn)
site := &models.Site{
Name: onloader,
Service: &models.SiteService{
Listeners: []*models.Bind{
{
Name: onloader,
Address: backendSock,
Mode: "660",
AcceptProxy: true,
},
},
Mode: "tcp",
Maxconn: &maxConn,
},
Farms: []*models.SiteFarm{
{
Name: ingBackends,
UseAs: "default",
Mode: "tcp",
Balance: &models.Balance{
Algorithm: &roundRobin,
},
Servers: []*models.Server{},
},
},
}
for s := 0; s < spec.Instances; s++ {
server := &models.Server{
Name: fmt.Sprintf("s%02d", s),
Address: "0.0.0.0",
Port: &sslPort,
Ssl: "enabled",
Alpn: "http/1.1",
Stick: "enabled",
}
if spec.Verify {
server.Verify = "required"
server.SslCafile = caBundlePath
if spec.Authority {
server.Sni = "fc_pp_authority"
}
} else {
server.Verify = "none"
}
site.Farms[0].Servers = append(site.Farms[0].Servers, server)
}
return site.MarshalBinary()
}
func drainAndClose(body io.ReadCloser) {
io.Copy(ioutil.Discard, body)
body.Close()
......@@ -355,14 +417,24 @@ func (client *DataplaneClient) FinishTx(
}
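// configTLS marshals the offloader site configuration (or, when
// onldSpec is non-nil, the onloader site configuration) and sends it
// to the dataplane API at path with the given HTTP method; no request
// body is marshalled for DELETE.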
func (client *DataplaneClient) configTLS(
tx *models.Transaction, spec Spec, path, method string) error {
tx *models.Transaction,
path, method string,
onldSpec *OnloadSpec,
) error {
var rdr *bytes.Reader
if method != http.MethodDelete {
site, err := getSite()
var siteBytes []byte
var err error
if onldSpec == nil {
siteBytes, err = getSite()
} else {
siteBytes, err = getOnldSite(onldSpec)
}
if err != nil {
return err
}
rdr = bytes.NewReader(site)
rdr = bytes.NewReader(siteBytes)
}
req, err := client.getReq(path, method, rdr,
......@@ -407,39 +479,72 @@ func (client *DataplaneClient) configTLS(
}
}
// AddTLS adds the offloader configuration for haproxy, as specified
// by spec, in the dataplane transaction tx.
// AddOffldr adds the offloader configuration for haproxy, in the
// dataplane transaction tx.
//
// AddTLS MUST be used if the offloader was not configured previously
// AddOffldr MUST be used if the offloader was not configured previously
// since the haproxy container was started, or after deletion.
//
// A non-nil error return may wrap a DataplaneError.
func (client *DataplaneClient) AddTLS(
tx *models.Transaction, spec Spec) error {
func (client *DataplaneClient) AddOffldr(tx *models.Transaction) error {
return client.configTLS(tx, sitesPath, http.MethodPost, nil)
}
return client.configTLS(tx, spec, sitesPath, http.MethodPost)
// UpdateOffldr modifies the offloader configuration for haproxy, in
// the dataplane transaction tx.
//
// UpdateOffldr MUST be used if the offloader was previously added with
// AddOffldr, and not removed with DeleteOffldr.
//
// A non-nil error return may wrap a DataplaneError.
func (client *DataplaneClient) UpdateOffldr(tx *models.Transaction) error {
return client.configTLS(tx, frontendSitePath, http.MethodPut, nil)
}
// DeleteOffldr removes the haproxy offloader configuration, in the
// dataplane transaction tx.
//
// A non-nil error return may wrap a DataplaneError.
func (client *DataplaneClient) DeleteOffldr(tx *models.Transaction) error {
return client.configTLS(tx, frontendSitePath, http.MethodDelete, nil)
}
// UpdateTLS modifies the offloader configuration for haproxy to the
// specification in spec, in the dataplane transaction tx.
// AddOnldr adds the onloader configuration for haproxy, in the
// dataplane transaction tx. onldSpec.Instances specifies the number
// of servers in the haproxy backend.
//
// UpdateTLS MUST be used if the offloader was previously added with
// AddTLS, and not removed with DeleteTLS.
// AddOnldr MUST be used if the onloader was not configured previously
// since the haproxy container was started, or after deletion.
//
// A non-nil error return may wrap a DataplaneError.
func (client *DataplaneClient) UpdateTLS(
tx *models.Transaction, spec Spec) error {
func (client *DataplaneClient) AddOnldr(
tx *models.Transaction,
onldSpec *OnloadSpec,
) error {
return client.configTLS(tx, sitesPath, http.MethodPost, onldSpec)
}
return client.configTLS(tx, spec, frontendSitePath, http.MethodPut)
// UpdateOnldr modifies the onloader configuration for haproxy, in
// the dataplane transaction tx. onldSpec.Instances specifies the
// number of servers in the haproxy backend.
//
// UpdateOnldr MUST be used if the onloader was previously added with
// AddOnldr, and not removed with DeleteOnldr.
//
// A non-nil error return may wrap a DataplaneError.
func (client *DataplaneClient) UpdateOnldr(
tx *models.Transaction,
onldSpec *OnloadSpec,
) error {
return client.configTLS(tx, backendSitePath, http.MethodPut, onldSpec)
}
// DeleteTLS removes the haproxy offloader configuration, in the
// DeleteOnldr removes the haproxy onloader configuration, in the
// dataplane transaction tx.
//
// A non-nil error return may wrap a DataplaneError.
func (client *DataplaneClient) DeleteTLS(tx *models.Transaction) error {
return client.configTLS(tx, Spec{}, frontendSitePath, http.MethodDelete)
func (client *DataplaneClient) DeleteOnldr(tx *models.Transaction) error {
return client.configTLS(tx, backendSitePath, http.MethodDelete, nil)
}
// DeleteTx removes the dataplane transaction tx. This should be
......@@ -500,12 +605,13 @@ type sitesBody struct {
Sites models.Sites `json:"data"`
}
// OffldrStatus returns true iff the offloader site has been loaded by
// the dataplane API, and returns the current configuration version.
// LoaderStatus returns booleans for whether the off- and onloader
// sites have been loaded by the dataplane API, and returns the
// current configuration version.
//
// A non-nil error return may wrap a DataplaneError.
func (client *DataplaneClient) OffldrStatus() (
loaded bool, version int, err error) {
func (client *DataplaneClient) LoaderStatus() (
offLoaded, onLoaded bool, version int, err error) {
req, err := client.getReq(sitesPath, http.MethodGet, nil, false)
if err != nil {
......@@ -544,7 +650,11 @@ func (client *DataplaneClient) OffldrStatus() (
// XXX what if version from body & the header don't match?
for _, site := range []*models.Site(sb.Sites) {
if site.Name == frontend {
loaded = true
offLoaded = true
} else if site.Name == onloader {
onLoaded = true
}
if offLoaded && onLoaded {
break
}
}
......
This diff is collapsed.
......@@ -190,6 +190,9 @@ sub vcl_init {
{{- if .Probe}}
, probe = {{probeName $name}}
{{- end}}
{{- if .Via}}
, via = vk8s_via
{{- end}}
{{- end}}
);
{{- if .ResolverIdleTimeout}}
......
......@@ -27,6 +27,10 @@ echo TLS offload example with multiple certificates distinguished by SNI
cd ${MYPATH}/../examples/tls/sni
make deploy verify undeploy
echo TLS onload
cd ${MYPATH}/../examples/tls/onload
make deploy verify undeploy
echo Single namespace example
cd ${MYPATH}/../examples/namespace/
make deploy verify undeploy
......