Commit 9eb94ce7 authored by Geoff Simmons

First working deployment of self-sharding cluster configuration.

parent 8fff6733
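How the pieces fit: when an Ingress carries the annotation custom.varnish-cache.org/self-sharding with value "on" or "true" (case-insensitive), the controller lists the Pods behind the varnish-ingress-admin Service, records each Pod's name (or hostname), IP and http port as ClusterNodes in the VCL spec, and appends the output of self-shard.tmpl to the generated VCL. Each Varnish node then forwards a request to whichever cluster node the shard director designates for it, unless that node is itself. A minimal standalone Go sketch of the idea, using a plain modulo hash for brevity (the real directors.shard() uses a consistent-hash ring, so node churn remaps far fewer keys; all names below are illustrative):

package main

import (
	"fmt"
	"hash/fnv"
)

// pickPrimary maps a cache key to the cluster node responsible for it.
func pickPrimary(nodes []string, key string) string {
	h := fnv.New32a()
	h.Write([]byte(key))
	return nodes[h.Sum32()%uint32(len(nodes))]
}

func main() {
	nodes := []string{"varnish-0", "varnish-1", "varnish-2"}
	self := "varnish-1" // cf. server.identity in the VCL below
	key := "/coffee"
	if primary := pickPrimary(nodes, key); primary != self {
		fmt.Println("forward", key, "to", primary)
	} else {
		fmt.Println("serve", key, "from the local cache")
	}
}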
@@ -31,6 +31,7 @@ package controller
import (
"fmt"
"reflect"
"strings"
"time"
"code.uplex.de/uplex-varnish/k8s-ingress/cmd/varnish"
@@ -61,6 +62,7 @@ const (
admSecretKey = "admin"
admSvcName = "varnish-ingress-admin"
admPortName = "varnishadm"
selfShardKey = "custom.varnish-cache.org/self-sharding"
// resyncPeriod = 30 * time.Second
)
@@ -415,6 +417,91 @@ func (ingc *IngressController) Stop() {
ingc.syncQueue.shutdown()
}
func (ingc *IngressController) configSharding(spec *vcl.Spec,
ing *extensions.Ingress) error {
ann, exists := ing.Annotations[selfShardKey]
if !exists {
return nil
}
if !strings.EqualFold(ann, "on") && !strings.EqualFold(ann, "true") {
return nil
}
ingc.log.Debugf("Set cluster shard configuration for Ingress %s/%s",
ing.Namespace, ing.Name)
// Get the Pods for the Varnish admin service
svcKey := ing.Namespace + "/" + admSvcName
svcObj, svcExists, err := ingc.svcLister.GetByKey(svcKey)
if err != nil {
return err
}
if !svcExists {
return fmt.Errorf("Service not found: %s", svcKey)
}
svc, ok := svcObj.(*api_v1.Service)
if !ok {
return fmt.Errorf("Unexpected obj found for service %s: %v",
svcKey, svcObj)
}
ingc.log.Debug("Admin service for shard configuration:", svc)
pods, err := ingc.client.Core().Pods(svc.Namespace).
List(meta_v1.ListOptions{
LabelSelector: labels.Set(svc.Spec.Selector).String(),
})
if err != nil {
return fmt.Errorf("Error getting pod information for service "+
"%s: %v", svcKey, err)
}
if len(pods.Items) <= 1 {
return fmt.Errorf("Sharding requested, but only %d pods found "+
"for service %s", len(pods.Items), svcKey)
}
ingc.log.Debug("Pods for shard configuration:", pods.Items)
// Populate spec.ClusterNodes with Pod names and the http endpoint
for _, pod := range pods.Items {
var varnishCntnr api_v1.Container
var httpPort int32
for _, c := range pod.Spec.Containers {
if c.Image == "varnish-ingress/varnish" {
varnishCntnr = c
break
}
}
if varnishCntnr.Image != "varnish-ingress/varnish" {
return fmt.Errorf("No Varnish container found in Pod "+
"%s for service %s", pod.Name, svcKey)
}
for _, p := range varnishCntnr.Ports {
if p.Name == "http" {
httpPort = p.ContainerPort
break
}
}
if httpPort == 0 {
return fmt.Errorf("No http port found in Pod %s for "+
"service %s", pod.Name, svcKey)
}
node := vcl.Service{Addresses: make([]vcl.Address, 1)}
if pod.Spec.Hostname != "" {
node.Name = pod.Spec.Hostname
} else {
node.Name = pod.Name
}
node.Addresses[0].IP = pod.Status.PodIP
node.Addresses[0].Port = httpPort
spec.ClusterNodes = append(spec.ClusterNodes, node)
}
ingc.log.Debugf("Spec configuration for self-sharding in Ingress "+
"%s/%s: %+v", ing.Namespace, ing.Name, spec.ClusterNodes)
return nil
}
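// Example (hypothetical values): an Ingress in namespace "default" annotated
// with custom.varnish-cache.org/self-sharding: "on" leads configSharding to
// list the Pods selected by the Service default/varnish-ingress-admin and to
// append one vcl.Service per Pod (Pod name, PodIP, http container port) to
// spec.ClusterNodes.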
func (ingc *IngressController) addOrUpdateIng(task Task,
ing extensions.Ingress) {
@@ -430,6 +517,14 @@ func (ingc *IngressController) addOrUpdateIng(task Task,
return
}
if err = ingc.configSharding(&vclSpec, &ing); err != nil {
// XXX as above
ingc.syncQueue.requeueAfter(task, err, 5*time.Second)
ingc.recorder.Eventf(&ing, api_v1.EventTypeWarning, "Rejected",
"%v was rejected: %v", key, err)
return
}
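// As with the VCL spec errors above: if sharding cannot be configured yet
// (e.g. the admin Service or its Pods are not found), the task is requeued
// for another attempt in 5s, and a warning Event is attached to the Ingress.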
ingc.log.Debugf("Check if Ingress is loaded: key=%s uuid=%s hash=%0x",
key, string(ing.UID), vclSpec.Canonical().DeepHash())
if ingc.hasIngress(&ing, vclSpec) {
......
@@ -108,7 +108,10 @@ func (vc *VarnishController) monitor() {
vc.checkInst(inst)
}
-vc.updateVarnishInstances(insts)
+if err := vc.updateVarnishInstances(insts); err != nil {
+vc.log.Errorf("Errors updating Varnish "+
+"instances: %+v", err)
+}
}
}
}
@@ -29,7 +29,6 @@
package varnish
import (
-"bytes"
"fmt"
"io"
"reflect"
@@ -122,14 +121,6 @@ func (vc *VarnishController) Start(errChan chan error) {
go vc.monitor()
}
-func (vc *VarnishController) getSrc() (string, error) {
-var buf bytes.Buffer
-if err := vcl.Tmpl.Execute(&buf, vc.spec.spec); err != nil {
-return "", err
-}
-return buf.String(), nil
-}
func (vc *VarnishController) updateVarnishInstance(svc *varnishSvc,
cfgName string, vclSrc string) error {
@@ -173,6 +164,8 @@ func (vc *VarnishController) updateVarnishInstance(svc *varnishSvc,
vc.log.Debugf("Load config %s at %s", cfgName, svc.addr)
err = adm.VCLInline(cfgName, vclSrc)
if err != nil {
vc.log.Debugf("Error loading config %s at %s: %v",
cfgName, svc.addr, err)
return VarnishAdmError{addr: svc.addr, err: err}
}
vc.log.Infof("Loaded config %s at Varnish endpoint %s", cfgName,
@@ -217,7 +210,7 @@ func (vc *VarnishController) updateVarnishInstances(svcs []*varnishSvc) error {
return nil
}
-vclSrc, err := vc.getSrc()
+vclSrc, err := vc.spec.spec.GetSrc()
if err != nil {
return err
}
......
-vcl 4.0;
import std;
import directors;
@@ -6,14 +5,15 @@ import directors;
probe vk8s_probe_varnish {
.request = "HEAD /vk8s_cluster_health HTTP/1.1"
"Host: vk8s_cluster";
-.interval = 1s;
+.timeout = 5s;
+.interval = 5s;
}
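# Each node probes its peers over their regular http port; the peers answer
# the probe themselves (see the vk8s_cluster_health check in vcl_recv below),
# so cluster health never depends on the application backends.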
{{range $node := .ClusterNodes -}}
backend {{$node.Name}} {
.host = "{{(index $node.Addresses 0).IP}}";
.port = "{{(index $node.Addresses 0).Port}}";
-.probe = probe_varnish;
+.probe = vk8s_probe_varnish;
}
{{end -}}
@@ -27,16 +27,16 @@ acl vk8s_cluster_acl {
sub vcl_init {
new vk8s_cluster = directors.shard();
{{range $node := .ClusterNodes -}}
-varnish_director.add_backend({{$node.Name}});
+vk8s_cluster.add_backend({{$node.Name}});
{{end -}}
-varnish_director.reconfigure();
+vk8s_cluster.reconfigure();
}
sub vcl_recv {
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
-return (vcl(vk8s_readiness));
+return (synth(200));
}
return (synth(404));
}
@@ -57,15 +57,15 @@ sub vcl_backend_fetch {
if (bereq.retries == 0
&& !bereq.uncacheable
&& remote.ip !~ vk8s_cluster_acl
-&& "" + varnish_director.backend(resolve=NOW) != server.identity) {
-set bereq.backend = varnish_director.backend(resolve=LAZY);
+&& "" + vk8s_cluster.backend(resolve=NOW) != server.identity) {
+set bereq.backend = vk8s_cluster.backend(resolve=NOW);
set bereq.http.VK8S-Is-Bgfetch = bereq.is_bgfetch;
return (fetch);
}
}
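# The fetch is forwarded only on the first attempt, only when cacheable,
# only when the request did not already arrive from a cluster peer, and only
# when this node (server.identity) is not itself the shard member chosen for
# the request. VK8S-Is-Bgfetch lets the primary distinguish background
# fetches, which vcl_recv uses to decide whether stale objects may be served.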
sub vcl_backend_response {
-if (bereq.backend == varnish_director.backend(resolve=LAZY)) {
+if (bereq.backend == vk8s_cluster.backend(resolve=LAZY)) {
if (beresp.http.VK8S-Cluster-TTL) {
set beresp.ttl = std.duration(
beresp.http.VK8S-Cluster-TTL + "s", 1s);
@@ -84,7 +84,7 @@ sub vcl_backend_response {
}
sub vcl_backend_error {
-if (bereq.backend == varnish_director.backend(resolve=LAZY)) {
+if (bereq.backend == vk8s_cluster.backend(resolve=LAZY)) {
return (deliver);
}
}
@@ -93,7 +93,7 @@ sub vcl_deliver {
unset resp.http.VK8S-Cluster-TTL;
if (remote.ip ~ vk8s_cluster_acl) {
if (! obj.uncacheable) {
-set resp.http.X-Cluster-TTL = obj.ttl;
+set resp.http.VK8S-Cluster-TTL = obj.ttl;
}
return (deliver);
}
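# TTL round trip: the node that owns the object reports its remaining TTL in
# VK8S-Cluster-TTL here, and the forwarding node adopts it (capped at 5m) in
# vcl_backend_response above, so both copies expire at about the same time.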
......
vcl 4.0;
import std;
import directors;
import re2;
backend notfound {
# 192.0.2.0/24 reserved for docs & examples (RFC5737).
.host = "192.0.2.255";
.port = "80";
}
backend coffee-svc_192_2e_0_2e_2_2e_4 {
.host = "192.0.2.4";
.port = "80";
}
backend coffee-svc_192_2e_0_2e_2_2e_5 {
.host = "192.0.2.5";
.port = "80";
}
backend tea-svc_192_2e_0_2e_2_2e_1 {
.host = "192.0.2.1";
.port = "80";
}
backend tea-svc_192_2e_0_2e_2_2e_2 {
.host = "192.0.2.2";
.port = "80";
}
backend tea-svc_192_2e_0_2e_2_2e_3 {
.host = "192.0.2.3";
.port = "80";
}
sub vcl_init {
new hosts = re2.set(posix_syntax=true, literal=true, anchor=both);
hosts.add("cafe.example.com");
hosts.compile();
new coffee-svc_director = directors.round_robin();
coffee-svc_director.add_backend(coffee-svc_192_2e_0_2e_2_2e_4);
coffee-svc_director.add_backend(coffee-svc_192_2e_0_2e_2_2e_5);
new tea-svc_director = directors.round_robin();
tea-svc_director.add_backend(tea-svc_192_2e_0_2e_2_2e_1);
tea-svc_director.add_backend(tea-svc_192_2e_0_2e_2_2e_2);
tea-svc_director.add_backend(tea-svc_192_2e_0_2e_2_2e_3);
new cafe_2e_example_2e_com_url = re2.set(posix_syntax=true, anchor=start);
cafe_2e_example_2e_com_url.add("/coffee",
backend=coffee-svc_director.backend());
cafe_2e_example_2e_com_url.add("/tea",
backend=tea-svc_director.backend());
cafe_2e_example_2e_com_url.compile();
}
sub set_backend {
set req.backend_hint = notfound;
if (hosts.match(req.http.Host)) {
if (hosts.nmatches() != 1) {
# Fail fast when the match was not unique.
return (fail);
}
if (0 != 0) {
#
}
elsif (hosts.which() == 1) {
if (cafe_2e_example_2e_com_url.match(req.url)) {
set req.backend_hint = cafe_2e_example_2e_com_url.backend(select=FIRST);
}
}
}
if (req.backend_hint == notfound) {
return (synth(404));
}
}
sub vcl_miss {
call set_backend;
}
sub vcl_pass {
call set_backend;
}
import std;
import directors;
probe vk8s_probe_varnish {
.request = "HEAD /vk8s_cluster_health HTTP/1.1"
"Host: vk8s_cluster";
.timeout = 5s;
.interval = 5s;
}
backend varnish-8445d4f7f-z2b9p {
.host = "172.17.0.12";
.port = "80";
.probe = vk8s_probe_varnish;
}
backend varnish-8445d4f7f-k22dn {
.host = "172.17.0.13";
.port = "80";
.probe = vk8s_probe_varnish;
}
backend varnish-8445d4f7f-ldljf {
.host = "172.17.0.14";
.port = "80";
.probe = vk8s_probe_varnish;
}
acl vk8s_cluster_acl {
"172.17.0.12";
"172.17.0.13";
"172.17.0.14";
}
sub vcl_init {
new vk8s_cluster = directors.shard();
vk8s_cluster.add_backend(varnish-8445d4f7f-z2b9p);
vk8s_cluster.add_backend(varnish-8445d4f7f-k22dn);
vk8s_cluster.add_backend(varnish-8445d4f7f-ldljf);
vk8s_cluster.reconfigure();
}
sub vcl_recv {
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
return (synth(200));
}
return (synth(404));
}
# prevent deadlock for accidental cyclic requests
set req.hash_ignore_busy = true;
# if we're async, don't deliver stale
if (req.http.VK8S-Is-Bgfetch == "true") {
set req.grace = 0s;
}
return (hash);
}
}
sub vcl_backend_fetch {
if (bereq.retries == 0
&& !bereq.uncacheable
&& remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster.backend(resolve=NOW) != server.identity) {
set bereq.backend = vk8s_cluster.backend(resolve=NOW);
set bereq.http.VK8S-Is-Bgfetch = bereq.is_bgfetch;
return (fetch);
}
}
sub vcl_backend_response {
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY)) {
if (beresp.http.VK8S-Cluster-TTL) {
set beresp.ttl = std.duration(
beresp.http.VK8S-Cluster-TTL + "s", 1s);
if (beresp.ttl > 5m) {
set beresp.ttl = 5m;
}
unset beresp.http.VK8S-Cluster-TTL;
}
else {
set beresp.uncacheable = true;
}
return (deliver);
}
}
sub vcl_backend_error {
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY)) {
return (deliver);
}
}
sub vcl_deliver {
unset resp.http.VK8S-Cluster-TTL;
if (remote.ip ~ vk8s_cluster_acl) {
if (! obj.uncacheable) {
set resp.http.VK8S-Cluster-TTL = obj.ttl;
}
return (deliver);
}
}
import std;
import directors;
probe vk8s_probe_varnish {
.request = "HEAD /vk8s_cluster_health HTTP/1.1"
"Host: vk8s_cluster";
.timeout = 5s;
.interval = 5s;
}
backend varnish-8445d4f7f-z2b9p {
.host = "172.17.0.12";
.port = "80";
.probe = vk8s_probe_varnish;
}
backend varnish-8445d4f7f-k22dn {
.host = "172.17.0.13";
.port = "80";
.probe = vk8s_probe_varnish;
}
backend varnish-8445d4f7f-ldljf {
.host = "172.17.0.14";
.port = "80";
.probe = vk8s_probe_varnish;
}
acl vk8s_cluster_acl {
"172.17.0.12";
"172.17.0.13";
"172.17.0.14";
}
sub vcl_init {
new vk8s_cluster = directors.shard();
vk8s_cluster.add_backend(varnish-8445d4f7f-z2b9p);
vk8s_cluster.add_backend(varnish-8445d4f7f-k22dn);
vk8s_cluster.add_backend(varnish-8445d4f7f-ldljf);
vk8s_cluster.reconfigure();
}
sub vcl_recv {
if (remote.ip ~ vk8s_cluster_acl) {
if (req.http.Host == "vk8s_cluster") {
if (req.url == "/vk8s_cluster_health") {
return (synth(200));
}
return (synth(404));
}
# prevent deadlock for accidental cyclic requests
set req.hash_ignore_busy = true;
# if we're async, don't deliver stale
if (req.http.VK8S-Is-Bgfetch == "true") {
set req.grace = 0s;
}
return (hash);
}
}
sub vcl_backend_fetch {
if (bereq.retries == 0
&& !bereq.uncacheable
&& remote.ip !~ vk8s_cluster_acl
&& "" + vk8s_cluster.backend(resolve=NOW) != server.identity) {
set bereq.backend = vk8s_cluster.backend(resolve=NOW);
set bereq.http.VK8S-Is-Bgfetch = bereq.is_bgfetch;
return (fetch);
}
}
sub vcl_backend_response {
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY)) {
if (beresp.http.VK8S-Cluster-TTL) {
set beresp.ttl = std.duration(
beresp.http.VK8S-Cluster-TTL + "s", 1s);
if (beresp.ttl > 5m) {
set beresp.ttl = 5m;
}
unset beresp.http.VK8S-Cluster-TTL;
}
else {
set beresp.uncacheable = true;
}
return (deliver);
}
}
sub vcl_backend_error {
if (bereq.backend == vk8s_cluster.backend(resolve=LAZY)) {
return (deliver);
}
}
sub vcl_deliver {
unset resp.http.VK8S-Cluster-TTL;
if (remote.ip ~ vk8s_cluster_acl) {
if (! obj.uncacheable) {
set resp.http.VK8S-Cluster-TTL = obj.ttl;
}
return (deliver);
}
}
@@ -29,6 +29,7 @@
package vcl
import (
"bytes"
"encoding/binary"
"fmt"
"hash"
@@ -112,6 +113,7 @@ type Spec struct {
DefaultService Service
Rules []Rule
AllServices map[string]Service
ClusterNodes []Service
}
func (spec Spec) DeepHash() uint64 {
@@ -131,6 +133,9 @@ func (spec Spec) DeepHash() uint64 {
hash.Write([]byte(svc))
spec.AllServices[svc].hash(hash)
}
for _, node := range spec.ClusterNodes {
node.hash(hash)
}
return hash.Sum64()
}
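// With ClusterNodes included in DeepHash, any change in the set of Varnish
// Pods (or their addresses) produces a new hash, so the controller no longer
// treats the spec as already loaded (cf. hasIngress in addOrUpdateIng).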
@@ -139,6 +144,7 @@ func (spec Spec) Canonical() Spec {
DefaultService: Service{Name: spec.DefaultService.Name},
Rules: make([]Rule, len(spec.Rules)),
AllServices: make(map[string]Service, len(spec.AllServices)),
ClusterNodes: make([]Service, len(spec.ClusterNodes)),
}
copy(canon.DefaultService.Addresses, spec.DefaultService.Addresses)
sort.Stable(ByIPPort(canon.DefaultService.Addresses))
@@ -153,6 +159,11 @@
canon.AllServices[name] = svcs
sort.Stable(ByIPPort(canon.AllServices[name].Addresses))
}
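// ClusterNodes get the same normalization as the other services: sorted by
// name, with addresses sorted by IP and port, so that semantically equal
// specs hash identically regardless of Pod listing order.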
copy(canon.ClusterNodes, spec.ClusterNodes)
sort.Stable(ByName(canon.ClusterNodes))
for _, node := range canon.ClusterNodes {
sort.Stable(ByIPPort(node.Addresses))
}
return canon
}
@@ -170,10 +181,14 @@ var fMap = template.FuncMap{
},
}
-const tmplSrc = "vcl.tmpl"
+const (
+ingTmplSrc = "vcl.tmpl"
+shardTmplSrc = "self-shard.tmpl"
+)
var (
-Tmpl = template.Must(template.New(tmplSrc).Funcs(fMap).ParseFiles(tmplSrc))
+IngressTmpl = template.Must(template.New(ingTmplSrc).Funcs(fMap).ParseFiles(ingTmplSrc))
+ShardTmpl = template.Must(template.New(shardTmplSrc).Funcs(fMap).ParseFiles(shardTmplSrc))
symPattern = regexp.MustCompile("^[[:alpha:]][[:word:]-]*$")
first = regexp.MustCompile("[[:alpha:]]")
restIllegal = regexp.MustCompile("[^[:word:]-]+")
@@ -188,6 +203,19 @@ func replIllegal(ill []byte) []byte {
return repl
}
func (spec Spec) GetSrc() (string, error) {
var buf bytes.Buffer
if err := IngressTmpl.Execute(&buf, spec); err != nil {
return "", err
}
if len(spec.ClusterNodes) > 0 {
if err := ShardTmpl.Execute(&buf, spec); err != nil {
return "", err
}
}
return buf.String(), nil
}
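// GetSrc replaces the controller's former getSrc: it renders the Ingress
// template and, when ClusterNodes is non-empty, appends the self-sharding
// VCL to the same buffer, yielding a single config source for VCLInline.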
func Mangle(s string) string {
var mangled string
bytes := []byte(s)
......
@@ -37,6 +37,15 @@ import (
"text/template"
)
func cmpGold(got []byte, goldfile string) (bool, error) {
goldpath := filepath.Join("testdata", goldfile)
gold, err := ioutil.ReadFile(goldpath)
if err != nil {
return false, err
}
return bytes.Equal(got, gold), nil
}
var teaSvc = Service{
Name: "tea-svc",
Addresses: []Address{
@@ -84,24 +93,19 @@ var cafeSpec = Spec{
},
}
-func TestTemplate(t *testing.T) {
+func TestIngressTemplate(t *testing.T) {
var buf bytes.Buffer
-if err := Tmpl.Execute(&buf, cafeSpec); err != nil {
-t.Error("Execute():", err)
+gold := "ingressrule.golden"
+if err := IngressTmpl.Execute(&buf, cafeSpec); err != nil {
+t.Fatal("Execute():", err)
}
-goldpath := filepath.Join("testdata", "ingressrule.golden")
-gold, err := ioutil.ReadFile(goldpath)
+ok, err := cmpGold(buf.Bytes(), gold)
if err != nil {
-t.Fatalf("Error reading %s: %v", goldpath, err)
+t.Fatalf("Reading %s: %v", gold, err)
}
-if !bytes.Equal(buf.Bytes(), gold) {
+if !ok {
t.Errorf("Generated VCL for IngressSpec does not match gold "+
-"file: %s", goldpath)
-if testing.Verbose() {
-t.Log("Generated VCL:", string(buf.Bytes()))
-t.Log(goldpath, ":", string(gold))
-}
+"file: %s", gold)
}
}
@@ -217,57 +221,66 @@ func TestCanoncial(t *testing.T) {
}
}
-var varnishCluster = struct{
-ClusterNodes []Service
-}{
-ClusterNodes: []Service{
-Service{
-Name: "varnish-8445d4f7f-z2b9p",
-Addresses: []Address{
-{"172.17.0.12", 80},
-},
-},
-Service{
-Name: "varnish-8445d4f7f-k22dn",
-Addresses: []Address{
-{"172.17.0.13", 80},
-},
-},
-Service{
-Name: "varnish-8445d4f7f-ldljf",
-Addresses: []Address{
-{"172.17.0.14", 80},
-},
-},
-},
-}
+var varnishCluster = []Service{
+Service{
+Name: "varnish-8445d4f7f-z2b9p",
+Addresses: []Address{
+{"172.17.0.12", 80},
+},
+},
+Service{
+Name: "varnish-8445d4f7f-k22dn",
+Addresses: []Address{
+{"172.17.0.13", 80},
+},
+},
+Service{
+Name: "varnish-8445d4f7f-ldljf",
+Addresses: []Address{
+{"172.17.0.14", 80},
+},
+},
+}
func TestShardTemplate(t *testing.T) {
var buf bytes.Buffer
gold := "shard.golden"
tmplName := "self-shard.tmpl"
tmpl, err := template.New("self-shard.tmpl").Funcs(fMap).
ParseFiles("self-shard.tmpl")
tmpl, err := template.New(tmplName).Funcs(fMap).ParseFiles(tmplName)
if err != nil {
t.Error("Cannot parse shard template:", err)
return
}
if err := tmpl.Execute(&buf, varnishCluster); err != nil {
t.Error("Execute():", err)
cafeSpec.ClusterNodes = varnishCluster
if err := tmpl.Execute(&buf, cafeSpec); err != nil {
t.Error("cluster template Execute():", err)
return
}
t.Log(buf.String())
ok, err := cmpGold(buf.Bytes(), gold)
if err != nil {
t.Fatalf("Reading %s: %v", gold, err)
}
if !ok {
t.Errorf("Generated VCL for self-sharding does not match gold "+
"file: %s", gold)
}
}
-// goldpath := filepath.Join("testdata", "ingressrule.golden")
-// gold, err := ioutil.ReadFile(goldpath)
-// if err != nil {
-// t.Fatalf("Error reading %s: %v", goldpath, err)
-// }
-// if !bytes.Equal(buf.Bytes(), gold) {
-// t.Errorf("Generated VCL for IngressSpec does not match gold "+
-// "file: %s", goldpath)
-// if testing.Verbose() {
-// t.Log("Generated VCL:", string(buf.Bytes()))
-// t.Log(goldpath, ":", string(gold))
-// }
-// }
func TestGetSrc(t *testing.T) {
gold := "ingress_shard.golden"
cafeSpec.ClusterNodes = varnishCluster
src, err := cafeSpec.GetSrc()
if err != nil {
t.Error("Spec.GetSrc():", err)
return
}
ok, err := cmpGold([]byte(src), gold)
if err != nil {
t.Fatalf("Reading %s: %v", gold, err)
}
if !ok {
t.Errorf("Generated VCL from GetSrc() does not match gold "+
"file: %s", gold)
}
}
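// ingress_shard.golden is expected to equal ingressrule.golden followed by
// shard.golden, since GetSrc() concatenates the two template outputs.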
@@ -10,5 +10,5 @@ RUN vgo generate && \
FROM alpine:3.8
COPY --from=builder /go/src/code.uplex.de/uplex-varnish/k8s-ingress/cmd/k8s-ingress /k8s-ingress
-COPY --from=builder /go/src/code.uplex.de/uplex-varnish/k8s-ingress/cmd/varnish/vcl/vcl.tmpl /vcl.tmpl
+COPY --from=builder /go/src/code.uplex.de/uplex-varnish/k8s-ingress/cmd/varnish/vcl/*.tmpl /
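# The wildcard now ships self-shard.tmpl alongside vcl.tmpl; the controller
# parses both templates by bare filename at startup.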
ENTRYPOINT ["/k8s-ingress"]