From 2b97c42f9f5a9eed330850c78830ea1ad7e961cd Mon Sep 17 00:00:00 2001 From: ramr Date: Sat, 3 Mar 2018 00:16:15 -0800 Subject: [PATCH 1/9] Add support to dynamically configure haproxy to reduce the reloads needed when configuration changes. This commit includes: o Support for a dynamic configuration manager in the template router. o HAProxy configuration manager implementation. o Scale up support with a pool of dynamic servers for each backend. o Dynamic route addition support with a pre-allocated pool of blueprint routes. o HAProxy dynamic configuration retry support. o Passthrough routes weights are relative, so as to allow them to be scaled. Fixes a bug with the haproxy dynamic config api hanging. o Support for a blueprint route namespace, so that we can load custom blueprints at startup. o Refactor, log errors, cleanup and bug fixes. o Remove unused/extraneous map entries for blueprint routes. o Self-review changes. o Add new tests and modify existing integration+e2e tests. --- .../haproxy/conf/haproxy-config.template | 58 +- pkg/cmd/infra/router/template.go | 131 ++- pkg/oc/admin/router/router.go | 14 +- .../template/configmanager/haproxy/backend.go | 403 ++++++++ .../template/configmanager/haproxy/client.go | 204 ++++ .../configmanager/haproxy/client_test.go | 609 +++++++++++ .../configmanager/haproxy/converter.go | 88 ++ .../configmanager/haproxy/fake_haproxy.go | 453 +++++++++ .../template/configmanager/haproxy/manager.go | 954 ++++++++++++++++++ .../template/configmanager/haproxy/map.go | 209 ++++ .../configmanager/haproxy/map_test.go | 841 +++++++++++++++ pkg/router/template/plugin.go | 10 +- pkg/router/template/router.go | 202 +++- pkg/router/template/router_test.go | 2 + pkg/router/template/template_helper.go | 17 + pkg/router/template/template_helper_test.go | 41 + pkg/router/template/types.go | 86 ++ test/end-to-end/router_test.go | 4 + 18 files changed, 4276 insertions(+), 50 deletions(-) create mode 100644 
pkg/router/template/configmanager/haproxy/backend.go create mode 100644 pkg/router/template/configmanager/haproxy/client.go create mode 100644 pkg/router/template/configmanager/haproxy/client_test.go create mode 100644 pkg/router/template/configmanager/haproxy/converter.go create mode 100644 pkg/router/template/configmanager/haproxy/fake_haproxy.go create mode 100644 pkg/router/template/configmanager/haproxy/manager.go create mode 100644 pkg/router/template/configmanager/haproxy/map.go create mode 100644 pkg/router/template/configmanager/haproxy/map_test.go diff --git a/images/router/haproxy/conf/haproxy-config.template b/images/router/haproxy/conf/haproxy-config.template index fc1f06b06014..ed4eed1083a3 100644 --- a/images/router/haproxy/conf/haproxy-config.template +++ b/images/router/haproxy/conf/haproxy-config.template @@ -6,6 +6,7 @@ {{- define "/var/lib/haproxy/conf/haproxy.config" }} {{- $workingDir := .WorkingDir }} {{- $defaultDestinationCA := .DefaultDestinationCA }} +{{- $dynamicConfigManager := .DynamicConfigManager }} {{- $router_ip_v4_v6_mode := env "ROUTER_IP_V4_V6_MODE" "v4" }} @@ -46,6 +47,8 @@ global {{- end}} ca-base /etc/ssl crt-base /etc/ssl + # TODO: Check if we can get reload to be faster by saving server state. 
+ # server-state-file /var/lib/haproxy/run/haproxy.state stats socket /var/lib/haproxy/run/haproxy.sock mode 600 level admin expose-fd listeners stats timeout 2m @@ -196,7 +199,7 @@ frontend public_ssl # for the SNI case, we also need to compare it in case-insensitive mode (by converting it to lowercase) as RFC 4343 says acl sni req.ssl_sni -m found acl sni_passthrough req.ssl_sni,lower,map_reg(/var/lib/haproxy/conf/os_sni_passthrough.map) -m found - use_backend be_tcp:%[req.ssl_sni,lower,map_reg(/var/lib/haproxy/conf/os_tcp_be.map)] if sni sni_passthrough + use_backend %[req.ssl_sni,lower,map_reg(/var/lib/haproxy/conf/os_tcp_be.map)] if sni sni_passthrough # if the route is SNI and NOT passthrough enter the termination flow use_backend be_sni if sni @@ -367,19 +370,10 @@ backend openshift_default */}} {{- range $cfgIdx, $cfg := .State }} {{- if matchValues (print $cfg.TLSTermination) "" "edge" "reencrypt" }} - {{- if (eq $cfg.TLSTermination "") }} -# Plain http backend -backend be_http:{{$cfgIdx}} - {{- else if (eq $cfg.TLSTermination "edge") }} - -# Plain http backend but request is TLS, terminated at edge -backend be_edge_http:{{$cfgIdx}} - {{ else if (eq $cfg.TLSTermination "reencrypt") }} - -# Secure backend which requires re-encryption -backend be_secure:{{$cfgIdx}} - {{- end }}{{/* end chceck for router type */}} +# Plain http backend or backend with TLS terminated at the edge or a +# secure backend with re-encryption. 
+backend {{genBackendNamePrefix $cfg.TLSTermination}}:{{$cfgIdx}} mode http option redispatch option forwardfor @@ -473,12 +467,36 @@ backend be_secure:{{$cfgIdx}} {{- end }}{{/* end get serviceUnit from its name */}} {{- end }}{{/* end range over serviceUnitNames */}} + {{- with $dynamicConfigManager }} + {{- if (eq $cfg.TLSTermination "reencrypt") }} + {{- range $idx, $serverName := $dynamicConfigManager.GenerateDynamicServerNames $cfgIdx }} + server {{$serverName}} 172.4.0.4:8765 weight 0 ssl disabled check inter {{firstMatch $timeSpecPattern (index $cfg.Annotations "router.openshift.io/haproxy.health.check.interval") (env "ROUTER_BACKEND_CHECK_INTERVAL") "5000ms"}} + {{- if gt (len (index $cfg.Certificates (printf "%s_pod" $cfg.Host)).Contents) 0 }} verify required ca-file {{ $workingDir }}/cacerts/{{$cfgIdx}}.pem + {{- else }} + {{- if gt (len $defaultDestinationCA) 0 }} verify required ca-file {{ $defaultDestinationCA }} + {{- else }} verify none + {{- end }} + {{- end }} + {{- with $podMaxConn := index $cfg.Annotations "haproxy.router.openshift.io/pod-concurrent-connections" }} + {{- if (isInteger (index $cfg.Annotations "haproxy.router.openshift.io/pod-concurrent-connections")) }} maxconn {{$podMaxConn}} {{- end }} + {{- end}}{{/* end pod-concurrent-connections annotation */}} + {{- end }}{{/* end range over dynamic server names */}} + + {{- else }} + {{- with $name := $dynamicConfigManager.ServerTemplateName $cfgIdx }} + {{- with $size := $dynamicConfigManager.ServerTemplateSize $cfgIdx }} + server-template {{$name}}- 1-{{$size}} 172.4.0.4:8765 check disabled + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }}{{/* end if tls==edge/none/reencrypt */}} {{- if eq $cfg.TLSTermination "passthrough" }} # Secure backend, pass through -backend be_tcp:{{$cfgIdx}} +backend {{genBackendNamePrefix $cfg.TLSTermination}}:{{$cfgIdx}} {{- if ne (env "ROUTER_SYSLOG_ADDRESS") ""}} option tcplog {{- end }} @@ -528,6 +546,15 @@ backend be_tcp:{{$cfgIdx}} {{- end 
}}{{/* end get ServiceUnit from serviceUnitName */}} {{- end }}{{/* end if weight != 0 */}} {{- end }}{{/* end iterate over services*/}} + + {{- with $dynamicConfigManager }} + {{- with $name := $dynamicConfigManager.ServerTemplateName $cfgIdx }} + {{- with $size := $dynamicConfigManager.ServerTemplateSize $cfgIdx }} + server-template {{$name}}- 1-{{$size}} 172.4.0.4:8765 check disabled + {{- end }} + {{- end }} + {{- end }} + {{- end }}{{/*end tls==passthrough*/}} {{- end }}{{/* end loop over routes */}} @@ -564,6 +591,7 @@ backend be_tcp:{{$cfgIdx}} {{ end -}}{{/* end http host map template */}} + {{/* os_edge_reencrypt_be.map : contains a mapping of www.example.com -> . This map is similar to os_http_be.map but for tls routes. by attaching prefix: be_edge_http for edge terminated routes @@ -590,7 +618,7 @@ backend be_tcp:{{$cfgIdx}} {{/* os_tcp_be.map: contains a mapping of www.example.com -> . This map is used to discover the correct backend - by attaching a prefix (be_tcp: or be_secure:) by use_backend statements if acls are matched. + by use_backend statements if acls are matched. */}} {{ define "/var/lib/haproxy/conf/os_tcp_be.map" -}} {{ range $idx, $line := generateHAProxyMap . 
-}} diff --git a/pkg/cmd/infra/router/template.go b/pkg/cmd/infra/router/template.go index fc1c31453a7f..671c3aae2bc6 100644 --- a/pkg/cmd/infra/router/template.go +++ b/pkg/cmd/infra/router/template.go @@ -15,6 +15,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ktypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -31,6 +32,7 @@ import ( "github.com/openshift/origin/pkg/cmd/util" cmdversion "github.com/openshift/origin/pkg/cmd/version" projectinternalclientset "github.com/openshift/origin/pkg/project/generated/internalclientset" + routeapi "github.com/openshift/origin/pkg/route/apis/route" routeinternalclientset "github.com/openshift/origin/pkg/route/generated/internalclientset" routelisters "github.com/openshift/origin/pkg/route/generated/listers/route/internalversion" "github.com/openshift/origin/pkg/router" @@ -38,6 +40,7 @@ import ( "github.com/openshift/origin/pkg/router/metrics" "github.com/openshift/origin/pkg/router/metrics/haproxy" templateplugin "github.com/openshift/origin/pkg/router/template" + haproxyconfigmanager "github.com/openshift/origin/pkg/router/template/configmanager/haproxy" "github.com/openshift/origin/pkg/util/proc" "github.com/openshift/origin/pkg/util/writerlease" "github.com/openshift/origin/pkg/version" @@ -46,6 +49,10 @@ import ( // defaultReloadInterval is how often to do reloads in seconds. const defaultReloadInterval = 5 +// defaultCommitInterval is how often (in seconds) to commit the "in-memory" +// router changes made using the dynamic configuration manager. 
+const defaultCommitInterval = 60 * 60 + var routerLong = templates.LongDesc(` Start a router @@ -85,6 +92,18 @@ type TemplateRouter struct { Ciphers string StrictSNI bool MetricsType string + + TemplateRouterConfigManager +} + +type TemplateRouterConfigManager struct { + ConfigManagerName string + ConfigManagerConnectionInfo string + CommitInterval time.Duration + BlueprintRouteNamespace string + BlueprintRoutePoolSize int + DynamicServerPrefix string + MaxDynamicServers int } // isTrue here has the same logic as the function within package pkg/router/template @@ -93,14 +112,14 @@ func isTrue(s string) bool { return v } -// reloadInterval returns how often to run the router reloads. The interval -// value is based on an environment variable or the default. -func reloadInterval() time.Duration { - interval := util.Env("RELOAD_INTERVAL", fmt.Sprintf("%vs", defaultReloadInterval)) +// getIntervalFromEnv returns a interval value based on an environment +// variable or the default. +func getIntervalFromEnv(name string, defaultValSecs int) time.Duration { + interval := util.Env(name, fmt.Sprintf("%vs", defaultValSecs)) value, err := time.ParseDuration(interval) if err != nil { - glog.Warningf("Invalid RELOAD_INTERVAL %q, using default value %v ...", interval, defaultReloadInterval) - value = time.Duration(defaultReloadInterval * time.Second) + glog.Warningf("Invalid %q %q, using default value %v ...", name, interval, defaultValSecs) + value = time.Duration(time.Duration(defaultValSecs) * time.Second) } return value } @@ -113,12 +132,19 @@ func (o *TemplateRouter) Bind(flag *pflag.FlagSet) { flag.StringVar(&o.DefaultDestinationCAPath, "default-destination-ca-path", util.Env("DEFAULT_DESTINATION_CA_PATH", "/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt"), "A path to a PEM file containing the default CA bundle to use with re-encrypt routes. 
This CA should sign for certificates in the Kubernetes DNS space (service.namespace.svc).") flag.StringVar(&o.TemplateFile, "template", util.Env("TEMPLATE_FILE", ""), "The path to the template file to use") flag.StringVar(&o.ReloadScript, "reload", util.Env("RELOAD_SCRIPT", ""), "The path to the reload script to use") - flag.DurationVar(&o.ReloadInterval, "interval", reloadInterval(), "Controls how often router reloads are invoked. Mutiple router reload requests are coalesced for the duration of this interval since the last reload time.") + flag.DurationVar(&o.ReloadInterval, "interval", getIntervalFromEnv("RELOAD_INTERVAL", defaultReloadInterval), "Controls how often router reloads are invoked. Mutiple router reload requests are coalesced for the duration of this interval since the last reload time.") flag.BoolVar(&o.BindPortsAfterSync, "bind-ports-after-sync", util.Env("ROUTER_BIND_PORTS_AFTER_SYNC", "") == "true", "Bind ports only after route state has been synchronized") flag.StringVar(&o.MaxConnections, "max-connections", util.Env("ROUTER_MAX_CONNECTIONS", ""), "Specifies the maximum number of concurrent connections.") flag.StringVar(&o.Ciphers, "ciphers", util.Env("ROUTER_CIPHERS", ""), "Specifies the cipher suites to use. You can choose a predefined cipher set ('modern', 'intermediate', or 'old') or specify exact cipher suites by passing a : separated list.") flag.BoolVar(&o.StrictSNI, "strict-sni", isTrue(util.Env("ROUTER_STRICT_SNI", "")), "Use strict-sni bind processing (do not use default cert).") flag.StringVar(&o.MetricsType, "metrics-type", util.Env("ROUTER_METRICS_TYPE", ""), "Specifies the type of metrics to gather. Supports 'haproxy'.") + flag.StringVar(&o.ConfigManagerName, "config-manager", util.Env("ROUTER_CONFIG_MANAGER", ""), "Specifies the manager to use for dynamically configuring changes with the underlying router. 
Supports 'haproxy-manager'.") + flag.StringVar(&o.ConfigManagerConnectionInfo, "config-manager-connection-info", "", "Specifies connection information for the dynamic configuration manager.") + flag.DurationVar(&o.CommitInterval, "commit-interval", getIntervalFromEnv("COMMIT_INTERVAL", defaultCommitInterval), "Controls how often to commit (to the actual config) all the changes made using the router specific dynamic configuration manager.") + flag.StringVar(&o.BlueprintRouteNamespace, "blueprint-route-namespace", util.Env("ROUTER_BLUEPRINT_ROUTE_NAMESPACE", ""), "Specifies the namespace which contains the routes that serve as blueprints for the dynamic configuration manager.") + flag.IntVar(&o.BlueprintRoutePoolSize, "blueprint-route-pool-size", int(util.EnvInt("ROUTER_BLUEPRINT_ROUTE_POOL_SIZE", 10, 1)), "Specifies the size of the pre-allocated pool for each route blueprint managed by the router specific dynamic configuration manager. This can be overriden by an annotation router.openshift.io/pool-size on an individual route.") + flag.StringVar(&o.DynamicServerPrefix, "dynamic-server-prefix", util.Env("ROUTER_DYNAMIC_SERVER_PREFIX", ""), "Specifies the prefix for dynamic servers added to router backends. 
These dynamic servers are handled by the router specific dynamic configuration manager.") + flag.IntVar(&o.MaxDynamicServers, "max-dynamic-servers", int(util.EnvInt("ROUTER_MAX_DYNAMIC_SERVERS", 5, 1)), "Specifies the maximum number of dynamic servers added to a route for use by the router specific dynamic configuration manager.") } type RouterStats struct { @@ -212,6 +238,10 @@ func (o *TemplateRouterOptions) Complete() error { return fmt.Errorf("invalid reload interval: %v - must be a positive duration", nsecs) } + if nsecs := int(o.CommitInterval.Seconds()); nsecs < 1 { + return fmt.Errorf("invalid dynamic configuration manager commit interval: %v - must be a positive duration", nsecs) + } + return o.RouterSelection.Complete() } @@ -384,6 +414,46 @@ func (o *TemplateRouterOptions) Run() error { reloadCallbacks = append(reloadCallbacks, collector.CollectNow) } + kc, err := o.Config.Clients() + if err != nil { + return err + } + config, _, err := o.Config.KubeConfig() + if err != nil { + return err + } + routeclient, err := routeinternalclientset.NewForConfig(config) + if err != nil { + return err + } + projectclient, err := projectinternalclientset.NewForConfig(config) + if err != nil { + return err + } + + var cfgManager templateplugin.ConfigManager + if o.ConfigManagerName == "haproxy-manager" { + blueprintRoutes, err := o.blueprintRoutes(routeclient) + if err != nil { + return err + } + + uri := o.ConfigManagerConnectionInfo + if len(o.ConfigManagerConnectionInfo) == 0 { + uri = "unix:///var/lib/haproxy/run/haproxy.sock" + } + + cmopts := templateplugin.ConfigManagerOptions{ + ConnectionInfo: uri, + CommitInterval: o.CommitInterval, + BlueprintRoutes: blueprintRoutes, + BlueprintRoutePoolSize: o.BlueprintRoutePoolSize, + DynamicServerPrefix: o.DynamicServerPrefix, + MaxDynamicServers: o.MaxDynamicServers, + } + cfgManager = haproxyconfigmanager.NewHAProxyConfigManager(cmopts) + } + pluginCfg := templateplugin.TemplatePluginConfig{ WorkingDir: o.WorkingDir, 
TemplatePath: o.TemplateFile, @@ -404,23 +474,7 @@ func (o *TemplateRouterOptions) Run() error { MaxConnections: o.MaxConnections, Ciphers: o.Ciphers, StrictSNI: o.StrictSNI, - } - - kc, err := o.Config.Clients() - if err != nil { - return err - } - config, _, err := o.Config.KubeConfig() - if err != nil { - return err - } - routeclient, err := routeinternalclientset.NewForConfig(config) - if err != nil { - return err - } - projectclient, err := projectinternalclientset.NewForConfig(config) - if err != nil { - return err + DynamicConfigManager: cfgManager, } svcFetcher := templateplugin.NewListWatchServiceLookup(kc.Core(), o.ResyncInterval, o.Namespace) @@ -456,7 +510,36 @@ func (o *TemplateRouterOptions) Run() error { controller := factory.Create(plugin, false) controller.Run() + if blueprintPlugin != nil { + // f is like factory but filters the routes based on the + // blueprint route namespace and label selector (if any). + f := o.RouterSelection.NewFactory(routeclient, projectclient.Project().Projects(), kc) + f.LabelSelector = o.BlueprintRouteLabelSelector + f.Namespace = o.BlueprintRouteNamespace + f.ResyncInterval = o.ResyncInterval + c := f.Create(blueprintPlugin, false) + c.Run() + } + proc.StartReaper() select {} } + +// blueprintRoutes returns all the routes in the blueprint namespace. 
+func (o *TemplateRouterOptions) blueprintRoutes(routeclient *routeinternalclientset.Clientset) ([]*routeapi.Route, error) { + blueprints := make([]*routeapi.Route, 0) + if len(o.BlueprintRouteNamespace) == 0 { + return blueprints, nil + } + + routeList, err := routeclient.Route().Routes(o.BlueprintRouteNamespace).List(metav1.ListOptions{}) + if err != nil { + return blueprints, err + } + for _, r := range routeList.Items { + blueprints = append(blueprints, r.DeepCopy()) + } + + return blueprints, nil +} diff --git a/pkg/oc/admin/router/router.go b/pkg/oc/admin/router/router.go index 1f33b09a1134..78183f9886ea 100644 --- a/pkg/oc/admin/router/router.go +++ b/pkg/oc/admin/router/router.go @@ -806,11 +806,15 @@ func RunCmdRouter(f kcmdutil.Factory, cmd *cobra.Command, out, errout io.Writer, env["ROUTER_CANONICAL_HOSTNAME"] = cfg.RouterCanonicalHostname } // automatically start the internal metrics agent if we are handling a known type - if cfg.Type == "haproxy-router" && cfg.StatsPort != 0 { - env["ROUTER_LISTEN_ADDR"] = fmt.Sprintf("0.0.0.0:%d", cfg.StatsPort) - env["ROUTER_METRICS_TYPE"] = "haproxy" - env["ROUTER_METRICS_TLS_CERT_FILE"] = "/etc/pki/tls/metrics/tls.crt" - env["ROUTER_METRICS_TLS_KEY_FILE"] = "/etc/pki/tls/metrics/tls.key" + if cfg.Type == "haproxy-router" { + env["ROUTER_CONFIG_MANAGER"] = "haproxy-manager" + env["ROUTER_DYNAMIC_SERVER_PREFIX"] = "_dynamic" + if cfg.StatsPort != 0 { + env["ROUTER_LISTEN_ADDR"] = fmt.Sprintf("0.0.0.0:%d", cfg.StatsPort) + env["ROUTER_METRICS_TYPE"] = "haproxy" + env["ROUTER_METRICS_TLS_CERT_FILE"] = "/etc/pki/tls/metrics/tls.crt" + env["ROUTER_METRICS_TLS_KEY_FILE"] = "/etc/pki/tls/metrics/tls.key" + } } mtlsAuth := strings.TrimSpace(cfg.MutualTLSAuth) if len(mtlsAuth) > 0 && mtlsAuth != defaultMutualTLSAuth { diff --git a/pkg/router/template/configmanager/haproxy/backend.go b/pkg/router/template/configmanager/haproxy/backend.go new file mode 100644 index 000000000000..011926db0d8f --- /dev/null +++ 
b/pkg/router/template/configmanager/haproxy/backend.go @@ -0,0 +1,403 @@ +package haproxy + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/golang/glog" +) + +// BackendServerState indicates the state for a haproxy backend server. +type BackendServerState string + +const ( + // BackendServerStateReady indicates a server is ready. + BackendServerStateReady BackendServerState = "ready" + + // BackendServerStateDrain indicates a server is ready but draining. + BackendServerStateDrain BackendServerState = "drain" + + // BackendServerStateDown indicates a server is down. + BackendServerStateDown BackendServerState = "down" + + // BackendServerStateMaint indicates a server is under maintainence. + BackendServerStateMaint BackendServerState = "maint" + + // ListBackendsCommand is the command to get a list of all backends. + ListBackendsCommand = "show backend" + + // GetServersStateCommand gets the state of all servers. This can be + // optionally filtered by backends by passing a backend name. + GetServersStateCommand = "show servers state" + + // SetServerCommand sets server specific information and state. + SetServerCommand = "set server" + + // showBackendHeader is the haproxy backend list csv output header. + showBackendHeader = "name" + + // serverStateHeader is the haproxy server state csv output header. + serversStateHeader = "be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port" +) + +// backendEntry is an entry in the list of backends returned from haproxy. +type backendEntry struct { + Name string `csv:"name"` +} + +// serverStateInfo represents the state of a specific backend server. 
+type serverStateInfo struct { + BackendID string `csv:"be_id"` + BackendName string `csv:"be_name"` + ID string `csv:"srv_id"` + Name string `csv:"srv_name"` + IPAddress string `csv:"srv_addr"` + + OperationalState int32 `csv:"srv_op_state"` + AdministrativeState int32 `csv:"srv_admin_state"` + UserVisibleWeight int32 `csv:"srv_uweight"` + InitialWeight int32 `csv:"srv_iweight"` + + TimeSinceLastChange int `csv:"srv_time_since_last_change"` + LastHealthCheckStatus int `csv:"srv_check_status"` + LastHealthCheckResult int `csv:"srv_check_result"` + CheckHealth int `csv:"srv_check_health"` + CheckHealthState int `csv:"srv_check_state"` + AgentCheckState int `csv:"srv_agent_state"` + + BackendIDForced int `csv:"bk_f_forced_id"` + IDForced int `csv:"srv_f_forced_id"` + + FQDN string `csv:"srv_fqdn"` + Port int `csv:"srv_port"` +} + +// BackendServerInfo represents a server [endpoint] for a haproxy backend. +type BackendServerInfo struct { + Name string + FQDN string + IPAddress string + Port int + CurrentWeight int32 + InitialWeight int32 + State BackendServerState +} + +// Backend represents a specific haproxy backend. +type Backend struct { + name string + servers map[string]*backendServer + + client *Client +} + +// backendServer is internally used for managing a haproxy backend server. +type backendServer struct { + BackendServerInfo + + updatedIPAddress string + updatedPort int + updatedWeight string // as it can be a percentage. + updatedState BackendServerState +} + +// GetHAProxyBackends returns a list of haproxy backends. 
+func GetHAProxyBackends(c *Client) ([]*Backend, error) { + entries := []*backendEntry{} + converter := NewCSVConverter(showBackendHeader, &entries, nil) + _, err := c.RunCommand(ListBackendsCommand, converter) + if err != nil { + return []*Backend{}, err + } + + backends := make([]*Backend, len(entries)) + for k, v := range entries { + backends[k] = newBackend(v.Name, c) + } + + return backends, nil +} + +// newBackend returns a new Backend representing a haproxy backend. +func newBackend(name string, c *Client) *Backend { + return &Backend{ + name: name, + servers: make(map[string]*backendServer), + client: c, + } +} + +// Name returns the name of this haproxy backend. +func (b *Backend) Name() string { + return b.name +} + +// Reset resets the cached server info in this haproxy backend. +func (b *Backend) Reset() { + b.servers = make(map[string]*backendServer) +} + +// Refresh refreshs our internal state for this haproxy backend. +func (b *Backend) Refresh() error { + entries := []*serverStateInfo{} + converter := NewCSVConverter(serversStateHeader, &entries, stripVersionNumber) + cmd := fmt.Sprintf("%s %s", GetServersStateCommand, b.Name()) + _, err := b.client.RunCommand(cmd, converter) + if err != nil { + return err + } + + b.servers = make(map[string]*backendServer) + for _, v := range entries { + info := BackendServerInfo{ + Name: v.Name, + IPAddress: v.IPAddress, + Port: v.Port, + FQDN: v.FQDN, + CurrentWeight: v.UserVisibleWeight, + InitialWeight: v.InitialWeight, + State: getManagedServerState(v), + } + + b.servers[v.Name] = newBackendServer(info) + } + + return nil +} + +// Disable stops serving traffic for all servers for a haproxy backend. +func (b *Backend) Disable() error { + if _, err := b.Servers(); err != nil { + return err + } + + for _, s := range b.servers { + if err := b.DisableServer(s.Name); err != nil { + return err + } + } + + return nil +} + +// EnableServer enables serving traffic with a haproxy backend server. 
+func (b *Backend) EnableServer(name string) error { + glog.V(4).Infof("Enabling server %s with ready state", name) + return b.UpdateServerState(name, BackendServerStateReady) +} + +// DisableServer stops serving traffic for a haproxy backend server. +func (b *Backend) DisableServer(name string) error { + glog.V(4).Infof("Disabling server %s with maint state", name) + return b.UpdateServerState(name, BackendServerStateMaint) +} + +// Commit commits all the pending changes made to a haproxy backend. +func (b *Backend) Commit() error { + for _, s := range b.servers { + if err := s.ApplyChanges(b.name, b.client); err != nil { + return err + } + } + + b.Reset() + return nil +} + +// Servers returns the servers for this haproxy backend. +func (b *Backend) Servers() ([]BackendServerInfo, error) { + if len(b.servers) == 0 { + if err := b.Refresh(); err != nil { + return []BackendServerInfo{}, err + } + } + + serverInfo := make([]BackendServerInfo, len(b.servers)) + i := 0 + for _, s := range b.servers { + serverInfo[i] = s.BackendServerInfo + i++ + } + + return serverInfo, nil +} + +// UpdateServerInfo updates the information for a haproxy backend server. +func (b *Backend) UpdateServerInfo(id, ipaddr, port string, weight int32, relativeWeight bool) error { + server, err := b.FindServer(id) + if err != nil { + return err + } + + if len(ipaddr) > 0 { + server.updatedIPAddress = ipaddr + } + if n, err := strconv.Atoi(port); err == nil && n > 0 { + server.updatedPort = n + } + if weight > -1 { + suffix := "" + if relativeWeight { + suffix = "%" + } + server.updatedWeight = fmt.Sprintf("%v%s", weight, suffix) + } + + return nil +} + +// UpdateServerState specifies what should be the state of a haproxy backend +// server when all the changes made to the backend committed. 
+func (b *Backend) UpdateServerState(id string, state BackendServerState) error { + server, err := b.FindServer(id) + if err != nil { + return err + } + + server.updatedState = state + return nil +} + +// FindServer returns a specific haproxy backend server if found. +func (b *Backend) FindServer(id string) (*backendServer, error) { + if _, err := b.Servers(); err != nil { + return nil, err + } + + if s, ok := b.servers[id]; ok { + return s, nil + } + + return nil, fmt.Errorf("no server found for id: %s", id) +} + +// newBackendServer returns a BackendServer representing a haproxy backend server. +func newBackendServer(info BackendServerInfo) *backendServer { + return &backendServer{ + BackendServerInfo: info, + + updatedIPAddress: info.IPAddress, + updatedPort: info.Port, + updatedWeight: strconv.Itoa(int(info.CurrentWeight)), + updatedState: info.State, + } +} + +// ApplyChanges applies all the local backend server changes. +func (s *backendServer) ApplyChanges(backendName string, client *Client) error { + // Build the haproxy dynamic config API commands. + commands := []string{} + + cmdPrefix := fmt.Sprintf("%s %s/%s", SetServerCommand, backendName, s.Name) + + if s.updatedIPAddress != s.IPAddress || s.updatedPort != s.Port { + cmd := fmt.Sprintf("%s addr %s", cmdPrefix, s.updatedIPAddress) + if s.updatedPort != s.Port { + cmd = fmt.Sprintf("%s port %v", cmd, s.updatedPort) + } + commands = append(commands, cmd) + } + + if s.updatedWeight != strconv.Itoa(int(s.CurrentWeight)) { + // Build and execute the haproxy dynamic config API command. + cmd := fmt.Sprintf("%s weight %s", cmdPrefix, s.updatedWeight) + commands = append(commands, cmd) + } + + state := string(s.updatedState) + if s.updatedState == BackendServerStateDown { + // BackendServerStateDown for a server can't be set! 
+ state = "" + } + + if len(state) > 0 && s.updatedState != s.State { + cmd := fmt.Sprintf("%s state %s", cmdPrefix, state) + commands = append(commands, cmd) + } + + // Execute all the commands. + for _, cmd := range commands { + if err := s.executeCommand(cmd, client); err != nil { + return err + } + } + + return nil +} + +// executeCommand runs a server change command and handles the response. +func (s *backendServer) executeCommand(cmd string, client *Client) error { + responseBytes, err := client.Execute(cmd) + if err != nil { + return err + } + + response := strings.TrimSpace(string(responseBytes)) + if len(response) == 0 { + return nil + } + + okPrefixes := []string{"IP changed from", "no need to change"} + for _, prefix := range okPrefixes { + if strings.HasPrefix(response, prefix) { + return nil + } + } + + return fmt.Errorf("setting server info with %s : %s", cmd, response) +} + +// stripVersionNumber strips off the first line if it is a version number. +func stripVersionNumber(data []byte) ([]byte, error) { + // The first line contains the version number, so we need to strip + // that off in order to use the CSV converter. + // Example: + // > show servers state be_sni + // 1 + // # be_id be_name srv_id srv_name ... srv_fqdn srv_port + // 4 be_sni 1 fe_sni 127.0.0.1 2 0 1 1 46518 1 0 2 0 0 0 0 - 10444 + // + idx := bytes.Index(data, []byte("\n")) + if idx > -1 { + version := string(data[:idx]) + if _, err := strconv.ParseInt(version, 10, 0); err == nil { + if idx+1 < len(data) { + return data[idx+1:], nil + } + } + } + + return data, nil +} + +// getManagedServerState returns the "managed" state for a backend server. 
+func getManagedServerState(s *serverStateInfo) BackendServerState { + if (s.AdministrativeState & 0x01) == 0x01 { + return BackendServerStateMaint + } + if (s.AdministrativeState & 0x08) == 0x08 { + return BackendServerStateDrain + } + + if s.OperationalState == 0 { + maintainenceMasks := []int32{0x01, 0x02, 0x04, 0x20} + for _, m := range maintainenceMasks { + if (s.AdministrativeState & m) == m { + return BackendServerStateMaint + } + } + + drainingMasks := []int32{0x08, 0x10} + for _, m := range drainingMasks { + if (s.AdministrativeState & m) == m { + return BackendServerStateDrain + } + } + + return BackendServerStateDown + } + + return BackendServerStateReady +} diff --git a/pkg/router/template/configmanager/haproxy/client.go b/pkg/router/template/configmanager/haproxy/client.go new file mode 100644 index 000000000000..639cfc7dabfc --- /dev/null +++ b/pkg/router/template/configmanager/haproxy/client.go @@ -0,0 +1,204 @@ +package haproxy + +import ( + "bytes" + "fmt" + "strings" + "time" + + haproxy "github.com/bcicen/go-haproxy" + "github.com/golang/glog" +) + +const ( + // Prefix for the socket file used for haproxy dynamic API commands. + afUnixSocketPrefix = "unix://" + + // Prefix if TCP is used to communicate with haproxy. + tcpSocketPrefix = "tcp://" + + // maxRetries is the number of times a command is retried. + maxRetries = 3 +) + +// Client is a client used to dynamically configure haproxy. +type Client struct { + socketAddress string + timeout int + + backends []*Backend + maps map[string]*HAProxyMap +} + +// NewClient returns a client used to dynamically change the haproxy config. 
+func NewClient(socketName string, timeout int) *Client { + sockAddr := socketName + if !strings.HasPrefix(sockAddr, afUnixSocketPrefix) && !strings.HasPrefix(sockAddr, tcpSocketPrefix) { + sockAddr = fmt.Sprintf("%s%s", afUnixSocketPrefix, sockAddr) + } + + return &Client{ + socketAddress: sockAddr, + timeout: timeout, + backends: make([]*Backend, 0), + maps: make(map[string]*HAProxyMap), + } +} + +// RunCommand executes a haproxy dynamic config API command and if present +// converts the response as desired. +func (c *Client) RunCommand(cmd string, converter Converter) ([]byte, error) { + glog.V(4).Infof("Running haproxy command: %q ...", cmd) + buffer, err := c.runCommandWithRetries(cmd, maxRetries) + if err != nil { + glog.Warningf("haproxy dynamic config API command %s failed with error: %v", cmd, err) + return nil, err + } + + response := buffer.Bytes() + glog.V(4).Infof("haproxy command response: %q", string(response)) + if converter == nil { + return response, nil + } + + return converter.Convert(response) +} + +// Execute runs a haproxy dynamic config API command. +func (c *Client) Execute(cmd string) ([]byte, error) { + return c.RunCommand(cmd, nil) +} + +// Reset resets any changes and clears the backends and maps. +func (c *Client) Reset() { + c.backends = make([]*Backend, 0) + c.maps = make(map[string]*HAProxyMap) +} + +// Commit flushes out any pending changes on all the backends and maps. +func (c *Client) Commit() error { + for _, b := range c.backends { + if err := b.Commit(); err != nil { + return err + } + } + + for _, m := range c.maps { + if err := m.Commit(); err != nil { + return err + } + } + + return nil +} + +// Backends returns the list of configured haproxy backends. 
+func (c *Client) Backends() ([]*Backend, error) { + if len(c.backends) == 0 { + if backends, err := GetHAProxyBackends(c); err != nil { + return nil, err + } else { + c.backends = backends + } + } + + return c.backends, nil +} + +// FindBackend returns a specific haproxy backend if it is configured. +func (c *Client) FindBackend(id string) (*Backend, error) { + if _, err := c.Backends(); err != nil { + return nil, err + } + + for _, b := range c.backends { + if b.Name() == id { + return b, nil + } + } + + return nil, fmt.Errorf("no backend found for id: %s", id) +} + +// Maps returns the list of configured haproxy maps. +func (c *Client) Maps() ([]*HAProxyMap, error) { + if len(c.maps) == 0 { + hapMaps, err := GetHAProxyMaps(c) + if err != nil { + return nil, err + } + + for _, v := range hapMaps { + c.maps[v.Name()] = v + } + + return hapMaps, nil + } + + mapList := make([]*HAProxyMap, len(c.maps)) + i := 0 + for _, v := range c.maps { + mapList[i] = v + i++ + } + + return mapList, nil +} + +// FindMap returns a populated haproxy map. +func (c *Client) FindMap(name string) (*HAProxyMap, error) { + if _, err := c.Maps(); err != nil { + return nil, err + } + + if m, ok := c.maps[name]; ok { + return m, m.Refresh() + } + + return nil, fmt.Errorf("no map found for name: %s", name) +} + +// runCommandWithRetries retries a haproxy command upto the retry limit +// if the error for the command is a retryable error. 
+func (c *Client) runCommandWithRetries(cmd string, limit int) (*bytes.Buffer, error) { + retryAttempt := 0 + for { + client := &haproxy.HAProxyClient{ + Addr: c.socketAddress, + Timeout: c.timeout, + } + buffer, err := client.RunCommand(cmd) + if err == nil || !isRetryable(err, cmd) { + return buffer, err + } + + retryAttempt++ + if retryAttempt > limit { + return buffer, err + } + + msecs := retryAttempt * 10 + if msecs > 60 { + msecs = 60 + } + time.Sleep(time.Duration(msecs) * time.Millisecond) + glog.V(4).Infof("retry #%d: cmd: %q, err was %v", retryAttempt, cmd, err) + } +} + +// isRetryable checks if a haproxy command can be retried. +func isRetryable(err error, cmd string) bool { + retryableErrors := []string{ + "connection reset by peer", + "connection refused", + } + + s := err.Error() + for _, v := range retryableErrors { + if strings.HasSuffix(s, v) { + return true + } + } + + return false +} diff --git a/pkg/router/template/configmanager/haproxy/client_test.go b/pkg/router/template/configmanager/haproxy/client_test.go new file mode 100644 index 000000000000..9098dbad3a57 --- /dev/null +++ b/pkg/router/template/configmanager/haproxy/client_test.go @@ -0,0 +1,609 @@ +package haproxy + +import ( + "testing" +) + +type infoEntry struct { + Name string `csv:"name"` + Value string `csv:"value"` +} + +// TestNewClient tests a new client. +func TestNewClient(t *testing.T) { + testCases := []struct { + name string + sockFile string + }{ + { + name: "empty sockfile", + sockFile: "", + }, + { + name: "some sockfile", + sockFile: "/tmp/some-fake-haproxy.sock", + }, + { + name: "bad socketfile", + sockFile: "/non-existent/fake-haproxy.sock", + }, + } + for _, tc := range testCases { + if client := NewClient(tc.sockFile, 0); client == nil { + t.Errorf("TestNewClient test case %s failed. Unexpected error", tc.name) + } + } +} + +// TestClientRunCommand tests client command execution. 
+func TestClientRunCommand(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + testCases := []struct { + name string + sockFile string + failureExpected bool + }{ + { + name: "empty sockfile", + sockFile: "", + failureExpected: true, + }, + { + name: "bad socketfile", + sockFile: "/non-existent/fake-haproxy.sock", + failureExpected: true, + }, + { + name: "valid sockfile", + sockFile: server.SocketFile(), + failureExpected: false, + }, + } + + for _, tc := range testCases { + client := NewClient(tc.sockFile, 1) + response, err := client.RunCommand("show info", nil) + if tc.failureExpected && err == nil { + t.Errorf("TestClientRunCommand test case %s expected a failure but got none, response=%s", + tc.name, string(response)) + } + if !tc.failureExpected && err != nil { + t.Errorf("TestClientRunCommand test case %s expected no failure but got one: %v", tc.name, err) + } + } +} + +// TestClientRunInfoCommandConverter tests client show info command execution with a converter. 
+func TestClientRunInfoCommandConverter(t *testing.T) { + testCases := []struct { + name string + command string + header string + converter ByteConverterFunc + failureExpected bool + }{ + { + name: "info parser", + command: "show info", + header: "name value", + converter: nil, + failureExpected: false, + }, + { + name: "info parser with comment header", + command: "show info", + header: "#name value", + converter: nil, + failureExpected: false, + }, + { + name: "info parser with bad header", + command: "show info", + header: "# name value extra1 extra2", + converter: nil, + failureExpected: true, + }, + { + name: "info parser with empty header", + command: "show info", + header: "", + converter: nil, + failureExpected: false, + }, + { + name: "bad command with header", + command: "bad command", + header: "field1 field2 field3", + converter: nil, + failureExpected: true, + }, + } + + server := startFakeServerForTest(t) + defer server.Stop() + + for _, tc := range testCases { + client := NewClient(server.SocketFile(), 1) + entries := []*infoEntry{} + csvcon := NewCSVConverter(tc.header, &entries, tc.converter) + response, err := client.RunCommand(tc.command, csvcon) + if tc.failureExpected && err == nil { + t.Errorf("TestClientRunInfoCommandConverter test case %s expected a failure but got none, response=%s", + tc.name, string(response)) + } + if !tc.failureExpected && err != nil { + t.Errorf("TestClientRunInfoCommandConverter test case %s expected no failure but got one: %v", tc.name, err) + } + } +} + +// TestClientRunBackendCommandConverter tests client show backend command execution with a converter. 
+func TestClientRunBackendCommandConverter(t *testing.T) { + testCases := []struct { + name string + command string + header string + converter ByteConverterFunc + failureExpected bool + }{ + { + name: "show backend command", + command: "show backend", + header: "name", + converter: nil, + failureExpected: false, + }, + } + + server := startFakeServerForTest(t) + defer server.Stop() + + for _, tc := range testCases { + client := NewClient(server.SocketFile(), 1) + entries := []*backendEntry{} + csvcon := NewCSVConverter(tc.header, &entries, tc.converter) + response, err := client.RunCommand(tc.command, csvcon) + if tc.failureExpected && err == nil { + t.Errorf("TestClientRunBackendCommandConverter test case %s expected a failure but got none, response=%s", + tc.name, string(response)) + } + if !tc.failureExpected && err != nil { + t.Errorf("TestClientRunBackendCommandConverter test case %s expected no failure but got one: %v", tc.name, err) + } + } +} + +// TestClientRunMapCommandConverter tests client show map command execution with a converter. 
+func TestClientRunMapCommandConverter(t *testing.T) { + testCases := []struct { + name string + command string + header string + converter ByteConverterFunc + failureExpected bool + }{ + { + name: "show map command", + command: "show map", + header: "id (file) description", + converter: fixupMapListOutput, + failureExpected: false, + }, + { + name: "show map command with no converter", + command: "show map", + header: "id (file) description", + converter: nil, + failureExpected: true, + }, + } + + server := startFakeServerForTest(t) + defer server.Stop() + + for _, tc := range testCases { + client := NewClient(server.SocketFile(), 1) + entries := []*mapListEntry{} + csvcon := NewCSVConverter(tc.header, &entries, tc.converter) + response, err := client.RunCommand(tc.command, csvcon) + if tc.failureExpected && err == nil { + t.Errorf("TestClientRunMapCommandConverter test case %s expected a failure but got none, response=%s", + tc.name, string(response)) + } + if !tc.failureExpected && err != nil { + t.Errorf("TestClientRunMapCommandConverter test case %s expected no failure but got one: %v", tc.name, err) + } + } +} + +// TestClientRunServerCommandConverter tests client show servers state command execution with a converter. 
+func TestClientRunServerCommandConverter(t *testing.T) { + testCases := []struct { + name string + command string + header string + converter ByteConverterFunc + failureExpected bool + }{ + { + name: "show servers state command", + command: "show servers state be_edge_http:default:example-route", + header: serversStateHeader, + converter: stripVersionNumber, + failureExpected: false, + }, + { + name: "show servers state command without a converter", + command: "show servers state be_edge_http:default:example-route", + header: serversStateHeader, + converter: nil, + failureExpected: true, + }, + } + + server := startFakeServerForTest(t) + defer server.Stop() + + for _, tc := range testCases { + client := NewClient(server.SocketFile(), 1) + entries := []*serverStateInfo{} + csvcon := NewCSVConverter(tc.header, &entries, tc.converter) + response, err := client.RunCommand(tc.command, csvcon) + if tc.failureExpected && err == nil { + t.Errorf("TestClientRunServerCommandConverter test case %s expected a failure but got none, response=%s", + tc.name, string(response)) + } + if !tc.failureExpected && err != nil { + t.Errorf("TestClientRunServerCommandConverter test case %s expected no failure but got one: %v", tc.name, err) + } + } +} + +// TestClientExecute tests client command execution. 
+func TestClientExecute(t *testing.T) { + testCases := []struct { + name string + command string + failureExpected bool + }{ + { + name: "info command", + command: "show info", + failureExpected: false, + }, + { + name: "bad command", + command: "bad command here", + failureExpected: true, + }, + { + name: "show backend command", + command: "show backend", + failureExpected: false, + }, + { + name: "show map command", + command: "show map", + failureExpected: false, + }, + { + name: "show servers state command", + command: "show servers state be_edge_http:default:example-route", + failureExpected: false, + }, + { + name: "set server ipaddr command", + command: "set server be_edge_http:default:example-route/_dynamic-pod-1 ipaddr 1.2.3.4", + failureExpected: false, + }, + { + name: "set server ipaddr and port command", + command: "set server be_edge_http:default:example-route/_dynamic-pod-1 ipaddr 1.2.3.4 port 8080", + failureExpected: false, + }, + { + name: "set server weight command", + command: "set server be_edge_http:default:example-route/_dynamic-pod-1 weight 256", + failureExpected: false, + }, + { + name: "set server state command", + command: "set server be_edge_http:default:example-route/_dynamic-pod-1 state maint", + failureExpected: false, + }, + } + + server := startFakeServerForTest(t) + defer server.Stop() + + for _, tc := range testCases { + client := NewClient(server.SocketFile(), 1) + response, err := client.Execute(tc.command) + if tc.failureExpected && err == nil { + t.Errorf("TestClientExecute test case %s expected a failure but got none, response=%s", + tc.name, string(response)) + } + if !tc.failureExpected && err != nil { + t.Errorf("TestClientExecute test case %s expected no failure but got one: %v", tc.name, err) + } + } +} + +// TestClientReset tests client state reset. 
+func TestClientReset(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + client := NewClient(server.SocketFile(), 1) + if _, err := client.Backends(); err != nil { + t.Errorf("TestClientReset error getting backends: %v", err) + } + if _, err := client.Maps(); err != nil { + t.Errorf("TestClientReset error getting maps: %v", err) + } + + server.Reset() + commands := server.Commands() + if len(commands) != 0 { + t.Errorf("TestClientReset error resetting server commands=%+v", commands) + } + + client.Reset() + if _, err := client.Backends(); err != nil { + t.Errorf("TestClientReset error getting backends: %v", err) + } + commands = server.Commands() + if len(commands) == 0 { + t.Errorf("TestClientReset after reset no server command found, where one was expected") + } + + server.Reset() + commands = server.Commands() + if len(commands) != 0 { + t.Errorf("TestClientReset error resetting server commands=%+v", commands) + } + + client.Reset() + client.FindBackend("foo") + commands = server.Commands() + if len(commands) == 0 { + t.Errorf("TestClientReset after reset no server command found, where one was expected") + } +} + +// TestClientCommit tests client state commit. 
+func TestClientCommit(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + client := NewClient(server.SocketFile(), 1) + backends, err := client.Backends() + if err != nil { + t.Errorf("TestClientCommit error getting backends: %v", err) + } + maps, err := client.Maps() + if err != nil { + t.Errorf("TestClientCommit error getting maps: %v", err) + } + + server.Reset() + for _, m := range maps { + m.Add("key", "value", true) + } + if len(server.Commands()) == 0 { + t.Errorf("TestClientCommit no commands found after reset and adding to maps") + } + + skipNames := map[string]bool{ + "be_sni": true, + "be_no_sni": true, + "openshift_default": true, + } + + server.Reset() + for _, be := range backends { + if _, ok := skipNames[be.Name()]; ok { + continue + } + + serverName := "_dynamic-pod-1" + if err := be.UpdateServerState(serverName, BackendServerStateMaint); err != nil { + t.Errorf("TestClientCommit error setting state to maint for backend %s, server %s: %v", + be.Name(), serverName, err) + } + } + client.Commit() + if len(server.Commands()) == 0 { + t.Errorf("TestClientCommit no commands found after reset and commit") + } + + server.Reset() + for _, be := range backends { + if _, ok := skipNames[be.Name()]; ok { + continue + } + + serverName := "invalid-pod-not-found-name" + if err := be.UpdateServerState(serverName, BackendServerStateMaint); err == nil { + t.Errorf("TestClientCommit error setting state to maint for backend %s, server %s, expected an error but got none", + be.Name(), serverName) + } + } + client.Commit() + if len(server.Commands()) == 0 { + t.Errorf("TestClientCommit no commands found after second reset and commit") + } +} + +// TestClientBackends tests client backends. 
+func TestClientBackends(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + client := NewClient(server.SocketFile(), 1) + backends, err := client.Backends() + if err != nil { + t.Errorf("TestClientBackends error getting backends: %v", err) + } + if len(backends) == 0 { + t.Errorf("TestClientBackends got no backends") + } +} + +// TestClientFindBackend tests client find a specific backend. +func TestClientFindBackend(t *testing.T) { + testCases := []struct { + name string + backendName string + failureExpected bool + }{ + { + name: "non-existent backend", + backendName: "be_this_does_not_exist", + failureExpected: true, + }, + { + name: "existing backend", + backendName: "be_edge_http:default:example-route", + failureExpected: false, + }, + { + name: "existing http backend", + backendName: "be_edge_http:default:test-http-allow", + failureExpected: false, + }, + { + name: "existing edge backend", + backendName: "be_edge_http:default:test-https", + failureExpected: false, + }, + { + name: "existing passthrough backend", + backendName: "be_tcp:default:test-passthrough", + failureExpected: false, + }, + { + name: "existing reencrypt backend", + backendName: "be_secure:default:test-reencrypt", + failureExpected: false, + }, + { + name: "existing wildcard backend", + backendName: "be_edge_http:default:wildcard-redirect-to-https", + failureExpected: false, + }, + { + name: "bad backend name typo", + backendName: "be_secure:default:test-reencrypt-1234", + failureExpected: true, + }, + } + + server := startFakeServerForTest(t) + defer server.Stop() + + for _, tc := range testCases { + client := NewClient(server.SocketFile(), 1) + backend, err := client.FindBackend(tc.backendName) + if tc.failureExpected { + if err == nil { + t.Errorf("TestClientFindBackend test case %s expected an error and got none", tc.name) + } + if backend != nil { + t.Errorf("TestClientFindBackend test case %s expected an error and got a valid backend: %+v", tc.name, 
backend) + } + } else { + if err != nil { + t.Errorf("TestClientFindBackend test case %s expected no error and got: %v", tc.name, err) + } + if backend == nil { + t.Errorf("TestClientFindBackend test case %s expected a backend and got none", tc.name) + } + } + } +} + +// TestClientMaps tests client haproxy maps. +func TestClientMaps(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + client := NewClient(server.SocketFile(), 1) + maps, err := client.Maps() + if err != nil { + t.Errorf("TestClientMaps error getting maps: %v", err) + } + if len(maps) == 0 { + t.Errorf("TestClientMaps got no maps") + } +} + +// TestClientFindMap tests client find a specific haproxy map. +func TestClientFindMap(t *testing.T) { + testCases := []struct { + name string + mapName string + failureExpected bool + }{ + { + name: "non-existent map", + mapName: "/a/b/c/d/e.map", + failureExpected: true, + }, + { + name: "existing redirect map", + mapName: "/var/lib/haproxy/conf/os_route_http_redirect.map", + failureExpected: false, + }, + { + name: "existing sni passthru map", + mapName: "/var/lib/haproxy/conf/os_sni_passthrough.map", + failureExpected: false, + }, + { + name: "existing http be map", + mapName: "/var/lib/haproxy/conf/os_http_be.map", + failureExpected: false, + }, + { + name: "existing tcp be map", + mapName: "/var/lib/haproxy/conf/os_tcp_be.map", + failureExpected: false, + }, + { + name: "existing edge and reencrypt map", + mapName: "/var/lib/haproxy/conf/os_edge_reencrypt_be.map", + failureExpected: false, + }, + { + name: "bad backend name typo", + mapName: "/var/lib/haproxy/conf/os_http_be.map.with.typo", + failureExpected: true, + }, + } + + server := startFakeServerForTest(t) + defer server.Stop() + + for _, tc := range testCases { + client := NewClient(server.SocketFile(), 1) + haproxyMap, err := client.FindMap(tc.mapName) + if tc.failureExpected { + if err == nil { + t.Errorf("TestClientFindMap test case %s expected an error and got none", 
tc.name) + } + if haproxyMap != nil { + t.Errorf("TestClientFindMap test case %s expected an error and got a valid haproxy map: %+v", tc.name, haproxyMap) + } + } else { + if err != nil { + t.Errorf("TestClientFindMap test case %s expected no error and got: %v", tc.name, err) + } + if haproxyMap == nil { + t.Errorf("TestClientFindMap test case %s expected a haproxy map and got none", tc.name) + } + } + } +} diff --git a/pkg/router/template/configmanager/haproxy/converter.go b/pkg/router/template/configmanager/haproxy/converter.go new file mode 100644 index 000000000000..0e6ac2614699 --- /dev/null +++ b/pkg/router/template/configmanager/haproxy/converter.go @@ -0,0 +1,88 @@ +package haproxy + +import ( + "bytes" + "encoding/csv" + "io" + + "github.com/gocarina/gocsv" + "github.com/golang/glog" +) + +// Converter transforms a set of bytes. The haproxy dynamic API command +// responses are not always csv/compliant. This allows us to inject custom +// converters to make the responses valid csv and parseable. +type Converter interface { + // Convert converts a set of bytes. + Convert(data []byte) ([]byte, error) +} + +// ByteConverterFunc converts bytes! +type ByteConverterFunc func([]byte) ([]byte, error) + +// CSVConverter is used to convert the haproxy dynamic configuration API +// responses into something that is valid CSV and then parse the response +// and unmarshal it into native golang structs. +type CSVConverter struct { + headers []byte + out interface{} + converterFunc ByteConverterFunc +} + +// NewCSVConverter returns a new CSVConverter. +func NewCSVConverter(headers string, out interface{}, fn ByteConverterFunc) CSVConverter { + return CSVConverter{ + headers: []byte(headers), + out: out, + converterFunc: fn, + } +} + +// Convert runs a haproxy dynamic config API command. 
+func (c CSVConverter) Convert(data []byte) ([]byte, error) { + glog.V(4).Infof("CSV converter input data bytes: %s", string(data)) + if c.converterFunc != nil { + convertedBytes, err := c.converterFunc(data) + if err != nil { + glog.Errorf("CSV converter error: %v", err) + return data, err + } + data = convertedBytes + glog.V(4).Infof("CSV converter transformed data bytes: %s", string(data)) + } + + if c.out == nil { + return data, nil + } + + // Have an output data structure, so use CSV Reader to populate it. + gocsv.SetCSVReader(func(in io.Reader) gocsv.CSVReader { + r := csv.NewReader(in) + // Allow quotes + r.LazyQuotes = true + r.TrimLeadingSpace = true + // Allows use space as delimiter + r.Comma = ' ' + return r + }) + + glog.V(4).Infof("CSV converter fixing up csv header ...") + data, _ = c.fixupHeaders(data) + glog.V(4).Infof("CSV converter fixed up data bytes: %s", string(data)) + return data, gocsv.Unmarshal(bytes.NewBuffer(data), c.out) +} + +// fixupHeaders fixes up haproxy API responses that don't contain any CSV +// header information. This allows us to easily parse the data and marshal +// into an array of native golang structs. +func (c CSVConverter) fixupHeaders(data []byte) ([]byte, error) { + prefix := []byte("#") + if len(c.headers) > 0 && !bytes.HasPrefix(data, prefix) { + // No header, so insert one. + line := bytes.Join([][]byte{prefix, c.headers}, []byte(" ")) + data = bytes.Join([][]byte{line, data}, []byte("\n")) + } + + // strip off '#', as gocsv treats the first line as csv header info. 
+ return bytes.TrimPrefix(data, prefix), nil +} diff --git a/pkg/router/template/configmanager/haproxy/fake_haproxy.go b/pkg/router/template/configmanager/haproxy/fake_haproxy.go new file mode 100644 index 000000000000..2d961cd283e6 --- /dev/null +++ b/pkg/router/template/configmanager/haproxy/fake_haproxy.go @@ -0,0 +1,453 @@ +package haproxy + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "strings" + "sync" + "testing" + "time" +) + +const ( + haproxyConfigDir = "/var/lib/haproxy/conf" + + serverName = "_dynamic-pod-1" + + onePodAndOneDynamicServerBackendTemplate = `1 +# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port +9 %s 1 pod:test-1-l8x8w:test-service:172.17.0.3:1234 172.17.0.3 2 4 256 1 8117 6 3 4 6 0 0 0 - 1234 +9 %s 2 _dynamic-pod-1 172.4.0.4 2 4 256 1 8117 6 3 4 6 0 0 0 - 1234 +` +) + +type fakeHAProxyMap map[string]string + +type fakeHAProxy struct { + socketFile string + backendName string + maps map[string]fakeHAProxyMap + backends map[string]string + lock sync.Mutex + shutdown bool + commands []string +} + +func startFakeHAProxyServer(prefix string) (*fakeHAProxy, error) { + f, err := ioutil.TempFile(os.TempDir(), prefix) + if err != nil { + return nil, err + } + + name := f.Name() + os.Remove(name) + server := newFakeHAProxy(name, "") + server.Start() + return server, nil +} + +func startFakeServerForTest(t *testing.T) *fakeHAProxy { + name := fmt.Sprintf("fake-haproxy-%s", t.Name()) + server, err := startFakeHAProxyServer(name) + if err != nil { + t.Errorf("%s error: %v", t.Name(), err) + } + return server +} + +func newFakeHAProxy(sockFile, backendName string) *fakeHAProxy { + if len(backendName) == 0 { + backendName = "be_edge_http:_hapcm_blueprint_pool:_blueprint-edge-route-1" + } + p := &fakeHAProxy{ + socketFile: sockFile, 
+ backendName: backendName, + maps: make(map[string]fakeHAProxyMap, 0), + backends: make(map[string]string, 0), + shutdown: false, + commands: make([]string, 0), + } + p.initialize() + return p +} + +func (p *fakeHAProxy) SocketFile() string { + p.lock.Lock() + defer p.lock.Unlock() + return p.socketFile +} + +func (p *fakeHAProxy) Reset() { + p.lock.Lock() + p.commands = make([]string, 0) + p.lock.Unlock() + p.initialize() +} + +func (p *fakeHAProxy) Commands() []string { + p.lock.Lock() + defer p.lock.Unlock() + return p.commands +} + +func (p *fakeHAProxy) Start() { + started := make(chan bool) + go func() error { + listener, err := net.Listen("unix", p.socketFile) + if err != nil { + return err + } + + started <- true + for { + p.lock.Lock() + shutdown := p.shutdown + p.lock.Unlock() + if shutdown { + return nil + } + conn, err := listener.Accept() + if err != nil { + return err + } + go p.process(conn) + } + }() + + // wait for server to indicate it started up. + <-started +} + +func (p *fakeHAProxy) Stop() { + p.lock.Lock() + p.shutdown = true + sockFile := p.socketFile + p.lock.Unlock() + go func() { + timeout := time.Duration(10) * time.Second + net.DialTimeout("unix", sockFile, timeout) + if len(sockFile) > 0 { + os.Remove(sockFile) + } + }() +} + +func (p *fakeHAProxy) initialize() { + redirectMap := map[string]string{ + `^route\.edge\.test(:[0-9]+)?(/.*)?$`: `0x559a137bb720 ^route\.edge\.test(:[0-9]+)?(/.*)?$ be_edge_http:ns1:edge-redirect-to-https`, + `^redirect\.blueprints\.test(:[0-9]+)?(/.*)?$`: `0x559a137bb7e0 ^redirect\.blueprints\.test(:[0-9]+)?(/.*)?$ be_edge_http:blueprints:blueprint-redirect-to-https`, + } + + passthruMap := map[string]string{ + `^route\.passthrough\.test(:[0-9]+)?(/.*)?$`: `0x559a137bf730 ^route\.passthrough\.test(:[0-9]+)?(/.*)?$ 1`, + } + + httpMap := map[string]string{ + `^route\.allow-http\.test(:[0-9]+)?(/.*)?$`: `0x559a137b4c10 ^route\.allow-http\.test(:[0-9]+)?(/.*)?$ be_edge_http:default:test-http-allow`, + } + + 
tcpMap := map[string]string{ + `^route\.reencrypt\.test(:[0-9]+)?(/.*)?$`: `0x559a137b4700 ^route\.reencrypt\.test(:[0-9]+)?(/.*)?$ be_secure:default:test-reencrypt`, + `^reencrypt\.blueprints\.org(:[0-9]+)?(/.*)?$`: `0x559a1400f8a0 ^reencrypt\.blueprints\.org(:[0-9]+)?(/.*)?$ be_secure:blueprints:blueprint-reencrypt`, + `^route\.passthrough\.test(:[0-9]+)?(/.*)?$`: `0x559a1400f960 ^route\.passthrough\.test(:[0-9]+)?(/.*)?$ be_tcp:default:test-passthrough`, + } + + edgeReencryptMap := map[string]string{ + `^www\.example2\.com(:[0-9]+)?(/.*)?$`: `0x559a140103e0 ^www\.example2\.com(:[0-9]+)?(/.*)?$ be_edge_http:default:example-route`, + `^something\.edge\.test(:[0-9]+)?(/.*)?$`: `0x559a14010450 ^something\.edge\.test(:[0-9]+)?(/.*)?$ be_edge_http:default:wildcard-redirect-to-https`, + `^route\.reencrypt\.test(:[0-9]+)?(/.*)?$`: `0x559a14010510 ^route\.reencrypt\.test(:[0-9]+)?(/.*)?$ be_secure:default:test-reencrypt`, + `^reencrypt\.blueprints\.org(:[0-9]+)?(/.*)?$`: `0x559a140105c0 ^reencrypt\.blueprints\.org(:[0-9]+)?(/.*)?$ be_secure:blueprints:blueprint-reencrypt`, + `^redirect\.blueprints\.org(:[0-9]+)?(/.*)?$`: `0x559a140109a0 ^route\.edge\.test(:[0-9]+)?(/.*)?$ be_edge_http:default:test-https`, + `^route\.edge\.test(:[0-9]+)?(/.*)?$`: `0x559a140109a0 ^route\.edge\.test(:[0-9]+)?(/.*)?$ be_edge_http:default:test-https`, + } + + mapNames := map[string]fakeHAProxyMap{ + "os_route_http_redirect.map": redirectMap, + "os_sni_passthrough.map": passthruMap, + "os_http_be.map": httpMap, + "os_tcp_be.map": tcpMap, + "os_edge_reencrypt_be.map": edgeReencryptMap, + } + + p.lock.Lock() + defer p.lock.Unlock() + for k, v := range mapNames { + name := path.Join(haproxyConfigDir, k) + p.maps[name] = v + } +} + +func (p *fakeHAProxy) showInfo() string { + return `Name: HAProxy +Version: 1.8.1 +Release_date: 2017/12/03 +Nbproc: 1 +Process_num: 1 +Pid: 84 +Uptime: 0d5h23m33s +Uptime_sec: 19413 +Memmax_MB: 0 +PoolAlloc_MB: 0 +PoolUsed_MB: 0 +PoolFailed: 0 +Ulimit-n: 40260 
+Maxsock: 40260 +Maxconn: 20000 +Hard_maxconn: 20000 +CurrConns: 0 +CumConns: 3945 +CumReq: 3947 +MaxSslConns: 0 +CurrSslConns: 0 +CumSslConns: 7765 +Maxpipes: 0 +PipesUsed: 0 +PipesFree: 0 +ConnRate: 0 +ConnRateLimit: 0 +MaxConnRate: 2 +SessRate: 0 +SessRateLimit: 0 +MaxSessRate: 2 +SslRate: 0 +SslRateLimit: 0 +MaxSslRate: 1 +SslFrontendKeyRate: 0 +SslFrontendMaxKeyRate: 1 +SslFrontendSessionReuse_pct: 0 +SslBackendKeyRate: 0 +SslBackendMaxKeyRate: 2 +SslCacheLookups: 0 +SslCacheMisses: 0 +CompressBpsIn: 0 +CompressBpsOut: 0 +CompressBpsRateLim: 0 +ZlibMemUsage: 0 +MaxZlibMemUsage: 0 +Tasks: 278 +Run_queue: 0 +Idle_pct: 100 +node: f27 +` +} + +func (p *fakeHAProxy) listMaps() string { + return `# id (file) description +1 (/var/lib/haproxy/conf/os_route_http_redirect.map) pattern loaded from file '/var/lib/haproxy/conf/os_route_http_redirect.map' used by map at file '/var/lib/haproxy/conf/haproxy.config' line 68 +5 (/var/lib/haproxy/conf/os_sni_passthrough.map) pattern loaded from file '/var/lib/haproxy/conf/os_sni_passthrough.map' used by map at file '/var/lib/haproxy/conf/haproxy.config' line 87 +-1 (/var/lib/haproxy/conf/os_http_be.map) pattern loaded from file '/var/lib/haproxy/conf/os_http_be.map' used by map at file '/var/lib/haproxy/conf/haproxy.config' line 71 +-1 (/var/lib/haproxy/conf/os_tcp_be.map) pattern loaded from file '/var/lib/haproxy/conf/os_tcp_be.map' used by map at file '/var/lib/haproxy/conf/haproxy.config' line 88 +-1 (/var/lib/haproxy/conf/os_edge_reencrypt_be.map) pattern loaded from file '/var/lib/haproxy/conf/os_edge_reencrypt_be.map' used by map at file '/var/lib/haproxy/conf/haproxy.config' line 127, by map at file '/var/lib/haproxy/conf/haproxy.config' line 163 + +` +} + +func (p *fakeHAProxy) showMap(name string) string { + lines := []string{} + p.lock.Lock() + defer p.lock.Unlock() + if m, ok := p.maps[name]; ok { + for _, v := range m { + lines = append(lines, v) + } + } else { + lines = append(lines, "Unknown map identifier. 
Please use # or .") + lines = append(lines, "") + } + + return strings.Join(lines, "\n") +} + +func (p *fakeHAProxy) addMap(name, k, v string) string { + lines := []string{} + p.lock.Lock() + defer p.lock.Unlock() + if m, ok := p.maps[name]; !ok { + lines = append(lines, "Unknown map identifier. Please use # or .") + lines = append(lines, "") + } else { + m[k] = v + lines = append(lines, "") + } + + return strings.Join(lines, "\n") +} + +func (p *fakeHAProxy) delMap(name, id string) string { + id = strings.Trim(id, "#") + p.lock.Lock() + defer p.lock.Unlock() + if m, ok := p.maps[name]; ok { + matchingKeys := []string{} + for k, v := range m { + if strings.HasPrefix(v, id) { + matchingKeys = append(matchingKeys, k) + } + } + + for _, v := range matchingKeys { + delete(m, v) + } + } + + return fmt.Sprintf("del map %s\n", name) +} + +func (p *fakeHAProxy) listBackends() string { + return `# name +be_sni +be_no_sni +openshift_default +be_edge_http:_hapcm_blueprint_pool:_blueprint-edge-route-1 +be_edge_http:_hapcm_blueprint_pool:_blueprint-edge-route-2 +be_edge_http:_hapcm_blueprint_pool:_blueprint-edge-route-3 +be_http:_hapcm_blueprint_pool:_blueprint-http-route-1 +be_http:_hapcm_blueprint_pool:_blueprint-http-route-2 +be_http:_hapcm_blueprint_pool:_blueprint-http-route-3 +be_tcp:_hapcm_blueprint_pool:_blueprint-passthrough-route-1 +be_tcp:_hapcm_blueprint_pool:_blueprint-passthrough-route-2 +be_tcp:_hapcm_blueprint_pool:_blueprint-passthrough-route-3 +be_edge_http:blueprints:blueprint-redirect-to-https +be_secure:blueprints:blueprint-reencrypt +be_edge_http:default:example-route +be_edge_http:default:test-http-allow +be_edge_http:default:test-https +be_edge_http:default:test-https-only +be_tcp:default:test-passthrough +be_secure:default:test-reencrypt +be_edge_http:default:wildcard-redirect-to-https +` +} + +func (p *fakeHAProxy) showServers(name string) string { + p.lock.Lock() + defer p.lock.Unlock() + + onePodAndOneDynamicServerBackends := map[string]string{ + 
"be_edge_http:_hapcm_blueprint_pool:_blueprint-edge-route-1": "", + "be_edge_http:_hapcm_blueprint_pool:_blueprint-edge-route-2": "", + "be_edge_http:_hapcm_blueprint_pool:_blueprint-edge-route-3": "", + + "be_http:_hapcm_blueprint_pool:_blueprint-http-route-1": "", + "be_http:_hapcm_blueprint_pool:_blueprint-http-route-2": "", + "be_http:_hapcm_blueprint_pool:_blueprint-http-route-3": "", + + "be_tcp:_hapcm_blueprint_pool:_blueprint-passthrough-route-1": "", + "be_tcp:_hapcm_blueprint_pool:_blueprint-passthrough-route-2": "", + "be_tcp:_hapcm_blueprint_pool:_blueprint-passthrough-route-3": "", + + "be_edge_http:blueprints:blueprint-redirect-to-https": "", + "be_secure:blueprints:blueprint-reencrypt": "", + "be_edge_http:default:example-route": "", + + "be_edge_http:default:test-http-allow": "", + "be_edge_http:default:test-https": "", + "be_edge_http:default:test-https-only": "", + + "be_tcp:default:test-passthrough": "", + "be_secure:default:test-reencrypt": "", + + "be_edge_http:default:wildcard-redirect-to-https": "", + } + + if name != p.backendName { + if _, ok := onePodAndOneDynamicServerBackends[name]; ok { + return fmt.Sprintf(onePodAndOneDynamicServerBackendTemplate, name) + } + if len(name) > 0 { + return fmt.Sprintf("Can't find backend.\n") + } + } + + return `1 +# be_id be_name srv_id srv_name srv_addr srv_op_state srv_admin_state srv_uweight srv_iweight srv_time_since_last_change srv_check_status srv_check_result srv_check_health srv_check_state srv_agent_state bk_f_forced_id srv_f_forced_id srv_fqdn srv_port +9 be_edge_http:_hapcm_blueprint_pool:_blueprint-edge-route-1 1 _dynamic-pod-1 172.17.0.3 2 4 256 1 8117 6 3 4 6 0 0 0 - 8080 +9 be_edge_http:_hapcm_blueprint_pool:_blueprint-edge-route-1 2 _dynamic-pod-2 172.17.0.3 2 5 256 1 8117 6 3 0 14 0 0 0 - 8080 +9 be_edge_http:_hapcm_blueprint_pool:_blueprint-edge-route-1 3 _dynamic-pod-3 172.4.0.4 0 5 1 1 8206 1 0 0 14 0 0 0 - 8765 +9 be_edge_http:_hapcm_blueprint_pool:_blueprint-edge-route-1 4 
_dynamic-pod-4 172.4.0.4 0 5 1 1 8206 1 0 0 14 0 0 0 - 8765 +9 be_edge_http:_hapcm_blueprint_pool:_blueprint-edge-route-1 5 _dynamic-pod-5 172.17.0.2 2 4 256 1 8206 6 3 4 6 0 0 0 - 8080 +` +} + +func (p *fakeHAProxy) setServer(name string, options []string) string { + if len(name) == 0 { + return fmt.Sprintf("Require 'backend/server'.\n") + } + + p.lock.Lock() + defer p.lock.Unlock() + existingServer := fmt.Sprintf("%s/%s", p.backendName, serverName) + if name != existingServer { + return fmt.Sprintf("No such server.\n") + } + + return fmt.Sprintf("\n") +} + +func (p *fakeHAProxy) process(conn net.Conn) error { + readBuffer := make([]byte, 1024) + nread, err := conn.Read(readBuffer) + if err != nil { + response := fmt.Sprintf("error: %v", err) + conn.Write([]byte(response)) + return err + } + + response := "" + cmd := string(bytes.Trim(readBuffer[0:nread], " ")) + cmd = strings.Trim(cmd, "\n") + p.lock.Lock() + p.commands = append(p.commands, cmd) + p.lock.Unlock() + + if strings.HasPrefix(cmd, "show info") { + response = p.showInfo() + } else if strings.HasPrefix(cmd, "show map") { + name := strings.Trim(cmd[len("show map"):], " ") + if len(name) == 0 { + response = p.listMaps() + } else { + response = p.showMap(name) + } + } else if strings.HasPrefix(cmd, "show backend") { + response = p.listBackends() + } else if strings.HasPrefix(cmd, "add map") { + params := strings.Trim(cmd[len("add map"):], " ") + vals := strings.Split(params, " ") + if len(vals) < 3 { + response = fmt.Sprintf("'add map' expects three parameters: map identifier, key and value.\n") + } else { + response = p.addMap(vals[0], vals[1], vals[2]) + } + } else if strings.HasPrefix(cmd, "del map") { + params := strings.Trim(cmd[len("del map"):], " ") + vals := strings.Split(params, " ") + if len(vals) < 2 { + response = fmt.Sprintf("This command expects two parameters: map identifier and key.\n") + } else { + response = p.delMap(vals[0], vals[1]) + } + } else if strings.HasPrefix(cmd, "show servers 
state") { + name := strings.Trim(cmd[len("show servers state"):], " ") + response = p.showServers(name) + } else if strings.HasPrefix(cmd, "set server") { + params := strings.Trim(cmd[len("set server"):], " ") + name := "" + vals := strings.Split(params, " ") + if len(vals) > 0 { + name = vals[0] + } + response = p.setServer(name, vals[1:]) + } else { + response = fmt.Sprintf("Unknown command. Please enter one of the following commands only :\nhelp\n...\n") + } + + if _, err := conn.Write([]byte(response)); err != nil { + return err + } + return conn.Close() +} diff --git a/pkg/router/template/configmanager/haproxy/manager.go b/pkg/router/template/configmanager/haproxy/manager.go new file mode 100644 index 000000000000..c89d140c83ea --- /dev/null +++ b/pkg/router/template/configmanager/haproxy/manager.go @@ -0,0 +1,954 @@ +package haproxy + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/golang/glog" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + routeapi "github.com/openshift/origin/pkg/route/apis/route" + templaterouter "github.com/openshift/origin/pkg/router/template" +) + +const ( + // haproxyManagerName is the name of this config manager. + haproxyManagerName = "haproxy-manager" + + // haproxyRunDir is the name of run directory within the haproxy + // router working directory heirarchy. + haproxyRunDir = "run" + + // haproxySocketFile is the name of haproxy socket file. + haproxySocketFile = "haproxy.sock" + + // haproxyConnectionTimeout is the timeout (in seconds) used for + // preventing slow connections to the haproxy socket from blocking + // the config manager from doing any work. + haproxyConnectionTimeout = 30 + + // blueprintRoutePoolNamePrefix is the prefix used for the managed + // pool of blueprint routes. 
+ blueprintRoutePoolNamePrefix = "_hapcm_blueprint_pool" + + // routePoolSizeAnnotation is the annotation on the blueprint route + // overriding the default pool size. + routePoolSizeAnnotation = "router.openshift.io/pool-size" + + // We can only manage endpoint changes (servers upto a limit) and + // can't really dynamically add backends via the haproxy Dynamic + // Configuration API. So what we need to do is pre-allocate backends + // based on the different route blueprints. And we can then enable + // those later when a route is actually added. These constants + // control the pool namespace & service name to use. + blueprintRoutePoolNamespace = blueprintRoutePoolNamePrefix + blueprintRoutePoolServiceName = blueprintRoutePoolNamePrefix + ".svc" +) + +// endpointToDynamicServerMap is a map of endpoint to dynamic server names. +type endpointToDynamicServerMap map[string]string + +// configEntryMap is a map containing name-value pairs representing the +// config entries to add to an haproxy map. +type configEntryMap map[string]string + +// haproxyMapAssociation is a map of haproxy maps and their config entries for a backend. +type haproxyMapAssociation map[string]configEntryMap + +// routeBackendEntry is the entry for a route and its associated backend. +type routeBackendEntry struct { + // id is the route id. + id string + + // termination is the route termination. + termination routeapi.TLSTerminationType + + // wildcard indicates if the route is a wildcard route. + wildcard bool + + // BackendName is the name of the associated haproxy backend. + backendName string + + // mapAssociations is the associated set of haproxy maps and their + // config entries. + mapAssociations haproxyMapAssociation + + // poolRouteBackendName is backend name for any associated route + // from the pre-configured blueprint route pool. + poolRouteBackendName string + + // DynamicServerMap is a map of all the allocated dynamic servers. 
+ dynamicServerMap endpointToDynamicServerMap +} + +// haproxyConfigManager is a template router config manager implementation +// that supports changing haproxy configuration dynamically via the haproxy +// dynamic configuration API. +type haproxyConfigManager struct { + // connectionInfo specifies how to connect to haproxy. + connectionInfo string + + // commitInterval controls how often we call commit to write out + // (to the actual config) all the changes made via the haproxy + // dynamic configuration API. + commitInterval time.Duration + + // blueprintRoutes are the blueprint routes used for pre-allocation. + blueprintRoutes []*routeapi.Route + + // blueprintRoutePoolSize is the size of the pre-allocated pool of + // backends for each route blueprint. + blueprintRoutePoolSize int + + // dynamicServerPrefix is the prefix used in the haproxy template + // for adding dynamic servers (pods) to a backend. + dynamicServerPrefix string + + // maxDynamicServers is the maximum number of dynamic servers + // allocated per backend in the haproxy template configuration. + maxDynamicServers int + + // router is the associated template router. + router templaterouter.RouterInterface + + // defaultCertificate is the default certificate bytes. + defaultCertificate string + + // client is the client used to dynamically manage haproxy. + client *Client + + // reloadInProgress indicates if a router reload is in progress. + reloadInProgress bool + + // backendEntries is a map of route id to the route backend entry. + backendEntries map[string]*routeBackendEntry + + // poolUsage is a mapping of blueprint route pool entries to their + // corresponding routes. + poolUsage map[string]string + + // lock is a mutex used to prevent concurrent config changes. + lock sync.Mutex + + // commitTimer indicates if a router config commit is pending. + commitTimer *time.Timer +} + +// NewHAProxyConfigManager returns a new haproxyConfigManager. 
func NewHAProxyConfigManager(options templaterouter.ConfigManagerOptions) *haproxyConfigManager {
	client := NewClient(options.ConnectionInfo, haproxyConnectionTimeout)

	glog.V(4).Infof("%s: options = %+v\n", haproxyManagerName, options)

	return &haproxyConfigManager{
		connectionInfo:         options.ConnectionInfo,
		commitInterval:         options.CommitInterval,
		blueprintRoutes:        buildBlueprintRoutes(options.BlueprintRoutes),
		blueprintRoutePoolSize: options.BlueprintRoutePoolSize,
		dynamicServerPrefix:    options.DynamicServerPrefix,
		maxDynamicServers:      options.MaxDynamicServers,
		// The default certificate is loaded lazily in Initialize.
		defaultCertificate: "",

		client:           client,
		reloadInProgress: false,
		backendEntries:   make(map[string]*routeBackendEntry),
		poolUsage:        make(map[string]string),
	}
}

// Initialize initializes the haproxy config manager.
// It loads the default certificate (best-effort: a read failure is logged
// and the certificate is left empty), records the associated template
// router, and pre-provisions the blueprint route backend pools.
func (cm *haproxyConfigManager) Initialize(router templaterouter.RouterInterface, certPath string) {
	certBytes := []byte{}
	if len(certPath) > 0 {
		if b, err := ioutil.ReadFile(certPath); err != nil {
			glog.Errorf("Loading router default certificate from %s: %v", certPath, err)
		} else {
			certBytes = b
		}
	}

	cm.lock.Lock()
	cm.router = router
	cm.defaultCertificate = string(certBytes)
	cm.lock.Unlock()

	// Note: called without the lock held; provisionBackendPools only
	// reads fields that are fixed at construction time.
	cm.provisionBackendPools()

	glog.V(2).Infof("haproxy Config Manager router will flush out any dynamically configured changes within %s of each other", cm.commitInterval.String())
}

// Register registers an id with an expected haproxy backend for a route.
// The entry starts out with the route's "natural" backend name; AddRoute
// may later point it at a blueprint pool slot instead.
func (cm *haproxyConfigManager) Register(id string, route *routeapi.Route, wildcard bool) {
	entry := &routeBackendEntry{
		id:               id,
		termination:      routeTerminationType(route),
		wildcard:         wildcard,
		backendName:      routeBackendName(id, route),
		dynamicServerMap: make(endpointToDynamicServerMap),
	}

	cm.lock.Lock()
	defer cm.lock.Unlock()

	entry.BuildMapAssociations(route)
	cm.backendEntries[id] = entry
}

// AddRoute adds a new route or updates an existing route.
+func (cm *haproxyConfigManager) AddRoute(id string, route *routeapi.Route, wildcard bool) error { + if cm.isReloading() { + return fmt.Errorf("Router reload in progress, cannot dynamically add route %s", id) + } + + glog.V(4).Infof("Removing route id %s, wildcard %+v", id, wildcard) + + if cm.isManagedPoolRoute(route) { + return fmt.Errorf("managed pool blueprint route %s ignored", id) + } + + matchedBlueprint := cm.findMatchingBlueprint(route) + if matchedBlueprint == nil { + return fmt.Errorf("no blueprint found that would match route %s/%s", route.Namespace, route.Name) + } + + cm.Register(id, route, wildcard) + + cm.lock.Lock() + defer cm.lock.Unlock() + + slotName, err := cm.findFreeBackendPoolSlot(matchedBlueprint) + if err != nil { + return fmt.Errorf("finding free backend pool slot for route %s: %v", id, err) + } + + glog.V(4).Infof("Adding route id %s using blueprint pool slot %s", id, slotName) + entry, ok := cm.backendEntries[id] + if !ok { + // Should always find backend info but ... + return fmt.Errorf("route id %s was not registered", id) + } + + // Update mapping to use the free pool slot, set the pool entry + // name and process the map associations. + // Note here that we need to rebuild the map associations since + // those depend on the backend name (or the free slot name now). + cm.poolUsage[slotName] = id + entry.poolRouteBackendName = slotName + entry.BuildMapAssociations(route) + + if err := cm.addMapAssociations(entry.mapAssociations); err != nil { + return fmt.Errorf("adding map associations for id %s: %v", id, err) + } + + glog.V(4).Infof("Route %s added using blueprint pool slot %s", id, slotName) + return nil +} + +// RemoveRoute removes a route. 
func (cm *haproxyConfigManager) RemoveRoute(id string, route *routeapi.Route, wildcard bool) error {
	glog.V(4).Infof("Removing route %s, wildcard %+v", id, wildcard)
	if cm.isReloading() {
		return fmt.Errorf("Router reload in progress, cannot dynamically remove route id %s", id)
	}

	if cm.isManagedPoolRoute(route) {
		return fmt.Errorf("managed pool blueprint route %s ignored", id)
	}

	cm.lock.Lock()
	defer cm.lock.Unlock()

	entry, ok := cm.backendEntries[id]
	if !ok {
		// Not registered - return error back.
		return fmt.Errorf("route id %s was not registered", id)
	}

	// BackendName() prefers the blueprint pool slot name when one was
	// assigned by AddRoute.
	backendName := entry.BackendName()
	glog.V(4).Infof("For route %s, removing backend %s", id, backendName)

	// Remove the associated haproxy map entries.
	// Best-effort: map removal errors are logged, not returned, so the
	// pool slot and registry entry below are still cleaned up.
	if err := cm.removeMapAssociations(entry.mapAssociations); err != nil {
		glog.Warningf("Continuing despite errors removing backend %s map associations: %v", backendName, err)
	}

	// Remove pool usage entry for a route added in.
	if len(entry.poolRouteBackendName) > 0 {
		delete(cm.poolUsage, entry.poolRouteBackendName)
	}

	// Delete entry for route id to backend info.
	delete(cm.backendEntries, id)

	// Finally, disable all the servers.
	glog.V(4).Infof("Finding backend %s ...", backendName)
	backend, err := cm.client.FindBackend(backendName)
	if err != nil {
		return err
	}
	glog.V(4).Infof("Disabling all servers for backend %s", backendName)
	if err := backend.Disable(); err != nil {
		return err
	}

	glog.V(4).Infof("Committing changes made to backend %s", backendName)
	return backend.Commit()
}

// ReplaceRouteEndpoints dynamically replaces a subset of the endpoints for
// a route - modifies a subset of the servers on an haproxy backend.
func (cm *haproxyConfigManager) ReplaceRouteEndpoints(id string, oldEndpoints, newEndpoints []templaterouter.Endpoint, weight int32) error {
	glog.V(4).Infof("Replacing route endpoints for %s, weight=%v", id, weight)
	if cm.isReloading() {
		return fmt.Errorf("Router reload in progress, cannot dynamically add endpoints for %s", id)
	}

	cm.lock.Lock()
	defer cm.lock.Unlock()

	entry, ok := cm.backendEntries[id]
	if !ok {
		// Not registered - return error back.
		return fmt.Errorf("route id %s was not registered", id)
	}

	weightIsRelative := false
	if entry.termination == routeapi.TLSTerminationPassthrough {
		// Passthrough is a wee bit odd and is like a boolean on/off
		// switch. Setting actual weights, causing the haproxy
		// dynamic API to either hang or then haproxy dying off.
		// So 100% works for us today because we use a dynamic hash
		// balancing algorithm. Needs a follow up on this issue.
		weightIsRelative = true
		weight = 100
	}

	backendName := entry.BackendName()
	glog.V(4).Infof("Finding backend %s ...", backendName)
	backend, err := cm.client.FindBackend(backendName)
	if err != nil {
		return err
	}

	// modifiedEndpoints starts as the full new set; unchanged and
	// already-handled endpoints are removed as we go, leaving only the
	// ones that still need a server slot.
	modifiedEndpoints := make(map[string]templaterouter.Endpoint)
	for _, ep := range newEndpoints {
		modifiedEndpoints[ep.ID] = ep
	}

	deletedEndpoints := make(map[string]templaterouter.Endpoint)
	for _, ep := range oldEndpoints {
		if v2ep, ok := modifiedEndpoints[ep.ID]; ok {
			if reflect.DeepEqual(ep, v2ep) {
				// endpoint was unchanged.
				delete(modifiedEndpoints, v2ep.ID)
			}
		} else {
			deletedEndpoints[ep.ID] = ep
		}
	}

	glog.V(4).Infof("Getting servers for backend %s", backendName)
	servers, err := backend.Servers()
	if err != nil {
		return err
	}

	glog.V(4).Infof("Processing endpoint changes, deleted=%+v, modified=%+v", deletedEndpoints, modifiedEndpoints)

	// First process the deleted endpoints and update the servers we
	// have already used - these would be the ones where the name
	// matches the endpoint name or is a dynamic server already in use.
	// Also keep track of the unused dynamic servers.
	unusedServerNames := []string{}
	for _, s := range servers {
		relatedEndpointID := s.Name
		if cm.isDynamicBackendServer(s) {
			if epid, ok := entry.dynamicServerMap[s.Name]; ok {
				relatedEndpointID = epid
			} else {
				unusedServerNames = append(unusedServerNames, s.Name)
				continue
			}
		}

		if _, ok := deletedEndpoints[relatedEndpointID]; ok {
			glog.V(4).Infof("For deleted endpoint %s, disabling server %s", relatedEndpointID, s.Name)
			backend.DisableServer(s.Name)
			if _, ok := entry.dynamicServerMap[s.Name]; ok {
				glog.V(4).Infof("Removing server %s from dynamic server map (backend=%s)", s.Name, backendName)
				delete(entry.dynamicServerMap, s.Name)
			}
			continue
		}

		if ep, ok := modifiedEndpoints[relatedEndpointID]; ok {
			glog.V(4).Infof("For modified endpoint %s, setting server %s info to %s:%s with weight %d and enabling",
				relatedEndpointID, s.Name, ep.IP, ep.Port, weight)
			backend.UpdateServerInfo(s.Name, ep.IP, ep.Port, weight, weightIsRelative)
			backend.EnableServer(s.Name)

			delete(modifiedEndpoints, relatedEndpointID)
		}
	}

	// Processed all existing endpoints, now check if there are any
	// more modified endpoints (aka newly added ones). For these, we can
	// choose any of the unused dynamic servers.
	for _, name := range unusedServerNames {
		if len(modifiedEndpoints) == 0 {
			break
		}

		var ep templaterouter.Endpoint
		for _, v := range modifiedEndpoints {
			// Just get first modified endpoint.
			ep = v
			break
		}

		// Add entry for the dynamic server used.
		entry.dynamicServerMap[name] = ep.ID

		glog.V(4).Infof("For added endpoint %s, setting dynamic server %s info: (%s, %s, %d) and enabling", ep.ID, name, ep.IP, ep.Port, weight)
		backend.UpdateServerInfo(name, ep.IP, ep.Port, weight, weightIsRelative)
		backend.EnableServer(name)

		delete(modifiedEndpoints, ep.ID)
	}

	// If we got here, then either we are done with all the endpoints or
	// there are no free dynamic server slots available that we can use.
	if len(modifiedEndpoints) > 0 {
		return fmt.Errorf("no free dynamic server slots for backend %s, %d endpoint(s) remaining",
			id, len(modifiedEndpoints))
	}

	glog.V(4).Infof("Committing backend %s", backendName)
	return backend.Commit()
}

// RemoveRouteEndpoints removes servers matching the endpoints from a haproxy backend.
func (cm *haproxyConfigManager) RemoveRouteEndpoints(id string, endpoints []templaterouter.Endpoint) error {
	glog.V(4).Infof("Removing endpoints for id %s", id)
	if cm.isReloading() {
		return fmt.Errorf("Router reload in progress, cannot dynamically delete endpoints for %s", id)
	}

	cm.lock.Lock()
	defer cm.lock.Unlock()

	entry, ok := cm.backendEntries[id]
	if !ok {
		// Not registered - return error back.
		return fmt.Errorf("route id %s was not registered", id)
	}

	backendName := entry.BackendName()
	glog.V(4).Infof("Finding backend %s ...", backendName)
	backend, err := cm.client.FindBackend(backendName)
	if err != nil {
		return err
	}

	// Build a reversed map (endpoint id -> server name) to allow us to
	// search by endpoint.
	endpointToDynServerMap := make(map[string]string)
	for serverName, endpointID := range entry.dynamicServerMap {
		endpointToDynServerMap[endpointID] = serverName
	}

	for _, ep := range endpoints {
		name := ep.ID
		if serverName, ok := endpointToDynServerMap[ep.ID]; ok {
			name = serverName
			// Release the dynamic server slot for reuse.
			delete(entry.dynamicServerMap, name)
		}

		glog.V(4).Infof("For endpoint %s, disabling server %s", ep.ID, name)
		backend.DisableServer(name)
	}

	glog.V(4).Infof("Committing backend %s", backendName)
	return backend.Commit()
}

// Notify informs the config manager of any template router state changes.
// We only care about the reload specific events.
func (cm *haproxyConfigManager) Notify(event templaterouter.RouterEventType) {
	glog.V(4).Infof("Received a %s notification", string(event))

	cm.lock.Lock()
	defer cm.lock.Unlock()

	switch event {
	case templaterouter.RouterEventReloadStart:
		cm.reloadInProgress = true
	case templaterouter.RouterEventReloadError:
		cm.reloadInProgress = false
	case templaterouter.RouterEventReloadEnd:
		cm.reloadInProgress = false
		// A completed reload wrote all dynamic state into the config,
		// so the caches/pool bookkeeping can be cleared.
		cm.reset()
	}
}

// Commit defers calling commit on the associated template router using a
// internal flush timer.
func (cm *haproxyConfigManager) Commit() {
	glog.V(4).Infof("Committing dynamic config manager changes")

	cm.lock.Lock()
	defer cm.lock.Unlock()

	// Coalesce repeated Commit calls: only one timer is ever pending.
	if cm.commitTimer == nil {
		cm.commitTimer = time.AfterFunc(cm.commitInterval, cm.commitRouterConfig)
	}
}

// ServerTemplateName returns the dynamic server template name.
func (cm *haproxyConfigManager) ServerTemplateName(id string) string {
	if len(cm.dynamicServerPrefix) > 0 && cm.maxDynamicServers > 0 {
		// Adding the id makes the name unwieldy - use pod.
		return fmt.Sprintf("%s-pod", cm.dynamicServerPrefix)
	}

	return ""
}

// ServerTemplateSize returns the dynamic server template size.
// Note this is returned as a string for easier use in the haproxy template.
func (cm *haproxyConfigManager) ServerTemplateSize(id string) string {
	if cm.maxDynamicServers < 1 {
		return ""
	}

	return fmt.Sprintf("%v", cm.maxDynamicServers)
}

// GenerateDynamicServerNames generates the dynamic server names
// ("<prefix>-pod-1" ... "<prefix>-pod-N") or an empty slice when dynamic
// servers are disabled.
func (cm *haproxyConfigManager) GenerateDynamicServerNames(id string) []string {
	if cm.maxDynamicServers > 0 {
		if prefix := cm.ServerTemplateName(id); len(prefix) > 0 {
			names := make([]string, cm.maxDynamicServers)
			for i := 0; i < cm.maxDynamicServers; i++ {
				names[i] = fmt.Sprintf("%s-%v", prefix, i+1)
			}
			return names
		}
	}

	return []string{}
}

// commitRouterConfig calls Commit on the associated template router.
// It runs from the commit timer armed by Commit.
func (cm *haproxyConfigManager) commitRouterConfig() {
	cm.lock.Lock()
	cm.commitTimer = nil
	cm.lock.Unlock()

	// [Re]Adding a blueprint pool route triggers a router state change.
	// And calling Commit ensures that the config gets written out.
	route := createBlueprintRoute(routeapi.TLSTerminationEdge)
	route.Name = fmt.Sprintf("%v-1", route.Name)
	cm.router.AddRoute(route)

	glog.V(4).Infof("Committing associated template router ... ")
	cm.router.Commit()
}

// isReloading indicates if a router reload is in progress.
func (cm *haproxyConfigManager) isReloading() bool {
	cm.lock.Lock()
	defer cm.lock.Unlock()

	return cm.reloadInProgress
}

// isManagedPoolRoute indicates if a given route is a route from the managed
// pool of blueprint routes.
func (cm *haproxyConfigManager) isManagedPoolRoute(route *routeapi.Route) bool {
	return route.Namespace == blueprintRoutePoolNamespace
}

// isDynamicBackendServer indicates if a backend server is a dynamic server.
func (cm *haproxyConfigManager) isDynamicBackendServer(server BackendServerInfo) bool {
	if len(cm.dynamicServerPrefix) == 0 {
		return false
	}

	return strings.HasPrefix(server.Name, cm.dynamicServerPrefix)
}

// provisionBackendPools pre-allocates pools of backends based on the
// different blueprint routes. Each blueprint gets poolSize routes added
// to the template router under the reserved blueprint pool namespace.
func (cm *haproxyConfigManager) provisionBackendPools() {
	for _, r := range cm.blueprintRoutes {
		poolSize := getPoolSize(r, cm.blueprintRoutePoolSize)
		glog.Infof("Provisioning blueprint route pool %s/%s-[1-%d]",
			r.Namespace, r.Name, poolSize)
		for i := 0; i < poolSize; i++ {
			route := r.DeepCopy()
			route.Namespace = blueprintRoutePoolNamespace
			route.Name = fmt.Sprintf("%v-%v", route.Name, i+1)
			// Pool slots have no host until a real route claims them.
			route.Spec.Host = ""
			cm.router.AddRoute(route)
		}
	}
}

// processMapAssociations processes all the map associations for a backend,
// adding (add=true) or deleting the entries in each matching haproxy map.
func (cm *haproxyConfigManager) processMapAssociations(associations haproxyMapAssociation, add bool) error {
	glog.V(4).Infof("Associations = %+v", associations)

	haproxyMaps, err := cm.client.Maps()
	if err != nil {
		return err
	}

	for _, ham := range haproxyMaps {
		// Associations are keyed by the map's base file name.
		name := path.Base(ham.Name())
		if entries, ok := associations[name]; ok {
			glog.V(4).Infof("Applying to map %s, entries %+v ", name, entries)
			if err := applyMapAssociations(ham, entries, add); err != nil {
				return err
			}
		}
	}

	return nil
}

// findFreeBackendPoolSlot returns a free pool slot backend name.
func (cm *haproxyConfigManager) findFreeBackendPoolSlot(blueprint *routeapi.Route) (string, error) {
	poolSize := getPoolSize(blueprint, cm.blueprintRoutePoolSize)
	idPrefix := fmt.Sprintf("%s:%s", blueprint.Namespace, blueprint.Name)
	// Linear scan: the first slot with no poolUsage entry is free.
	for i := 0; i < poolSize; i++ {
		id := fmt.Sprintf("%s-%v", idPrefix, i+1)
		name := routeBackendName(id, blueprint)
		if _, ok := cm.poolUsage[name]; !ok {
			return name, nil
		}
	}

	return "", fmt.Errorf("no %s free pool slot available", idPrefix)
}

// addMapAssociations adds all the map associations for a backend.
func (cm *haproxyConfigManager) addMapAssociations(m haproxyMapAssociation) error {
	return cm.processMapAssociations(m, true)
}

// removeMapAssociations removes all the map associations for a backend.
func (cm *haproxyConfigManager) removeMapAssociations(m haproxyMapAssociation) error {
	return cm.processMapAssociations(m, false)
}

// reset resets the haproxy dynamic configuration manager to a pristine
// state. Clears out any allocated pool backends and dynamic servers.
// Called (with the lock held by Notify) once a router reload completes.
func (cm *haproxyConfigManager) reset() {
	if cm.commitTimer != nil {
		// Stop the timer after this function returns; grab a local
		// reference since cm.commitTimer is cleared below.
		commitTimer := cm.commitTimer
		defer func() {
			commitTimer.Stop()
		}()

		cm.commitTimer = nil
	}

	// Reset the blueprint route pool use and dynamic server maps as
	// the router was reloaded.
	cm.poolUsage = make(map[string]string)
	for _, entry := range cm.backendEntries {
		entry.poolRouteBackendName = ""
		if len(entry.dynamicServerMap) > 0 {
			entry.dynamicServerMap = make(endpointToDynamicServerMap)
		}
	}

	// Reset the client - clear its caches.
	cm.client.Reset()
}

// findMatchingBlueprint finds a matching blueprint route that can be used
// as a "surrogate" for the route.
+func (cm *haproxyConfigManager) findMatchingBlueprint(route *routeapi.Route) *routeapi.Route { + termination := routeTerminationType(route) + routeModifiers := backendModAnnotations(route) + for _, candidate := range cm.blueprintRoutes { + t2 := routeTerminationType(candidate) + if termination != t2 { + // not the day of judgement! + continue + } + + if len(routeModifiers) > 0 { + if len(candidate.Annotations) == 0 { + // Can't use this blueprint as it has no annotations. + continue + } + + candidateModifiers := backendModAnnotations(candidate) + if !reflect.DeepEqual(routeModifiers, candidateModifiers) { + continue + } + } + + // Ok we passed termination and annotation checks. Need to + // pass the the certification tests aka no special + // certificate information. + if route.Spec.TLS == nil && candidate.Spec.TLS == nil { + return candidate + } + if route.Spec.TLS != nil && candidate.Spec.TLS != nil { + // So we need compare the TLS fields but don't care + // if InsecureEdgeTerminationPolicy doesn't match. + candidateCopy := candidate.DeepCopy() + candidateCopy.Spec.TLS.InsecureEdgeTerminationPolicy = route.Spec.TLS.InsecureEdgeTerminationPolicy + if reflect.DeepEqual(route.Spec.TLS, candidateCopy.Spec.TLS) { + return candidateCopy + } + } + } + + return nil +} + +// BackendName returns the associated backend name for a route. +func (entry *routeBackendEntry) BackendName() string { + if len(entry.poolRouteBackendName) > 0 { + return entry.poolRouteBackendName + } + + return entry.backendName +} + +// BuildMapAssociations builds the associations to haproxy maps for a route. 
func (entry *routeBackendEntry) BuildMapAssociations(route *routeapi.Route) {
	termination := routeTerminationType(route)
	policy := routeapi.InsecureEdgeTerminationPolicyNone
	if route.Spec.TLS != nil {
		policy = route.Spec.TLS.InsecureEdgeTerminationPolicy
	}

	entry.mapAssociations = make(haproxyMapAssociation)
	// associate records a (key, value) pair for the named haproxy map.
	associate := func(name, k, v string) {
		m, ok := entry.mapAssociations[name]
		if !ok {
			m = make(configEntryMap)
		}

		m[k] = v
		entry.mapAssociations[name] = m
	}

	hostspec := route.Spec.Host
	pathspec := route.Spec.Path
	// A route without a host has no map entries at all.
	if len(hostspec) == 0 {
		return
	}

	name := entry.BackendName()

	// Do the path specific regular expression usage first.
	pathRE := templaterouter.GenerateRouteRegexp(hostspec, pathspec, entry.wildcard)
	if policy == routeapi.InsecureEdgeTerminationPolicyRedirect {
		associate("os_route_http_redirect.map", pathRE, name)
	}
	switch termination {
	case routeapi.TLSTerminationType(""):
		associate("os_http_be.map", pathRE, name)

	case routeapi.TLSTerminationEdge:
		associate("os_edge_reencrypt_be.map", pathRE, name)
		if policy == routeapi.InsecureEdgeTerminationPolicyAllow {
			associate("os_http_be.map", pathRE, name)
		}

	case routeapi.TLSTerminationReencrypt:
		associate("os_edge_reencrypt_be.map", pathRE, name)
		if policy == routeapi.InsecureEdgeTerminationPolicyAllow {
			associate("os_http_be.map", pathRE, name)
		}
	}

	// And then handle the host specific regular expression usage.
	hostRE := templaterouter.GenerateRouteRegexp(hostspec, "", entry.wildcard)
	if len(os.Getenv("ROUTER_ALLOW_WILDCARD_ROUTES")) > 0 && entry.wildcard {
		associate("os_wildcard_domain.map", hostRE, "1")
	}
	switch termination {
	case routeapi.TLSTerminationReencrypt:
		associate("os_tcp_be.map", hostRE, name)

	case routeapi.TLSTerminationPassthrough:
		associate("os_tcp_be.map", hostRE, name)
		associate("os_sni_passthrough.map", hostRE, "1")
	}
}

// buildBlueprintRoutes generates a list of blueprint routes: one default
// blueprint per supported termination type plus clones of any custom
// routes, all placed in the blueprint route pool namespace.
func buildBlueprintRoutes(customRoutes []*routeapi.Route) []*routeapi.Route {
	routes := make([]*routeapi.Route, 0)

	// Add in defaults based on the different route termination types.
	terminationTypes := []routeapi.TLSTerminationType{
		routeapi.TLSTerminationType(""),
		routeapi.TLSTerminationEdge,
		routeapi.TLSTerminationPassthrough,
		// Disable re-encrypt routes for now as we may not be able
		// to validate signers.
		// routeapi.TLSTerminationReencrypt,
	}
	for _, v := range terminationTypes {
		r := createBlueprintRoute(v)
		routes = append(routes, r)
	}

	// Clone and add custom routes to the blueprint route pool namespace.
	for _, r := range customRoutes {
		dolly := r.DeepCopy()
		dolly.Namespace = blueprintRoutePoolNamespace
		routes = append(routes, dolly)
	}

	return routes
}

// generateRouteName generates a name based on the route type.
func generateRouteName(routeType routeapi.TLSTerminationType) string {
	prefix := "http"

	switch routeType {
	case routeapi.TLSTerminationEdge:
		prefix = "edge"
	case routeapi.TLSTerminationPassthrough:
		prefix = "passthrough"
	case routeapi.TLSTerminationReencrypt:
		prefix = "reencrypt"
	}

	return fmt.Sprintf("_blueprint-%v-route", prefix)
}

// createBlueprintRoute creates a new blueprint route based on route type.
+func createBlueprintRoute(routeType routeapi.TLSTerminationType) *routeapi.Route {
+	name := generateRouteName(routeType)
+
+	return &routeapi.Route{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: blueprintRoutePoolNamespace,
+			Name:      name,
+		},
+		Spec: routeapi.RouteSpec{
+			Host: "",
+			TLS:  &routeapi.TLSConfig{Termination: routeType},
+			To: routeapi.RouteTargetReference{
+				Name:   blueprintRoutePoolServiceName,
+				Weight: new(int32),
+			},
+		},
+	}
+}
+
+// routeBackendName returns the haproxy backend name for a route.
+func routeBackendName(id string, route *routeapi.Route) string {
+	termination := routeTerminationType(route)
+	prefix := templaterouter.GenBackendNamePrefix(termination)
+	return fmt.Sprintf("%s:%s", prefix, id)
+}
+
+// getPoolSize returns the size to allocate for the pool for the specified
+// blueprint route. Route annotations if they exist override the defaults.
+func getPoolSize(r *routeapi.Route, defaultSize int) int {
+	v, ok := r.Annotations[routePoolSizeAnnotation]
+	if ok {
+		// Only honor the annotation when it parses cleanly as an
+		// integer; otherwise warn and fall back to the default.
+		if poolSize, err := strconv.ParseInt(v, 10, 0); err == nil {
+			return int(poolSize)
+		} else {
+			routeName := fmt.Sprintf("%s/%s", r.Namespace, r.Name)
+			glog.Warningf("Blueprint route %s has an invalid pool size annotation %q, using default size %v, error: %v",
+				routeName, v, defaultSize, err)
+		}
+	}
+
+	return defaultSize
+}
+
+// routeTerminationType returns a termination type for a route.
+func routeTerminationType(route *routeapi.Route) routeapi.TLSTerminationType {
+	termination := routeapi.TLSTerminationType("")
+	if route.Spec.TLS != nil {
+		termination = route.Spec.TLS.Termination
+	}
+
+	return termination
+}
+
+// applyMapAssociations applies the backend associations to a haproxy map.
+func applyMapAssociations(m *HAProxyMap, associations map[string]string, add bool) error { + for k, v := range associations { + glog.V(4).Infof("Applying to map %s(k=%v, v=%v), add=%+v", m.Name(), k, v, add) + if add { + if err := m.Add(k, v, true); err != nil { + return err + } + } else { + if err := m.Delete(k); err != nil { + return err + } + } + + if err := m.Commit(); err != nil { + return err + } + } + + return nil +} + +// backendModAnnotations return the annotations in a route that will +// require custom (or modified) backend configuration in haproxy. +func backendModAnnotations(route *routeapi.Route) map[string]string { + termination := routeTerminationType(route) + backendModifiers := modAnnotationsList(termination) + + annotations := make(map[string]string) + for _, name := range backendModifiers { + if v, ok := route.Annotations[name]; ok { + annotations[name] = v + } + } + + return annotations +} + +// modAnnotationsList returns a list of annotations that can modify the +// haproxy config for a backend. 
+func modAnnotationsList(termination routeapi.TLSTerminationType) []string {
+	annotations := []string{
+		"haproxy.router.openshift.io/balance",
+		"haproxy.router.openshift.io/ip_whitelist",
+		"haproxy.router.openshift.io/timeout",
+		"haproxy.router.openshift.io/rate-limit-connections",
+		"haproxy.router.openshift.io/rate-limit-connections.concurrent-tcp",
+		"haproxy.router.openshift.io/rate-limit-connections.rate-tcp",
+		"haproxy.router.openshift.io/rate-limit-connections.rate-http",
+		"haproxy.router.openshift.io/pod-concurrent-connections",
+		"router.openshift.io/haproxy.health.check.interval",
+	}
+
+	// Passthrough backends never see the HTTP layer, so the cookie and
+	// HSTS annotations below do not apply to them.
+	if termination == routeapi.TLSTerminationPassthrough {
+		return annotations
+	}
+
+	annotations = append(annotations, "haproxy.router.openshift.io/disable_cookies")
+	annotations = append(annotations, "router.openshift.io/cookie_name")
+	annotations = append(annotations, "haproxy.router.openshift.io/hsts_header")
+	return annotations
+}
diff --git a/pkg/router/template/configmanager/haproxy/map.go b/pkg/router/template/configmanager/haproxy/map.go
new file mode 100644
index 000000000000..5051983f6ca6
--- /dev/null
+++ b/pkg/router/template/configmanager/haproxy/map.go
@@ -0,0 +1,209 @@
+package haproxy
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const (
+	// showMapListHeader is the header added if required to the "show map"
+	// output from haproxy, so that we can parse the CSV output.
+	// Note: This should match the CSV tags used in mapListEntry.
+	showMapListHeader = "id (file) description"
+
+	// showMapHeader is the header we add to the "show map $name"
+	// output from haproxy, so that we can parse the CSV output.
+	// Note: This should match the CSV tags used in HAProxyMapEntry.
+	showMapHeader = "id name value"
+)
+
+type mapListEntry struct {
+	ID     string `csv:"id"`
+	Name   string `csv:"(file)"`
+	Unused string `csv:"-"`
+}
+
+// HAProxyMapEntry is an entry in HAProxyMap.
+type HAProxyMapEntry struct { + // ID is the internal haproxy id associated with this map entry. + // It is required for deleting map entries. + ID string `csv:"id"` + + // Name is the entry key. + Name string `csv:"name"` + + // Value is the entry value. + Value string `csv:"value"` +} + +// HAProxyMap is a structure representing an haproxy map. +type HAProxyMap struct { + // name is the haproxy specific name for this map. + name string + + // client is the haproxy dynamic API client. + client *Client + + // entries are the haproxy map entries. + // Note: This is _not_ a hashtable/map/dict as it can have + // duplicate entries with the same key. + entries []*HAProxyMapEntry + + // dirty indicates the state of the map. + dirty bool +} + +// GetHAProxyMaps returns a list of loaded haproxy maps. +// Note: Maps are lazily populated based on their usage. +func GetHAProxyMaps(c *Client) ([]*HAProxyMap, error) { + entries := []*mapListEntry{} + converter := NewCSVConverter(showMapListHeader, &entries, fixupMapListOutput) + + if _, err := c.RunCommand("show map", converter); err != nil { + return []*HAProxyMap{}, err + } + + maps := make([]*HAProxyMap, len(entries)) + for k, v := range entries { + m := newHAProxyMap(v.Name, c) + maps[k] = m + } + + return maps, nil +} + +// newHAProxyMap returns a new HAProxyMap representing a haproxy map. +func newHAProxyMap(name string, client *Client) *HAProxyMap { + return &HAProxyMap{ + name: name, + client: client, + entries: make([]*HAProxyMapEntry, 0), + dirty: true, + } +} + +// Refresh refreshes the data in this haproxy map. +func (m *HAProxyMap) Refresh() error { + cmd := fmt.Sprintf("show map %s", m.name) + converter := NewCSVConverter(showMapHeader, &m.entries, nil) + if _, err := m.client.RunCommand(cmd, converter); err != nil { + return err + } + + m.dirty = false + return nil +} + +// Commit commits all the pending changes made to this haproxy map. +// We do map changes "in-band" as that's handled dynamically by haproxy. 
+func (m *HAProxyMap) Commit() error { + // noop + return nil +} + +// Name returns the name of this map. +func (m *HAProxyMap) Name() string { + return m.name +} + +// Find returns a list of matching entries in the haproxy map. +func (m *HAProxyMap) Find(k string) ([]HAProxyMapEntry, error) { + found := make([]HAProxyMapEntry, 0) + + if m.dirty { + if err := m.Refresh(); err != nil { + return found, err + } + } + + for _, entry := range m.entries { + if entry.Name == k { + clonedEntry := HAProxyMapEntry{ + ID: entry.ID, + Name: entry.Name, + Value: entry.Value, + } + found = append(found, clonedEntry) + } + } + + return found, nil +} + +// Add adds a new key and value to the haproxy map and allows all previous +// entries in the map to be deleted (replaced). +func (m *HAProxyMap) Add(k, v string, replace bool) error { + if replace { + if err := m.Delete(k); err != nil { + return err + } + } + + return m.addEntry(k, v) +} + +// Delete removes all the matching keys from the haproxy map. +func (m *HAProxyMap) Delete(k string) error { + entries, err := m.Find(k) + if err != nil { + return err + } + + for _, entry := range entries { + if err := m.deleteEntry(entry.ID); err != nil { + return err + } + } + + return nil +} + +// DeleteEntry removes a specific haproxy map entry. +func (m *HAProxyMap) DeleteEntry(id string) error { + return m.deleteEntry(id) +} + +// addEntry adds a new haproxy map entry. +func (m *HAProxyMap) addEntry(k, v string) error { + keyExpr := escapeKeyExpr(k) + cmd := fmt.Sprintf("add map %s %s %s", m.name, keyExpr, v) + responseBytes, err := m.client.Execute(cmd) + if err != nil { + return err + } + + response := strings.TrimSpace(string(responseBytes)) + if len(response) > 0 { + return fmt.Errorf("adding map %s entry %s: %v", m.name, keyExpr, string(response)) + } + + m.dirty = true + return nil +} + +// deleteEntry removes a specific haproxy map entry. 
+func (m *HAProxyMap) deleteEntry(id string) error { + cmd := fmt.Sprintf("del map %s #%s", m.name, id) + if _, err := m.client.Execute(cmd); err != nil { + return err + } + + m.dirty = true + return nil +} + +// escapeKeyExpr escapes meta characters in the haproxy map entry key name. +func escapeKeyExpr(k string) string { + v := strings.Replace(k, `\`, `\\`, -1) + return strings.Replace(v, `.`, `\.`, -1) +} + +// Regular expression to fixup haproxy map list funky output. +var listMapOutputRE *regexp.Regexp = regexp.MustCompile(`(?m)^(-|)([0-9]*) \((.*)?\).*$`) + +// fixupMapListOutput fixes up the funky output haproxy "show map" returns. +func fixupMapListOutput(data []byte) ([]byte, error) { + replacement := []byte(`$1$2 $3 loaded`) + return listMapOutputRE.ReplaceAll(data, replacement), nil +} diff --git a/pkg/router/template/configmanager/haproxy/map_test.go b/pkg/router/template/configmanager/haproxy/map_test.go new file mode 100644 index 000000000000..7a802b42e649 --- /dev/null +++ b/pkg/router/template/configmanager/haproxy/map_test.go @@ -0,0 +1,841 @@ +package haproxy + +import ( + "testing" +) + +// TestGetHAProxyMaps tests haproxy maps. 
+func TestGetHAProxyMaps(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + testCases := []struct { + name string + sockFile string + failureExpected bool + }{ + { + name: "empty socket", + sockFile: "", + failureExpected: true, + }, + { + name: "valid socket", + sockFile: server.SocketFile(), + failureExpected: false, + }, + { + name: "non-existent socket", + sockFile: "/non-existent/fake-haproxy.sock", + failureExpected: true, + }, + } + + for _, tc := range testCases { + client := NewClient(tc.sockFile, 0) + if client == nil { + t.Errorf("TestGetHAProxyMaps test case %s failed with no client.", tc.name) + } + + haproxyMaps, err := GetHAProxyMaps(client) + if tc.failureExpected { + if err == nil { + t.Errorf("TestGetHAProxyMaps test case %s expected an error but got none.", tc.name) + } + continue + } + + if err != nil { + t.Errorf("TestGetHAProxyMaps test case %s expected no error but got: %v", tc.name, err) + } + if len(haproxyMaps) == 0 { + t.Errorf("TestGetHAProxyMaps test case %s expected to get maps", tc.name) + } + } +} + +// TestNewHAProxyMap tests a new haproxy map. +func TestNewHAProxyMap(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + testCases := []struct { + name string + sockFile string + }{ + { + name: "empty", + sockFile: "", + }, + { + name: "valid socket", + sockFile: server.SocketFile(), + }, + { + name: "non-existent socket", + sockFile: "/non-existent/fake-haproxy.sock", + }, + } + + for _, tc := range testCases { + client := NewClient(tc.sockFile, 0) + if client == nil { + t.Errorf("TestNewHAProxyMap test case %s failed with no client.", tc.name) + } + + if m := newHAProxyMap(tc.name, client); m == nil { + t.Errorf("TestNewHAProxyMap test case %s expected a map but got none", tc.name) + } + } +} + +// TestHAProxyMapRefresh tests haproxy map refresh. 
+func TestHAProxyMapRefresh(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + testCases := []struct { + name string + sockFile string + mapName string + failureExpected bool + }{ + { + name: "empty socket", + sockFile: "", + mapName: "empty.map", + failureExpected: true, + }, + { + name: "empty socket and valid map", + sockFile: "", + mapName: "/var/lib/haproxy/conf/os_sni_passthrough.map", + failureExpected: true, + }, + { + name: "valid socket and map", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map", + failureExpected: false, + }, + { + name: "valid socket but invalid map", + sockFile: server.SocketFile(), + mapName: "missing.map", + failureExpected: true, + }, + { + name: "valid socket but typo map", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map-1234", + failureExpected: true, + }, + { + name: "non-existent socket", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "non-existent.map", + failureExpected: true, + }, + { + name: "non-existent socket valid map", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "/var/lib/haproxy/conf/os_tcp_be.map", + failureExpected: true, + }, + } + + for _, tc := range testCases { + client := NewClient(tc.sockFile, 0) + if client == nil { + t.Errorf("TestHAProxyMapRefresh test case %s failed with no client.", tc.name) + } + + m := newHAProxyMap(tc.mapName, client) + err := m.Refresh() + if tc.failureExpected { + if err == nil { + t.Errorf("TestHAProxyMapRefresh test case %s expected an error but got none.", tc.name) + } + continue + } + + if err != nil { + t.Errorf("TestHAProxyMapRefresh test case %s expected no error but got: %v", tc.name, err) + } + } +} + +// TestHAProxyMapCommit tests haproxy map commit. 
+func TestHAProxyMapCommit(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + testCases := []struct { + name string + sockFile string + mapName string + }{ + { + name: "empty socket", + sockFile: "", + mapName: "empty.map", + }, + { + name: "empty socket valid map", + sockFile: "", + mapName: "/var/lib/haproxy/conf/os_sni_passthrough.map", + }, + { + name: "valid socket", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map", + }, + { + name: "valid socket but invalid map", + sockFile: server.SocketFile(), + mapName: "missing.map", + }, + { + name: "valid socket but typo map", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map-1234", + }, + { + name: "non-existent socket", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "non-existent.map", + }, + { + name: "non-existent socket valid map", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "/var/lib/haproxy/conf/os_tcp_be.map", + }, + } + + for _, tc := range testCases { + client := NewClient(tc.sockFile, 0) + if client == nil { + t.Errorf("TestHAProxyMapCommit test case %s failed with no client.", tc.name) + } + + m := newHAProxyMap(tc.mapName, client) + if err := m.Commit(); err != nil { + t.Errorf("TestHAProxyMapCommit test case %s expected no error but got: %v", tc.name, err) + } + } +} + +// TestHAProxyMapName tests haproxy map returns its name. 
+func TestHAProxyMapName(t *testing.T) {
+	server := startFakeServerForTest(t)
+	defer server.Stop()
+
+	testCases := []struct {
+		name            string
+		sockFile        string
+		mapName         string
+		failureExpected bool
+	}{
+		{
+			name:            "empty socket",
+			sockFile:        "",
+			mapName:         "empty.map",
+			failureExpected: true,
+		},
+		{
+			name:            "empty socket valid map",
+			sockFile:        "",
+			mapName:         "/var/lib/haproxy/conf/os_sni_passthrough.map",
+			failureExpected: true,
+		},
+		{
+			name:            "valid socket",
+			sockFile:        server.SocketFile(),
+			mapName:         "/var/lib/haproxy/conf/os_http_be.map",
+			failureExpected: false,
+		},
+		{
+			name:            "valid socket but invalid map",
+			sockFile:        server.SocketFile(),
+			mapName:         "missing.map",
+			failureExpected: true,
+		},
+		{
+			name:            "valid socket but typo map",
+			sockFile:        server.SocketFile(),
+			mapName:         "/var/lib/haproxy/conf/os_http_be.map-1234",
+			failureExpected: true,
+		},
+		{
+			name:            "non-existent socket",
+			sockFile:        "/non-existent/fake-haproxy.sock",
+			mapName:         "non-existent.map",
+			failureExpected: true,
+		},
+		{
+			name:            "non-existent socket valid map",
+			sockFile:        "/non-existent/fake-haproxy.sock",
+			mapName:         "/var/lib/haproxy/conf/os_tcp_be.map",
+			failureExpected: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		client := NewClient(tc.sockFile, 0)
+		if client == nil {
+			t.Errorf("TestHAProxyMapName test case %s failed with no client.", tc.name)
+		}
+
+		m := newHAProxyMap(tc.mapName, client)
+
+		// Name() should round-trip the name the map was created with,
+		// regardless of whether the socket is usable.
+		if name := m.Name(); name != tc.mapName {
+			t.Errorf("TestHAProxyMapName test case %s expected map name %q but got %q", tc.name, tc.mapName, name)
+		}
+
+		err := m.Refresh()
+		if tc.failureExpected {
+			if err == nil {
+				t.Errorf("TestHAProxyMapName test case %s expected an error but got none.", tc.name)
+			}
+			continue
+		}
+
+		if err != nil {
+			t.Errorf("TestHAProxyMapName test case %s expected no error but got: %v", tc.name, err)
+		}
+	}
+}
+
+// TestHAProxyMapFind tests finding an entry in a haproxy map.
+func TestHAProxyMapFind(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + testCases := []struct { + name string + sockFile string + mapName string + keyName string + failureExpected bool + entriesExpected bool + }{ + { + name: "empty socket", + sockFile: "", + mapName: "empty.map", + keyName: "k1", + failureExpected: true, + entriesExpected: false, + }, + { + name: "empty socket valid map and key", + sockFile: "", + mapName: "/var/lib/haproxy/conf/os_sni_passthrough.map", + keyName: `^route\.passthrough\.test(:[0-9]+)?(/.*)?$`, + failureExpected: true, + entriesExpected: false, + }, + { + name: "empty socket valid map and invalid key", + sockFile: "", + mapName: "/var/lib/haproxy/conf/os_sni_passthrough.map", + keyName: "non-existent-key", + failureExpected: true, + entriesExpected: false, + }, + { + name: "valid socket, map and key", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map", + keyName: `^route\.allow-http\.test(:[0-9]+)?(/.*)?$`, + failureExpected: false, + entriesExpected: true, + }, + { + name: "valid socket but invalid map", + sockFile: server.SocketFile(), + mapName: "missing.map", + keyName: `^route\.allow-http\.test(:[0-9]+)?(/.*)?$`, + failureExpected: true, + entriesExpected: false, + }, + { + name: "valid socket but invalid map and key", + sockFile: server.SocketFile(), + mapName: "missing.map", + keyName: "invalid-key", + failureExpected: true, + entriesExpected: false, + }, + { + name: "valid socket but invalid key", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map", + keyName: "invalid-key", + failureExpected: false, + entriesExpected: false, + }, + { + name: "valid socket but typo map", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map-1234", + keyName: `^route\.allow-http\.test(:[0-9]+)?(/.*)?$`, + failureExpected: true, + entriesExpected: false, + }, + { + name: "non-existent socket", + sockFile: 
"/non-existent/fake-haproxy.sock", + mapName: "non-existent.map", + keyName: "invalid-key", + failureExpected: true, + entriesExpected: false, + }, + { + name: "non-existent socket valid map", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "/var/lib/haproxy/conf/os_tcp_be.map", + keyName: "invalid-key", + failureExpected: true, + entriesExpected: false, + }, + { + name: "non-existent socket invalid map", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "404.map", + keyName: `^reencrypt\.blueprints\.org(:[0-9]+)?(/.*)?$`, + failureExpected: true, + entriesExpected: false, + }, + { + name: "non-existent socket valid map and key", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "/var/lib/haproxy/conf/os_tcp_be.map", + keyName: `^reencrypt\.blueprints\.org(:[0-9]+)?(/.*)?$`, + failureExpected: true, + entriesExpected: false, + }, + } + + for _, tc := range testCases { + client := NewClient(tc.sockFile, 0) + if client == nil { + t.Errorf("TestHAProxyMapFind test case %s failed with no client.", tc.name) + } + + // Ensure server is in clean state for test. + server.Reset() + + m := newHAProxyMap(tc.mapName, client) + entries, err := m.Find(tc.keyName) + if tc.failureExpected { + if err == nil { + t.Errorf("TestHAProxyMapFind test case %s expected an error but got none.", tc.name) + } + continue + } + + if err != nil { + t.Errorf("TestHAProxyMapFind test case %s expected no error but got: %v", tc.name, err) + } + if tc.entriesExpected && len(entries) < 1 { + t.Errorf("TestHAProxyMapFind test case %s expected to find an entry but got: %v", tc.name, len(entries)) + } + } +} + +// TestHAProxyMapAdd tests adding an entry in a haproxy map. 
+func TestHAProxyMapAdd(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + testCases := []struct { + name string + sockFile string + mapName string + keyName string + value string + replace bool + failureExpected bool + }{ + { + name: "empty socket and map", + sockFile: "", + mapName: "empty.map", + keyName: "k1", + value: "v1", + replace: true, + failureExpected: true, + }, + { + name: "empty socket valid map and key", + sockFile: "", + mapName: "/var/lib/haproxy/conf/os_sni_passthrough.map", + keyName: `^route\.passthrough\.test(:[0-9]+)?(/.*)?$`, + value: "1", + replace: true, + failureExpected: true, + }, + { + name: "empty socket valid map and invalid key", + sockFile: "", + mapName: "/var/lib/haproxy/conf/os_sni_passthrough.map", + keyName: "non-existent-key", + value: "something", + replace: false, + failureExpected: true, + }, + { + name: "valid socket", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map", + keyName: `^route\.allow-http\.test(:[0-9]+)?(/.*)?$`, + value: "be_edge_http:default:test-http-allow", + replace: true, + failureExpected: false, + }, + { + name: "valid socket no replace", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map", + keyName: `^route\.allow-http\.test(:[0-9]+)?(/.*)?$`, + value: "be_edge_http:default:test-http-allow", + replace: false, + failureExpected: false, + }, + { + name: "valid socket but invalid map", + sockFile: server.SocketFile(), + mapName: "missing.map", + keyName: `^route\.allow-http\.test(:[0-9]+)?(/.*)?$`, + value: "be_edge_http:default:test-http-allow", + replace: true, + failureExpected: true, + }, + { + name: "valid socket but invalid map and key", + sockFile: server.SocketFile(), + mapName: "missing.map", + keyName: "invalid-key1", + value: "something", + replace: false, + failureExpected: true, + }, + { + name: "valid socket but invalid key", + sockFile: server.SocketFile(), + mapName: 
"/var/lib/haproxy/conf/os_http_be.map", + keyName: "invalid-key2", + value: "something", + replace: true, + failureExpected: false, + }, + { + name: "valid socket but typo map", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map-1234", + keyName: `^route\.allow-http\.test(:[0-9]+)?(/.*)?$`, + value: "be_edge_http:default:test-http-allow", + replace: true, + failureExpected: true, + }, + { + name: "non-existent socket", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "non-existent.map", + keyName: "invalid-key3", + value: "some-value", + replace: false, + failureExpected: true, + }, + { + name: "non-existent socket valid map", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "/var/lib/haproxy/conf/os_tcp_be.map", + keyName: "invalid-key4", + value: "some-value", + replace: true, + failureExpected: true, + }, + { + name: "non-existent socket invalid map", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "404.map", + keyName: `^reencrypt\.blueprints\.org(:[0-9]+)?(/.*)?$`, + value: "be_secure:blueprints:blueprint-reencrypt", + replace: true, + failureExpected: true, + }, + { + name: "non-existent socket valid map and key", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "/var/lib/haproxy/conf/os_tcp_be.map", + keyName: `^reencrypt\.blueprints\.org(:[0-9]+)?(/.*)?$`, + value: "1234", + replace: false, + failureExpected: true, + }, + } + + for _, tc := range testCases { + client := NewClient(tc.sockFile, 0) + if client == nil { + t.Errorf("TestHAProxyMapAdd test case %s failed with no client.", tc.name) + } + + // Ensure server is in clean state for test. 
+ server.Reset() + + m := newHAProxyMap(tc.mapName, client) + err := m.Add(tc.keyName, tc.value, tc.replace) + if tc.failureExpected { + if err == nil { + t.Errorf("TestHAProxyMapAdd test case %s expected an error but got none.", tc.name) + } + continue + } + + if err != nil { + t.Errorf("TestHAProxyMapAdd test case %s expected no error but got: %v", tc.name, err) + } + } +} + +// TestHAProxyMapDelete tests deleting entries in a haproxy map. +func TestHAProxyMapDelete(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + testCases := []struct { + name string + sockFile string + mapName string + keyName string + failureExpected bool + }{ + { + name: "empty socket and map", + sockFile: "", + mapName: "empty.map", + keyName: "k1", + failureExpected: true, + }, + { + name: "empty socket valid map and key", + sockFile: "", + mapName: "/var/lib/haproxy/conf/os_sni_passthrough.map", + keyName: `^route\.passthrough\.test(:[0-9]+)?(/.*)?$`, + failureExpected: true, + }, + { + name: "empty socket valid map and invalid key", + sockFile: "", + mapName: "/var/lib/haproxy/conf/os_sni_passthrough.map", + keyName: "non-existent-key", + failureExpected: true, + }, + { + name: "valid socket", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map", + keyName: `^route\.allow-http\.test(:[0-9]+)?(/.*)?$`, + failureExpected: false, + }, + { + name: "valid socket but invalid map", + sockFile: server.SocketFile(), + mapName: "missing.map", + keyName: `^route\.allow-http\.test(:[0-9]+)?(/.*)?$`, + failureExpected: true, + }, + { + name: "valid socket but invalid map and key", + sockFile: server.SocketFile(), + mapName: "missing.map", + keyName: "invalid-key1", + failureExpected: true, + }, + { + name: "valid socket but invalid key", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map", + keyName: "invalid-key2", + failureExpected: false, + }, + { + name: "valid socket but typo map", + sockFile: 
server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map-1234", + keyName: `^route\.allow-http\.test(:[0-9]+)?(/.*)?$`, + failureExpected: true, + }, + { + name: "non-existent socket", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "non-existent.map", + keyName: "invalid-key3", + failureExpected: true, + }, + { + name: "non-existent socket valid map", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "/var/lib/haproxy/conf/os_tcp_be.map", + keyName: "invalid-key4", + failureExpected: true, + }, + { + name: "non-existent socket invalid map", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "404.map", + keyName: `^reencrypt\.blueprints\.org(:[0-9]+)?(/.*)?$`, + failureExpected: true, + }, + { + name: "non-existent socket valid map and key", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "/var/lib/haproxy/conf/os_tcp_be.map", + keyName: `^reencrypt\.blueprints\.org(:[0-9]+)?(/.*)?$`, + failureExpected: true, + }, + } + + for _, tc := range testCases { + client := NewClient(tc.sockFile, 0) + if client == nil { + t.Errorf("TestHAProxyMapDelete test case %s failed with no client.", tc.name) + } + + // Ensure server is in clean state for test. + server.Reset() + + m := newHAProxyMap(tc.mapName, client) + err := m.Delete(tc.keyName) + if tc.failureExpected { + if err == nil { + t.Errorf("TestHAProxyMapDelete test case %s expected an error but got none.", tc.name) + } + continue + } + + if err != nil { + t.Errorf("TestHAProxyMapDelete test case %s expected no error but got: %v", tc.name, err) + } + } +} + +// TestHAProxyMapDeleteEntry tests deleting an entry in a haproxy map. 
+func TestHAProxyMapDeleteEntry(t *testing.T) { + server := startFakeServerForTest(t) + defer server.Stop() + + testCases := []struct { + name string + sockFile string + mapName string + entryID string + failureExpected bool + }{ + { + name: "empty socket and map", + sockFile: "", + mapName: "empty.map", + entryID: "id1", + failureExpected: true, + }, + { + name: "empty socket valid map and key", + sockFile: "", + mapName: "/var/lib/haproxy/conf/os_sni_passthrough.map", + entryID: "0x559a137bf730", + failureExpected: true, + }, + { + name: "empty socket valid map and invalid key", + sockFile: "", + mapName: "/var/lib/haproxy/conf/os_sni_passthrough.map", + entryID: "non-existent-id", + failureExpected: true, + }, + { + name: "valid socket", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map", + entryID: "0x559a137b4c10", + failureExpected: false, + }, + { + name: "valid socket but invalid map", + sockFile: server.SocketFile(), + mapName: "missing.map", + entryID: "0x559a137b4c10", + failureExpected: false, + }, + { + name: "valid socket but invalid map and key", + sockFile: server.SocketFile(), + mapName: "missing.map", + entryID: "invalid-id", + failureExpected: false, + }, + { + name: "valid socket but invalid key", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map", + entryID: "invalid-id", + failureExpected: false, + }, + { + name: "valid socket but typo map", + sockFile: server.SocketFile(), + mapName: "/var/lib/haproxy/conf/os_http_be.map-1234", + entryID: "0x559a137b4c10", + failureExpected: false, + }, + { + name: "non-existent socket", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "non-existent.map", + entryID: "invalid-id3", + failureExpected: true, + }, + { + name: "non-existent socket valid map", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "/var/lib/haproxy/conf/os_tcp_be.map", + entryID: "invalid-id", + failureExpected: true, + }, + { + name: "non-existent socket 
invalid map", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "404.map", + entryID: "0x559a1400f8a0", + failureExpected: true, + }, + { + name: "non-existent socket valid map and key", + sockFile: "/non-existent/fake-haproxy.sock", + mapName: "/var/lib/haproxy/conf/os_tcp_be.map", + entryID: "0x559a1400f8a0", + failureExpected: true, + }, + } + + for _, tc := range testCases { + client := NewClient(tc.sockFile, 0) + if client == nil { + t.Errorf("TestHAProxyMapDeleteEntry test case %s failed with no client.", tc.name) + } + + // Ensure server is in clean state for test. + server.Reset() + + m := newHAProxyMap(tc.mapName, client) + err := m.DeleteEntry(tc.entryID) + if tc.failureExpected { + if err == nil { + t.Errorf("TestHAProxyMapDeleteEntry test case %s expected an error but got none.", tc.name) + } + continue + } + + if err != nil { + t.Errorf("TestHAProxyMapDeleteEntry test case %s expected no error but got: %v", tc.name, err) + } + } +} diff --git a/pkg/router/template/plugin.go b/pkg/router/template/plugin.go index e96b9c76da24..bfbffc85cd6f 100644 --- a/pkg/router/template/plugin.go +++ b/pkg/router/template/plugin.go @@ -29,12 +29,12 @@ const ( // TemplatePlugin implements the router.Plugin interface to provide // a template based, backend-agnostic router. 
type TemplatePlugin struct { - Router routerInterface + Router RouterInterface IncludeUDP bool ServiceFetcher ServiceLookup } -func newDefaultTemplatePlugin(router routerInterface, includeUDP bool, lookupSvc ServiceLookup) *TemplatePlugin { +func newDefaultTemplatePlugin(router RouterInterface, includeUDP bool, lookupSvc ServiceLookup) *TemplatePlugin { return &TemplatePlugin{ Router: router, IncludeUDP: includeUDP, @@ -62,10 +62,11 @@ type TemplatePluginConfig struct { MaxConnections string Ciphers string StrictSNI bool + DynamicConfigManager ConfigManager } -// routerInterface controls the interaction of the plugin with the underlying router implementation -type routerInterface interface { +// RouterInterface controls the interaction of the plugin with the underlying router implementation +type RouterInterface interface { // Mutative operations in this interface do not return errors. // The only error state for these methods is when an unknown // frontend key is used; all call sites make certain the frontend @@ -156,6 +157,7 @@ func NewTemplatePlugin(cfg TemplatePluginConfig, lookupSvc ServiceLookup) (*Temp allowWildcardRoutes: cfg.AllowWildcardRoutes, peerEndpointsKey: peerKey, bindPortsAfterSync: cfg.BindPortsAfterSync, + dynamicConfigManager: cfg.DynamicConfigManager, } router, err := newTemplateRouter(templateRouterCfg) return newDefaultTemplatePlugin(router, cfg.IncludeUDP, lookupSvc), err diff --git a/pkg/router/template/router.go b/pkg/router/template/router.go index 2c2f074a67b3..c2f231b0d419 100644 --- a/pkg/router/template/router.go +++ b/pkg/router/template/router.go @@ -101,6 +101,12 @@ type templateRouter struct { metricReload prometheus.Summary // metricWriteConfig tracks writing config metricWriteConfig prometheus.Summary + // dynamicConfigManager configures route changes dynamically on the + // underlying router. 
+ dynamicConfigManager ConfigManager + // dynamicallyConfigured indicates whether all the [state] changes + // were also successfully applied via the dynamic config manager. + dynamicallyConfigured bool } // templateRouterCfg holds all configuration items required to initialize the template router @@ -121,6 +127,7 @@ type templateRouterCfg struct { peerEndpointsKey string includeUDP bool bindPortsAfterSync bool + dynamicConfigManager ConfigManager } // templateConfig is a subset of the templateRouter information that should be passed to the template for generating @@ -146,6 +153,8 @@ type templateData struct { StatsPort int // whether the router should bind the default ports BindPorts bool + // The dynamic configuration manager if "configured". + DynamicConfigManager ConfigManager } func newTemplateRouter(cfg templateRouterCfg) (*templateRouter, error) { @@ -200,6 +209,7 @@ func newTemplateRouter(cfg templateRouterCfg) (*templateRouter, error) { peerEndpointsKey: cfg.peerEndpointsKey, peerEndpoints: []Endpoint{}, bindPortsAfterSync: cfg.bindPortsAfterSync, + dynamicConfigManager: cfg.dynamicConfigManager, metricReload: metricsReload, metricWriteConfig: metricWriteConfig, @@ -216,6 +226,10 @@ func newTemplateRouter(cfg templateRouterCfg) (*templateRouter, error) { if err := router.readState(); err != nil { return nil, err } + if router.dynamicConfigManager != nil { + glog.Infof("Initializing dynamic config manager ... ") + router.dynamicConfigManager.Initialize(router, router.defaultCertificatePath) + } glog.V(4).Infof("Committing state") // Bypass the rate limiter to ensure the first sync will be // committed without delay. 
@@ -313,9 +327,10 @@ func (r *templateRouter) Commit() { glog.V(4).Infof("Router state synchronized for the first time") r.synced = true r.stateChanged = true + r.dynamicallyConfigured = false } - needsCommit := r.stateChanged + needsCommit := r.stateChanged && !r.dynamicallyConfigured r.lock.Unlock() if needsCommit { @@ -336,6 +351,10 @@ func (r *templateRouter) commitAndReload() error { } r.stateChanged = false + r.dynamicallyConfigured = true + if r.dynamicConfigManager != nil { + r.dynamicConfigManager.Notify(RouterEventReloadStart) + } glog.V(4).Infof("Writing the router config") reloadStart := time.Now() @@ -356,9 +375,16 @@ func (r *templateRouter) commitAndReload() error { err := r.reloadRouter() r.metricReload.Observe(float64(time.Now().Sub(reloadStart)) / float64(time.Second)) if err != nil { + if r.dynamicConfigManager != nil { + r.dynamicConfigManager.Notify(RouterEventReloadError) + } return err } + if r.dynamicConfigManager != nil { + r.dynamicConfigManager.Notify(RouterEventReloadEnd) + } + return nil } @@ -424,6 +450,7 @@ func (r *templateRouter) writeConfig() error { StatsPassword: r.statsPassword, StatsPort: r.statsPort, BindPorts: !r.bindPortsAfterSync || r.synced, + DynamicConfigManager: r.dynamicConfigManager, } if err := template.Execute(file, data); err != nil { file.Close() @@ -501,6 +528,8 @@ func (r *templateRouter) createServiceUnitInternal(id string) { Name: id, Hostname: fmt.Sprintf("%s.%s.svc", name, namespace), EndpointTable: []Endpoint{}, + + ServiceAliasAssociations: make(map[string]bool), } r.serviceUnits[id] = service @@ -536,16 +565,168 @@ func (r *templateRouter) DeleteServiceUnit(id string) { r.stateChanged = true } +// addServiceAliasAssociation adds a reference to the backend in the ServiceUnit config. 
+func (r *templateRouter) addServiceAliasAssociation(id, alias string) { + if serviceUnit, ok := r.findMatchingServiceUnit(id); ok { + glog.V(4).Infof("associated service unit %s -> service alias %s", id, alias) + serviceUnit.ServiceAliasAssociations[alias] = true + } +} + +// removeServiceAliasAssociation removes the reference to the backend in the ServiceUnit config. +func (r *templateRouter) removeServiceAliasAssociation(id, alias string) { + if serviceUnit, ok := r.findMatchingServiceUnit(id); ok { + glog.V(4).Infof("removed association for service unit %s -> service alias %s", id, alias) + delete(serviceUnit.ServiceAliasAssociations, alias) + } +} + +// dynamicallyAddRoute attempts to dynamically add a route. +// Note: The config should have been synced at least once initially and +// the caller needs to acquire a lock [and release it]. +func (r *templateRouter) dynamicallyAddRoute(backendKey string, route *routeapi.Route, backend *ServiceAliasConfig) bool { + if r.dynamicConfigManager == nil { + return false + } + + glog.V(4).Infof("Dynamically adding route backend %s", backendKey) + wildcard := backend.IsWildcard + r.dynamicConfigManager.Register(backendKey, route, wildcard) + + // If no initial sync was done, don't try to dynamically add the + // route as we will need a reload anyway. + if !r.synced { + return false + } + + err := r.dynamicConfigManager.AddRoute(backendKey, route, wildcard) + if err != nil { + glog.Warningf("Router will reload as there was an error dynamically adding route backend %s: %v", backendKey, err) + return false + } + + // For each referenced service unit replace the route endpoints. + oldEndpoints := []Endpoint{} + + // As the endpoints have changed, recalculate the weights. 
+ newWeights := r.calculateServiceWeights(backend.ServiceUnits) + for key := range backend.ServiceUnits { + if service, ok := r.findMatchingServiceUnit(key); ok { + newEndpoints := service.EndpointTable + glog.V(4).Infof("For new route backend %s, replacing endpoints for service %s: %+v", backendKey, key, service.EndpointTable) + + weight, ok := newWeights[key] + if !ok { + weight = 0 + } + if err := r.dynamicConfigManager.ReplaceRouteEndpoints(backendKey, oldEndpoints, newEndpoints, weight); err != nil { + glog.Warningf("Router will reload as there was an error dynamically replacing endpoints for new route backend %s, service %s: %v", + backendKey, key, err) + return false + } + } + } + + glog.V(4).Infof("Dynamically added route backend %s", backendKey) + return true +} + +// dynamicallyRemoveRoute attempts to dynamically remove a route. +// Note: The config should have been synced at least once initially and +// the caller needs to acquire a lock [and release it]. +func (r *templateRouter) dynamicallyRemoveRoute(backendKey string, route *routeapi.Route, backend ServiceAliasConfig) bool { + if r.dynamicConfigManager == nil || !r.synced { + return false + } + + glog.V(4).Infof("Dynamically removing route backend %s", backendKey) + + if err := r.dynamicConfigManager.RemoveRoute(backendKey, route, backend.IsWildcard); err != nil { + glog.Warningf("Router will reload as there was an error dynamically removing a route backend %s: %v", backendKey, err) + return false + } + + return true +} + +// dynamicallyReplaceEndpoints attempts to dynamically replace endpoints +// on all the routes associated with a given service. +// Note: The config should have been synced at least once initially and +// the caller needs to acquire a lock [and release it]. 
+func (r *templateRouter) dynamicallyReplaceEndpoints(id string, service ServiceUnit, oldEndpoints []Endpoint) bool { + if r.dynamicConfigManager == nil || !r.synced { + return false + } + + glog.V(4).Infof("Replacing endpoints dynamically for service %s", id) + newEndpoints := service.EndpointTable + + // Update each of the routes that reference this service unit. + for backendKey := range service.ServiceAliasAssociations { + cfg, ok := r.state[backendKey] + if !ok { + glog.V(4).Infof("Associated service alias %s not found in state, ignoring ...", backendKey) + continue + } + + // As the endpoints have changed, recalculate the weights. + newWeights := r.calculateServiceWeights(cfg.ServiceUnits) + + // Get the weight for this service unit. + weight, ok := newWeights[id] + if !ok { + weight = 0 + } + + glog.V(4).Infof("Dynamically replacing endpoints for associated backend %s", backendKey) + if err := r.dynamicConfigManager.ReplaceRouteEndpoints(backendKey, oldEndpoints, newEndpoints, weight); err != nil { + // Error dynamically modifying the config, so return false to cause a reload to happen. + glog.Warningf("Router will reload as dynamic endpoint replacement for service id %s (backend=%s, weight=%v) failed: %v", id, backendKey, weight, err) + return false + } + } + + return true +} + +// dynamicallyRemoveEndpoints attempts to dynamically remove endpoints on +// all the routes associated with a given service. +// Note: The config should have been synced at least once initially and +// the caller needs to acquire a lock [and release it]. 
+func (r *templateRouter) dynamicallyRemoveEndpoints(service ServiceUnit, endpoints []Endpoint) bool { + if r.dynamicConfigManager == nil || !r.synced { + return false + } + + glog.V(4).Infof("Dynamically removing endpoints for service unit %s", service.Name) + + for backendKey := range service.ServiceAliasAssociations { + if _, ok := r.state[backendKey]; !ok { + continue + } + + glog.V(4).Infof("Dynamically removing endpoints for associated backend %s", backendKey) + if err := r.dynamicConfigManager.RemoveRouteEndpoints(backendKey, endpoints); err != nil { + // Error dynamically modifying the config, so return false to cause a reload to happen. + glog.Warningf("Router will reload as dynamic endpoint removal for backend %s failed: %v", backendKey, err) + return false + } + } + + return true +} + // DeleteEndpoints deletes the endpoints for the service with the given id. func (r *templateRouter) DeleteEndpoints(id string) { r.lock.Lock() defer r.lock.Unlock() - service, ok := r.findMatchingServiceUnit(id) if !ok { return } + configChanged := r.dynamicallyRemoveEndpoints(service, service.EndpointTable) + service.EndpointTable = []Endpoint{} r.serviceUnits[id] = service @@ -558,6 +739,7 @@ func (r *templateRouter) DeleteEndpoints(id string) { } r.stateChanged = true + r.dynamicallyConfigured = r.dynamicallyConfigured && configChanged } // routeKey generates route key. 
This allows templates to use this key without having to create a separate method @@ -701,10 +883,14 @@ func (r *templateRouter) AddRoute(route *routeapi.Route) { glog.V(4).Infof("Creating new frontend for key: %v", key) r.createServiceUnitInternal(key) } + r.addServiceAliasAssociation(key, backendKey) } + configChanged := r.dynamicallyAddRoute(backendKey, route, newConfig) + r.state[backendKey] = *newConfig r.stateChanged = true + r.dynamicallyConfigured = r.dynamicallyConfigured && configChanged } // RemoveRoute removes the given route @@ -724,9 +910,16 @@ func (r *templateRouter) removeRouteInternal(route *routeapi.Route) { return } + configChanged := r.dynamicallyRemoveRoute(backendKey, route, serviceAliasConfig) + + for key := range serviceAliasConfig.ServiceUnits { + r.removeServiceAliasAssociation(key, backendKey) + } + r.cleanUpServiceAliasConfig(&serviceAliasConfig) delete(r.state, backendKey) r.stateChanged = true + r.dynamicallyConfigured = r.dynamicallyConfigured && configChanged } // numberOfEndpoints returns the number of endpoints @@ -752,15 +945,20 @@ func (r *templateRouter) AddEndpoints(id string, endpoints []Endpoint) { return } + oldEndpoints := frontend.EndpointTable + frontend.EndpointTable = endpoints r.serviceUnits[id] = frontend + configChanged := r.dynamicallyReplaceEndpoints(id, frontend, oldEndpoints) + if id == r.peerEndpointsKey { r.peerEndpoints = frontend.EndpointTable glog.V(4).Infof("Peer endpoints updated to: %#v", r.peerEndpoints) } r.stateChanged = true + r.dynamicallyConfigured = r.dynamicallyConfigured && configChanged } // cleanUpServiceAliasConfig performs any necessary steps to clean up a service alias config before deleting it from diff --git a/pkg/router/template/router_test.go b/pkg/router/template/router_test.go index af578be186e6..b6f34490e43e 100644 --- a/pkg/router/template/router_test.go +++ b/pkg/router/template/router_test.go @@ -363,6 +363,8 @@ func TestAddRoute(t *testing.T) { Name: suName, Hostname: 
"TestService.foo.svc", EndpointTable: []Endpoint{}, + + ServiceAliasAssociations: map[string]bool{"foo:bar": true}, }, } diff --git a/pkg/router/template/template_helper.go b/pkg/router/template/template_helper.go index 6e627fd579e7..d5f6e12535f7 100644 --- a/pkg/router/template/template_helper.go +++ b/pkg/router/template/template_helper.go @@ -119,6 +119,22 @@ func genCertificateHostName(hostname string, wildcard bool) string { return templateutil.GenCertificateHostName(hostname, wildcard) } +// Generates the backend name prefix based on the termination. +func GenBackendNamePrefix(termination routeapi.TLSTerminationType) string { + prefix := "be_http" + + switch termination { + case routeapi.TLSTerminationEdge: + prefix = "be_edge_http" + case routeapi.TLSTerminationReencrypt: + prefix = "be_secure" + case routeapi.TLSTerminationPassthrough: + prefix = "be_tcp" + } + + return prefix +} + // processEndpointsForAlias returns the list of endpoints for the given route's service // action argument further processes the list e.g. 
shuffle // The default action is in-order traversal of internal data structure that stores @@ -262,6 +278,7 @@ var helperFunctions = template.FuncMap{ "genSubdomainWildcardRegexp": genSubdomainWildcardRegexp, //generates a regular expression matching the subdomain for hosts (and paths) with a wildcard policy "generateRouteRegexp": generateRouteRegexp, //generates a regular expression matching the route hosts (and paths) "genCertificateHostName": genCertificateHostName, //generates host name to use for serving/matching certificates + "genBackendNamePrefix": GenBackendNamePrefix, //generates the prefix for the backend name "isTrue": isTrue, //determines if a given variable is a true value "firstMatch": firstMatch, //anchors provided regular expression and evaluates against given strings, returns the first matched string or "" diff --git a/pkg/router/template/template_helper_test.go b/pkg/router/template/template_helper_test.go index dc01be92a0a4..cd2a0a0efc13 100644 --- a/pkg/router/template/template_helper_test.go +++ b/pkg/router/template/template_helper_test.go @@ -680,3 +680,44 @@ func TestGetPrimaryAliasKey(t *testing.T) { } } } + +func TestGenBackendNamePrefix(t *testing.T) { + testPrefixes := []struct { + name string + termination routeapi.TLSTerminationType + expectedPrefix string + }{ + { + name: "http route", + termination: routeapi.TLSTerminationType(""), + expectedPrefix: "be_http", + }, + { + name: "edge secured route", + termination: routeapi.TLSTerminationEdge, + expectedPrefix: "be_edge_http", + }, + { + name: "reencrypt route", + termination: routeapi.TLSTerminationReencrypt, + expectedPrefix: "be_secure", + }, + { + name: "passthrough route", + termination: routeapi.TLSTerminationPassthrough, + expectedPrefix: "be_tcp", + }, + { + name: "unknown route", + termination: routeapi.TLSTerminationType("foo"), + expectedPrefix: "be_http", + }, + } + + for _, tc := range testPrefixes { + prefix := GenBackendNamePrefix(tc.termination) + if prefix != 
tc.expectedPrefix { + t.Errorf("%s: expected %s, but got %s", tc.name, tc.expectedPrefix, prefix) + } + } +} diff --git a/pkg/router/template/types.go b/pkg/router/template/types.go index f6764684ebbb..c0cc440732ff 100644 --- a/pkg/router/template/types.go +++ b/pkg/router/template/types.go @@ -2,6 +2,7 @@ package templaterouter import ( "strings" + "time" routeapi "github.com/openshift/origin/pkg/route/apis/route" ) @@ -15,6 +16,9 @@ type ServiceUnit struct { // EndpointTable are endpoints that back the service, this translates into a final backend // implementation for routers. EndpointTable []Endpoint + // ServiceAliasAssociations indicates what service aliases are + // associated with this service unit. + ServiceAliasAssociations map[string]bool } // ServiceAliasConfig is a route for a service. Uniquely identified by host + path. @@ -136,6 +140,88 @@ type certificateWriter interface { DeleteCertificate(directory, id string) error } +// ConfigManagerOptions is the options passed to a template router's +// configuration manager. +type ConfigManagerOptions struct { + // ConnectionInfo specifies how to connect to the underlying router. + ConnectionInfo string + + // CommitInterval specifies how often to commit changes made to the + // underlying router via the configuration manager. + CommitInterval time.Duration + + // BlueprintRoutes are a list of routes blueprints pre-allocated by + // the config manager to dynamically manage route additions. + BlueprintRoutes []*routeapi.Route + + // BlueprintRoutePoolSize is the size of the pre-allocated pool for + // each route blueprint. This can be overriden on an individual + // route basis with a route annotation: + // router.openshift.io/pool-size + BlueprintRoutePoolSize int + + // DynamicServerPrefix is the prefix used for naming the dynamic + // servers associated with a route. These dynamic servers are used to + // quickly modify the router config for any endpoint changes. 
+ DynamicServerPrefix string + + // MaxDynamicServers is the maximum number of dynamic servers we + // will allocate on a per-route basis. + MaxDynamicServers int +} + +// ConfigManager is used by the router to make configuration changes using +// the template router's dynamic configuration API (if any). +type ConfigManager interface { + // Initialize initializes the config manager. + Initialize(router RouterInterface, certPath string) + + // Register registers an id to be associated with a route. + Register(id string, route *routeapi.Route, wildcard bool) + + // AddRoute adds a new route or updates an existing route. + AddRoute(id string, route *routeapi.Route, wildcard bool) error + + // RemoveRoute removes a route. + RemoveRoute(id string, route *routeapi.Route, wildcard bool) error + + // ReplaceRouteEndpoints replaces a subset (the ones associated with + // a single service unit) of a route endpoints. + ReplaceRouteEndpoints(id string, oldEndpoints, newEndpoints []Endpoint, weight int32) error + + // RemoveRouteEndpoints removes a set of endpoints from a route. + RemoveRouteEndpoints(id string, endpoints []Endpoint) error + + // Notify notifies a configuration manager of a router event. + // Currently the only ones that are received are on reload* events, + // which indicates whether or not the configuration manager should + // reset all the dynamically applied changes it is keeping track of. + Notify(event RouterEventType) + + // ServerTemplateName returns the dynamic server template name. + ServerTemplateName(id string) string + + // ServerTemplateSize returns the dynamic server template size. + ServerTemplateSize(id string) string + + // GenerateDynamicServerNames generates the dynamic server names. + GenerateDynamicServerNames(id string) []string +} + +// RouterEventType indicates the type of event fired by the router. +type RouterEventType string + +const ( + // RouterEventReloadStart indicates start of a template router reload. 
+ RouterEventReloadStart = "reload-start" + + // RouterEventReloadEnd indicates end of a template router reload. + RouterEventReloadEnd = "reload-end" + + // RouterEventReloadError indicates error on a template router reload. + RouterEventReloadError = "reload-error" +) + //TemplateSafeName provides a name that can be used in the template that does not contain restricted //characters like / which is used to concat namespace and name in the service unit key func (s ServiceUnit) TemplateSafeName() string { diff --git a/test/end-to-end/router_test.go b/test/end-to-end/router_test.go index dec31fead1cf..805c7d5ebe27 100644 --- a/test/end-to-end/router_test.go +++ b/test/end-to-end/router_test.go @@ -1322,6 +1322,10 @@ func createAndStartRouterContainerExtended(dockerCli *dockerClient.Client, maste fmt.Sprintf("DEFAULT_CERTIFICATE=%s\n%s", defaultCert, defaultKey), fmt.Sprintf("ROUTER_BIND_PORTS_AFTER_SYNC=%s", strconv.FormatBool(bindPortsAfterSync)), fmt.Sprintf("NAMESPACE_LABELS=%s", namespaceLabels), + fmt.Sprintf("ROUTER_CONFIG_MANAGER=haproxy-manager"), + fmt.Sprintf("ROUTER_DYNAMIC_SERVER_PREFIX=_test-dynamic"), + fmt.Sprintf("ROUTER_MAX_DYNAMIC_SERVERS=3"), + fmt.Sprintf("ROUTER_BLUEPRINT_ROUTE_POOL_SIZE=5"), } reloadIntVar := fmt.Sprintf("RELOAD_INTERVAL=%ds", reloadInterval) From 3f5cccf70ba32f438f02ea475eb10e448f56d784 Mon Sep 17 00:00:00 2001 From: ramr Date: Tue, 20 Mar 2018 06:32:37 -0700 Subject: [PATCH 2/9] Fixes as per review comments from @smarterclayton --- pkg/cmd/infra/router/template.go | 50 +- pkg/oc/admin/router/router.go | 1 - .../template/configmanager/haproxy/backend.go | 4 +- .../template/configmanager/haproxy/client.go | 48 +- .../configmanager/haproxy/converter.go | 23 +- .../configmanager/haproxy/converter_test.go | 443 ++++++++++++++++++ .../template/configmanager/haproxy/manager.go | 57 ++- .../template/configmanager/haproxy/map.go | 4 +- .../configmanager/haproxy/map_test.go | 14 +- pkg/router/template/router.go | 21 +- 
pkg/router/template/template_helper.go | 24 +- pkg/router/template/types.go | 20 +- pkg/router/template/util/util_test.go | 41 ++ test/end-to-end/router_test.go | 3 - 14 files changed, 628 insertions(+), 125 deletions(-) create mode 100644 pkg/router/template/configmanager/haproxy/converter_test.go diff --git a/pkg/cmd/infra/router/template.go b/pkg/cmd/infra/router/template.go index 671c3aae2bc6..ac9f000cc9a2 100644 --- a/pkg/cmd/infra/router/template.go +++ b/pkg/cmd/infra/router/template.go @@ -67,7 +67,32 @@ var routerLong = templates.LongDesc(` You may restrict the set of routes exposed to a single project (with --namespace), projects your client has access to with a set of labels (--project-labels), namespaces matching a label (--namespace-labels), or all namespaces (no argument). You can limit the routes to those matching a --labels or --fields selector. Note - that you must have a cluster-wide administrative role to view all namespaces.`) + that you must have a cluster-wide administrative role to view all namespaces. + + For certain template routers, you can specify if a dynamic configuration + manager should be used. Certain template routers like haproxy and + its associated haproxy config manager, allow route and endpoint changes + to be propagated to the underlying router via a dynamic API. + In the case of haproxy, the haproxy-manager uses this dynamic config + API to modify the operational state of haproxy backends. + Any endpoint changes (scaling, node evictions, etc) are handled by + provisioning each backend with a pool of dynamic servers, which can + then be used as needed. The max-dynamic-servers option (and/or + ROUTER_MAX_DYNAMIC_SERVERS environment variable) controls the size + of this pool. + For new routes to be made available immediately, the haproxy-manager + provisions a pre-allocated pool of routes called blueprints. A backend + from this blueprint pool is used if the new route matches a specific blueprint. 
+ The default set of blueprints support for passthrough, insecure (or http) + and edge secured routes using the default certificates. + The blueprint-route-pool-size option (and/or the + ROUTER_BLUEPRINT_ROUTE_POOL_SIZE environment variable) control the + size of this pre-allocated pool. + + These blueprints can be extended or customized by using the blueprint route + namespace and the blueprint label selector. Those options allow selected routes + from a certain namespace (matching the label selection criteria) to + serve as custom blueprints.`) type TemplateRouterOptions struct { Config *Config @@ -98,11 +123,10 @@ type TemplateRouter struct { type TemplateRouterConfigManager struct { ConfigManagerName string - ConfigManagerConnectionInfo string CommitInterval time.Duration BlueprintRouteNamespace string + BlueprintRouteLabelSelector string BlueprintRoutePoolSize int - DynamicServerPrefix string MaxDynamicServers int } @@ -139,11 +163,10 @@ func (o *TemplateRouter) Bind(flag *pflag.FlagSet) { flag.BoolVar(&o.StrictSNI, "strict-sni", isTrue(util.Env("ROUTER_STRICT_SNI", "")), "Use strict-sni bind processing (do not use default cert).") flag.StringVar(&o.MetricsType, "metrics-type", util.Env("ROUTER_METRICS_TYPE", ""), "Specifies the type of metrics to gather. Supports 'haproxy'.") flag.StringVar(&o.ConfigManagerName, "config-manager", util.Env("ROUTER_CONFIG_MANAGER", ""), "Specifies the manager to use for dynamically configuring changes with the underlying router. 
Supports 'haproxy-manager'.") - flag.StringVar(&o.ConfigManagerConnectionInfo, "config-manager-connection-info", "", "Specifies connection information for the dynamic configuration manager.") flag.DurationVar(&o.CommitInterval, "commit-interval", getIntervalFromEnv("COMMIT_INTERVAL", defaultCommitInterval), "Controls how often to commit (to the actual config) all the changes made using the router specific dynamic configuration manager.") flag.StringVar(&o.BlueprintRouteNamespace, "blueprint-route-namespace", util.Env("ROUTER_BLUEPRINT_ROUTE_NAMESPACE", ""), "Specifies the namespace which contains the routes that serve as blueprints for the dynamic configuration manager.") + flag.StringVar(&o.BlueprintRouteLabelSelector, "blueprint-route-labels", util.Env("ROUTER_BLUEPRINT_ROUTE_LABELS", ""), "A label selector to apply to the routes in the blueprint route namespace. These selected routes will serve as blueprints for the dynamic configuration manager.") flag.IntVar(&o.BlueprintRoutePoolSize, "blueprint-route-pool-size", int(util.EnvInt("ROUTER_BLUEPRINT_ROUTE_POOL_SIZE", 10, 1)), "Specifies the size of the pre-allocated pool for each route blueprint managed by the router specific dynamic configuration manager. This can be overriden by an annotation router.openshift.io/pool-size on an individual route.") - flag.StringVar(&o.DynamicServerPrefix, "dynamic-server-prefix", util.Env("ROUTER_DYNAMIC_SERVER_PREFIX", ""), "Specifies the prefix for dynamic servers added to router backends. 
These dynamic servers are handled by the router specific dynamic configuration manager.") flag.IntVar(&o.MaxDynamicServers, "max-dynamic-servers", int(util.EnvInt("ROUTER_MAX_DYNAMIC_SERVERS", 5, 1)), "Specifies the maximum number of dynamic servers added to a route for use by the router specific dynamic configuration manager.") } @@ -437,19 +460,13 @@ func (o *TemplateRouterOptions) Run() error { if err != nil { return err } - - uri := o.ConfigManagerConnectionInfo - if len(o.ConfigManagerConnectionInfo) == 0 { - uri = "unix:///var/lib/haproxy/run/haproxy.sock" - } - cmopts := templateplugin.ConfigManagerOptions{ - ConnectionInfo: uri, + ConnectionInfo: "unix:///var/lib/haproxy/run/haproxy.sock", CommitInterval: o.CommitInterval, BlueprintRoutes: blueprintRoutes, BlueprintRoutePoolSize: o.BlueprintRoutePoolSize, - DynamicServerPrefix: o.DynamicServerPrefix, MaxDynamicServers: o.MaxDynamicServers, + WildcardRoutesAllowed: o.AllowWildcardRoutes, } cfgManager = haproxyconfigmanager.NewHAProxyConfigManager(cmopts) } @@ -533,7 +550,12 @@ func (o *TemplateRouterOptions) blueprintRoutes(routeclient *routeinternalclient return blueprints, nil } - routeList, err := routeclient.Route().Routes(o.BlueprintRouteNamespace).List(metav1.ListOptions{}) + options := metav1.ListOptions{} + if len(o.BlueprintRouteLabelSelector) > 0 { + options.LabelSelector = o.BlueprintRouteLabelSelector + } + + routeList, err := routeclient.Route().Routes(o.BlueprintRouteNamespace).List(options) if err != nil { return blueprints, err } diff --git a/pkg/oc/admin/router/router.go b/pkg/oc/admin/router/router.go index 78183f9886ea..fa63a75c84c0 100644 --- a/pkg/oc/admin/router/router.go +++ b/pkg/oc/admin/router/router.go @@ -808,7 +808,6 @@ func RunCmdRouter(f kcmdutil.Factory, cmd *cobra.Command, out, errout io.Writer, // automatically start the internal metrics agent if we are handling a known type if cfg.Type == "haproxy-router" { env["ROUTER_CONFIG_MANAGER"] = "haproxy-manager" - 
env["ROUTER_DYNAMIC_SERVER_PREFIX"] = "_dynamic" if cfg.StatsPort != 0 { env["ROUTER_LISTEN_ADDR"] = fmt.Sprintf("0.0.0.0:%d", cfg.StatsPort) env["ROUTER_METRICS_TYPE"] = "haproxy" diff --git a/pkg/router/template/configmanager/haproxy/backend.go b/pkg/router/template/configmanager/haproxy/backend.go index 011926db0d8f..57420cb5b2f7 100644 --- a/pkg/router/template/configmanager/haproxy/backend.go +++ b/pkg/router/template/configmanager/haproxy/backend.go @@ -103,8 +103,8 @@ type backendServer struct { updatedState BackendServerState } -// GetHAProxyBackends returns a list of haproxy backends. -func GetHAProxyBackends(c *Client) ([]*Backend, error) { +// buildHAProxyBackends builds and returns a list of haproxy backends. +func buildHAProxyBackends(c *Client) ([]*Backend, error) { entries := []*backendEntry{} converter := NewCSVConverter(showBackendHeader, &entries, nil) _, err := c.RunCommand(ListBackendsCommand, converter) diff --git a/pkg/router/template/configmanager/haproxy/client.go b/pkg/router/template/configmanager/haproxy/client.go index 639cfc7dabfc..8d9227d3ceae 100644 --- a/pkg/router/template/configmanager/haproxy/client.go +++ b/pkg/router/template/configmanager/haproxy/client.go @@ -8,6 +8,8 @@ import ( haproxy "github.com/bcicen/go-haproxy" "github.com/golang/glog" + + utilwait "k8s.io/apimachinery/pkg/util/wait" ) const ( @@ -95,7 +97,7 @@ func (c *Client) Commit() error { // Backends returns the list of configured haproxy backends. func (c *Client) Backends() ([]*Backend, error) { if len(c.backends) == 0 { - if backends, err := GetHAProxyBackends(c); err != nil { + if backends, err := buildHAProxyBackends(c); err != nil { return nil, err } else { c.backends = backends @@ -123,7 +125,7 @@ func (c *Client) FindBackend(id string) (*Backend, error) { // Maps returns the list of configured haproxy maps. 
func (c *Client) Maps() ([]*HAProxyMap, error) { if len(c.maps) == 0 { - hapMaps, err := GetHAProxyMaps(c) + hapMaps, err := buildHAProxyMaps(c) if err != nil { return nil, err } @@ -161,33 +163,41 @@ func (c *Client) FindMap(name string) (*HAProxyMap, error) { // runCommandWithRetries retries a haproxy command upto the retry limit // if the error for the command is a retryable error. func (c *Client) runCommandWithRetries(cmd string, limit int) (*bytes.Buffer, error) { - retryAttempt := 0 - for { + var buffer *bytes.Buffer + var cmdErr error + + cmdWaitBackoff := utilwait.Backoff{ + Duration: 10 * time.Millisecond, + Factor: 2, + Steps: limit, + } + + n := 0 + utilwait.ExponentialBackoff(cmdWaitBackoff, func() (bool, error) { + n++ client := &haproxy.HAProxyClient{ Addr: c.socketAddress, Timeout: c.timeout, } - buffer, err := client.RunCommand(cmd) - if err == nil || !isRetryable(err, cmd) { - return buffer, err + buffer, cmdErr = client.RunCommand(cmd) + if cmdErr == nil { + return true, nil } - - retryAttempt++ - if retryAttempt > limit { - return buffer, err + if !isRetriable(cmdErr, cmd) { + return false, cmdErr } + return false, nil + }) - msecs := retryAttempt * 10 - if msecs > 60 { - msecs = 60 - } - time.Sleep(time.Duration(msecs) * time.Millisecond) - glog.V(4).Infof("retry #%d: cmd: %q, err was %v", retryAttempt, cmd, err) + if cmdErr != nil { + glog.V(4).Infof("%d attempt(s) to run haproxy command %q failed: %v", n, cmd, cmdErr) } + + return buffer, cmdErr } -// isRetryable checks if a haproxy command can be retried. -func isRetryable(err error, cmd string) bool { +// isRetriable checks if a haproxy command can be retried. 
+func isRetriable(err error, cmd string) bool { retryableErrors := []string{ "connection reset by peer", "connection refused", diff --git a/pkg/router/template/configmanager/haproxy/converter.go b/pkg/router/template/configmanager/haproxy/converter.go index 0e6ac2614699..36f64436cb6b 100644 --- a/pkg/router/template/configmanager/haproxy/converter.go +++ b/pkg/router/template/configmanager/haproxy/converter.go @@ -30,8 +30,8 @@ type CSVConverter struct { } // NewCSVConverter returns a new CSVConverter. -func NewCSVConverter(headers string, out interface{}, fn ByteConverterFunc) CSVConverter { - return CSVConverter{ +func NewCSVConverter(headers string, out interface{}, fn ByteConverterFunc) *CSVConverter { + return &CSVConverter{ headers: []byte(headers), out: out, converterFunc: fn, @@ -39,16 +39,15 @@ func NewCSVConverter(headers string, out interface{}, fn ByteConverterFunc) CSVC } // Convert runs a haproxy dynamic config API command. -func (c CSVConverter) Convert(data []byte) ([]byte, error) { - glog.V(4).Infof("CSV converter input data bytes: %s", string(data)) +func (c *CSVConverter) Convert(data []byte) ([]byte, error) { + glog.V(5).Infof("CSV converter input data bytes: %s", string(data)) if c.converterFunc != nil { convertedBytes, err := c.converterFunc(data) if err != nil { - glog.Errorf("CSV converter error: %v", err) return data, err } data = convertedBytes - glog.V(4).Infof("CSV converter transformed data bytes: %s", string(data)) + glog.V(5).Infof("CSV converter transformed data bytes: %s", string(data)) } if c.out == nil { @@ -66,20 +65,20 @@ func (c CSVConverter) Convert(data []byte) ([]byte, error) { return r }) - glog.V(4).Infof("CSV converter fixing up csv header ...") - data, _ = c.fixupHeaders(data) - glog.V(4).Infof("CSV converter fixed up data bytes: %s", string(data)) + glog.V(5).Infof("CSV converter fixing up csv header ...") + data, _ = fixupHeaders(data, c.headers) + glog.V(5).Infof("CSV converter fixed up data bytes: %s", string(data)) 
return data, gocsv.Unmarshal(bytes.NewBuffer(data), c.out) } // fixupHeaders fixes up haproxy API responses that don't contain any CSV // header information. This allows us to easily parse the data and marshal // into an array of native golang structs. -func (c CSVConverter) fixupHeaders(data []byte) ([]byte, error) { +func fixupHeaders(data, headers []byte) ([]byte, error) { prefix := []byte("#") - if len(c.headers) > 0 && !bytes.HasPrefix(data, prefix) { + if len(headers) > 0 && !bytes.HasPrefix(data, prefix) { // No header, so insert one. - line := bytes.Join([][]byte{prefix, c.headers}, []byte(" ")) + line := bytes.Join([][]byte{prefix, headers}, []byte(" ")) data = bytes.Join([][]byte{line, data}, []byte("\n")) } diff --git a/pkg/router/template/configmanager/haproxy/converter_test.go b/pkg/router/template/configmanager/haproxy/converter_test.go new file mode 100644 index 000000000000..23fc17272030 --- /dev/null +++ b/pkg/router/template/configmanager/haproxy/converter_test.go @@ -0,0 +1,443 @@ +package haproxy + +import ( + "bytes" + "fmt" + "testing" +) + +// TestNewConverter tests a new converter. 
+func TestNewConverter(t *testing.T) { + testCases := []struct { + name string + headers string + fn ByteConverterFunc + }{ + { + name: "empty headers", + headers: "", + fn: noopConverter, + }, + { + name: "nil converter", + headers: "#a", + }, + { + name: "noop converter", + headers: "#no o p", + fn: noopConverter, + }, + { + name: "removing leading hash converter", + headers: "#d e l", + fn: removeLeadingHashConverter, + }, + { + name: "comment first line converter", + headers: "#comment", + fn: commentFirstLineConverter, + }, + { + name: "remove first line converter", + headers: "#rm - r f", + fn: removeFirstLineConverter, + }, + { + name: "error converter", + headers: "#raise throw error", + fn: errorConverter, + }, + } + + for _, tc := range testCases { + entries := []*infoEntry{} + if c := NewCSVConverter(tc.headers, entries, tc.fn); c == nil { + t.Errorf("TestNewConverter test case %s failed. Unexpected error", tc.name) + } + } +} + +// TestShowInfoCommandConverter tests show info command output with a converter. 
+func TestShowInfoCommandConverter(t *testing.T) { + infoCommandOutput := `Name: converter-test +Version: 0.0.1 +Nbproc: 1 +Process_num: 1 +Pid: 42 +` + + testCases := []struct { + name string + commandOutput string + header string + converter ByteConverterFunc + failureExpected bool + }{ + { + name: "info parser", + commandOutput: infoCommandOutput, + header: "name value", + converter: nil, + failureExpected: false, + }, + { + name: "info parser with noop converter", + commandOutput: infoCommandOutput, + header: "name value", + converter: noopConverter, + failureExpected: false, + }, + { + name: "info parser with comment header", + commandOutput: infoCommandOutput, + header: "#name value", + converter: noopConverter, + failureExpected: false, + }, + { + name: "output with header", + commandOutput: "#name value\n" + infoCommandOutput, + header: "", + converter: removeLeadingHashConverter, + failureExpected: false, + }, + { + name: "output without header", + commandOutput: "name value\n" + infoCommandOutput, + header: "", + converter: commentFirstLineConverter, + failureExpected: false, + }, + { + name: "output with error converter", + commandOutput: infoCommandOutput, + header: "#name value", + converter: errorConverter, + failureExpected: true, + }, + { + name: "output with bad header", + commandOutput: infoCommandOutput, + header: "# name value extra1 extra2", + converter: nil, + failureExpected: true, + }, + { + name: "output with bad header 2", + commandOutput: infoCommandOutput, + header: "# name value extra1 extra2", + converter: removeLeadingHashConverter, + failureExpected: true, + }, + { + name: "output with empty header", + commandOutput: "name value\n" + infoCommandOutput, + header: "", + converter: commentFirstLineConverter, + failureExpected: false, + }, + { + name: "bad command output with header", + commandOutput: "command error 404 - check params", + header: "field1 field2 field3", + converter: nil, + failureExpected: true, + }, + } + + for _, tc := 
range testCases { + entries := []*infoEntry{} + c := NewCSVConverter(tc.header, &entries, tc.converter) + response, err := c.Convert([]byte(tc.commandOutput)) + if tc.failureExpected && err == nil { + t.Errorf("TestShowInfoCommandConverter test case %s expected a failure but got none, response=%s", + tc.name, string(response)) + } + if !tc.failureExpected && err != nil { + t.Errorf("TestShowInfoCommandConverter test case %s expected no failure but got one: %v", tc.name, err) + } + } +} + +// TestShowBackendCommandConverter tests show backend command output with a converter. +func TestShowBackendCommandConverter(t *testing.T) { + showBackendOutput := `# name +be_sni +be_no_sni +openshift_default +be_edge_http:blueprints:blueprint-redirect-to-https +be_edge_http:default:example-route +be_edge_http:default:test-http-allow +be_edge_http:default:test-https +be_edge_http:default:test-https-only +be_edge_http:ns1:example-route +be_tcp:default:test-passthrough +be_tcp:ns1:passthru-1 +be_tcp:ns2:passthru-1 +be_secure:default:test +be_secure:ns1:re1 +be_secure:ns2:reencrypt-one +be_secure:ns2:reencrypt-two +be_secure:ns2:reencrypt-three +be_secure:ns3:re1 +be_secure:ns3:re2 +` + testCases := []struct { + name string + commandOutput string + header string + converter ByteConverterFunc + failureExpected bool + }{ + { + name: "show backend command", + commandOutput: showBackendOutput, + header: "name", + converter: nil, + failureExpected: false, + }, + { + name: "show backend with noop converter", + commandOutput: showBackendOutput, + header: "name", + converter: noopConverter, + failureExpected: false, + }, + { + name: "show backend removing leading hash", + commandOutput: showBackendOutput, + header: "name", + converter: removeLeadingHashConverter, + failureExpected: false, + }, + { + name: "show backend removing leading hash 2", + commandOutput: showBackendOutput, + header: "#name", + converter: removeLeadingHashConverter, + failureExpected: false, + }, + { + name: "show 
backend comment first line", + commandOutput: showBackendOutput[1:], + header: "name", + converter: commentFirstLineConverter, + failureExpected: false, + }, + { + name: "show backend remove first line", + commandOutput: showBackendOutput, + header: "name", + converter: removeFirstLineConverter, + failureExpected: false, + }, + { + name: "show backend error converter", + commandOutput: showBackendOutput, + header: "name", + converter: errorConverter, + failureExpected: true, + }, + { + name: "empty output error converter", + commandOutput: "", + header: "name", + converter: errorConverter, + failureExpected: true, + }, + { + name: "show backend error output", + commandOutput: "connection failed, no backends", + header: "name", + converter: noopConverter, + failureExpected: true, + }, + } + + for _, tc := range testCases { + entries := []*backendEntry{} + c := NewCSVConverter(tc.header, &entries, tc.converter) + response, err := c.Convert([]byte(tc.commandOutput)) + if tc.failureExpected && err == nil { + t.Errorf("TestShowBackendCommandConverter test case %s expected a failure but got none, response=%s", + tc.name, string(response)) + } + if !tc.failureExpected && err != nil { + t.Errorf("TestShowBackendCommandConverter test case %s expected no failure but got one: %v", tc.name, err) + } + } +} + +// TestShowMapCommandConverter tests show map command output with a converter. 
+func TestShowMapCommandConverter(t *testing.T) { + listMapOutput := `# id (file) description +1 (/var/lib/haproxy/conf/os_route_http_redirect.map) pattern loaded from file '/var/lib/haproxy/conf/os_route_http_redirect.map' used by map at file '/var/lib/haproxy/conf/haproxy.config' line 68 +5 (/var/lib/haproxy/conf/os_sni_passthrough.map) pattern loaded from file '/var/lib/haproxy/conf/os_sni_passthrough.map' used by map at file '/var/lib/haproxy/conf/haproxy.config' line 87 +-1 (/var/lib/haproxy/conf/os_http_be.map) pattern loaded from file '/var/lib/haproxy/conf/os_http_be.map' used by map at file '/var/lib/haproxy/conf/haproxy.config' line 71 +` + + testCases := []struct { + name string + commandOutput string + header string + converter ByteConverterFunc + failureExpected bool + }{ + { + name: "show map", + commandOutput: listMapOutput, + header: "id (file) description", + converter: fixupMapListOutput, + failureExpected: false, + }, + { + name: "show map with no converter", + commandOutput: listMapOutput, + header: "id (file) description", + converter: nil, + failureExpected: true, + }, + { + name: "show map without map fixup", + commandOutput: listMapOutput, + header: "id (file) description", + converter: removeFirstLineConverter, + failureExpected: true, + }, + { + name: "show map with error converter", + commandOutput: listMapOutput, + header: "id (file) description", + converter: errorConverter, + failureExpected: true, + }, + { + name: "show map with error converter 2", + commandOutput: "", + header: "id (file) description", + converter: errorConverter, + failureExpected: true, + }, + { + name: "show map bad output", + commandOutput: "error fetching list of maps: connection failed", + header: "id (file) description", + converter: fixupMapListOutput, + failureExpected: true, + }, + } + + for _, tc := range testCases { + entries := []*mapListEntry{} + c := NewCSVConverter(tc.header, &entries, tc.converter) + response, err := 
c.Convert([]byte(tc.commandOutput)) + if tc.failureExpected && err == nil { + t.Errorf("TestShowMapCommandConverter test case %s expected a failure but got none, response=%s", + tc.name, string(response)) + } + if !tc.failureExpected && err != nil { + t.Errorf("TestShowMapCommandConverter test case %s expected no failure but got one: %v", tc.name, err) + } + } +} + +// TestShowServerStateOutputConverter tests show servers state output with a converter. +func TestShowServerStateOutputConverter(t *testing.T) { + testCases := []struct { + name string + commandOutput string + header string + converter ByteConverterFunc + failureExpected bool + }{ + { + name: "show servers state", + commandOutput: onePodAndOneDynamicServerBackendTemplate, + header: serversStateHeader, + converter: stripVersionNumber, + failureExpected: false, + }, + { + name: "show servers state without a converter", + commandOutput: onePodAndOneDynamicServerBackendTemplate, + header: serversStateHeader, + converter: nil, + failureExpected: true, + }, + { + name: "show servers state without removing version number", + commandOutput: onePodAndOneDynamicServerBackendTemplate, + header: serversStateHeader, + converter: removeLeadingHashConverter, + failureExpected: true, + }, + { + name: "show servers state removing first line with version number", + commandOutput: onePodAndOneDynamicServerBackendTemplate, + header: serversStateHeader, + converter: removeFirstLineConverter, + failureExpected: false, + }, + { + name: "show servers state with error converter", + commandOutput: onePodAndOneDynamicServerBackendTemplate, + header: serversStateHeader, + converter: errorConverter, + failureExpected: true, + }, + { + name: "show servers state with error output", + commandOutput: "error: failed to find backend", + header: serversStateHeader, + converter: nil, + failureExpected: true, + }, + } + + for _, tc := range testCases { + entries := []*serverStateInfo{} + c := NewCSVConverter(tc.header, &entries, 
tc.converter) + response, err := c.Convert([]byte(tc.commandOutput)) + if tc.failureExpected && err == nil { + t.Errorf("TestShowServerStateOutputConverter test case %s expected a failure but got none, response=%s", + tc.name, string(response)) + } + if !tc.failureExpected && err != nil { + t.Errorf("TestShowServerStateOutputConverter test case %s expected no failure but got one: %v", tc.name, err) + } + } +} + +func noopConverter(data []byte) ([]byte, error) { + return data, nil +} + +func removeLeadingHashConverter(data []byte) ([]byte, error) { + prefix := []byte("#") + idx := 0 + if len(data) > 0 && bytes.HasPrefix(data, prefix) { + idx = 1 + } + + return data[idx:], nil +} + +func commentFirstLineConverter(data []byte) ([]byte, error) { + return bytes.Join([][]byte{[]byte("#"), data}, []byte("")), nil +} + +func removeFirstLineConverter(data []byte) ([]byte, error) { + if len(data) > 0 { + idx := bytes.Index(data, []byte("\n")) + if idx > -1 { + if idx+1 < len(data) { + return data[idx+1:], nil + } + } + } + return []byte(""), nil +} + +func errorConverter(data []byte) ([]byte, error) { + return data, fmt.Errorf("converter test error") +} diff --git a/pkg/router/template/configmanager/haproxy/manager.go b/pkg/router/template/configmanager/haproxy/manager.go index c89d140c83ea..0f176b363661 100644 --- a/pkg/router/template/configmanager/haproxy/manager.go +++ b/pkg/router/template/configmanager/haproxy/manager.go @@ -16,6 +16,7 @@ import ( routeapi "github.com/openshift/origin/pkg/route/apis/route" templaterouter "github.com/openshift/origin/pkg/router/template" + templateutil "github.com/openshift/origin/pkg/router/template/util" ) const ( @@ -38,6 +39,10 @@ const ( // pool of blueprint routes. blueprintRoutePoolNamePrefix = "_hapcm_blueprint_pool" + // dynamicServerPrefix is the prefix used in the haproxy template + // for adding dynamic servers (pods) to a backend. 
+ dynamicServerPrefix = "_dynamic" + // routePoolSizeAnnotation is the annotation on the blueprint route // overriding the default pool size. routePoolSizeAnnotation = "router.openshift.io/pool-size" @@ -107,14 +112,13 @@ type haproxyConfigManager struct { // backends for each route blueprint. blueprintRoutePoolSize int - // dynamicServerPrefix is the prefix used in the haproxy template - // for adding dynamic servers (pods) to a backend. - dynamicServerPrefix string - // maxDynamicServers is the maximum number of dynamic servers // allocated per backend in the haproxy template configuration. maxDynamicServers int + // wildcardRoutesAllowed indicates if wildcard routes are allowed. + wildcardRoutesAllowed bool + // router is the associated template router. router templaterouter.RouterInterface @@ -152,8 +156,8 @@ func NewHAProxyConfigManager(options templaterouter.ConfigManagerOptions) *hapro commitInterval: options.CommitInterval, blueprintRoutes: buildBlueprintRoutes(options.BlueprintRoutes), blueprintRoutePoolSize: options.BlueprintRoutePoolSize, - dynamicServerPrefix: options.DynamicServerPrefix, maxDynamicServers: options.MaxDynamicServers, + wildcardRoutesAllowed: options.WildcardRoutesAllowed, defaultCertificate: "", client: client, @@ -185,7 +189,8 @@ func (cm *haproxyConfigManager) Initialize(router templaterouter.RouterInterface } // Register registers an id with an expected haproxy backend for a route. -func (cm *haproxyConfigManager) Register(id string, route *routeapi.Route, wildcard bool) { +func (cm *haproxyConfigManager) Register(id string, route *routeapi.Route) { + wildcard := cm.wildcardRoutesAllowed && (route.Spec.WildcardPolicy == routeapi.WildcardPolicySubdomain) entry := &routeBackendEntry{ id: id, termination: routeTerminationType(route), @@ -202,12 +207,12 @@ func (cm *haproxyConfigManager) Register(id string, route *routeapi.Route, wildc } // AddRoute adds a new route or updates an existing route. 
-func (cm *haproxyConfigManager) AddRoute(id string, route *routeapi.Route, wildcard bool) error { +func (cm *haproxyConfigManager) AddRoute(id string, route *routeapi.Route) error { if cm.isReloading() { return fmt.Errorf("Router reload in progress, cannot dynamically add route %s", id) } - glog.V(4).Infof("Removing route id %s, wildcard %+v", id, wildcard) + glog.V(4).Infof("Adding route id %s", id) if cm.isManagedPoolRoute(route) { return fmt.Errorf("managed pool blueprint route %s ignored", id) @@ -218,7 +223,7 @@ func (cm *haproxyConfigManager) AddRoute(id string, route *routeapi.Route, wildc return fmt.Errorf("no blueprint found that would match route %s/%s", route.Namespace, route.Name) } - cm.Register(id, route, wildcard) + cm.Register(id, route) cm.lock.Lock() defer cm.lock.Unlock() @@ -252,8 +257,8 @@ func (cm *haproxyConfigManager) AddRoute(id string, route *routeapi.Route, wildc } // RemoveRoute removes a route. -func (cm *haproxyConfigManager) RemoveRoute(id string, route *routeapi.Route, wildcard bool) error { - glog.V(4).Infof("Removing route %s, wildcard %+v", id, wildcard) +func (cm *haproxyConfigManager) RemoveRoute(id string, route *routeapi.Route) error { + glog.V(4).Infof("Removing route %s", id) if cm.isReloading() { return fmt.Errorf("Router reload in progress, cannot dynamically remove route id %s", id) } @@ -369,7 +374,7 @@ func (cm *haproxyConfigManager) ReplaceRouteEndpoints(id string, oldEndpoints, n unusedServerNames := []string{} for _, s := range servers { relatedEndpointID := s.Name - if cm.isDynamicBackendServer(s) { + if isDynamicBackendServer(s) { if epid, ok := entry.dynamicServerMap[s.Name]; ok { relatedEndpointID = epid } else { @@ -513,9 +518,9 @@ func (cm *haproxyConfigManager) Commit() { // ServerTemplateName returns the dynamic server template name. 
func (cm *haproxyConfigManager) ServerTemplateName(id string) string { - if len(cm.dynamicServerPrefix) > 0 && cm.maxDynamicServers > 0 { + if cm.maxDynamicServers > 0 { // Adding the id makes the name unwieldy - use pod. - return fmt.Sprintf("%s-pod", cm.dynamicServerPrefix) + return fmt.Sprintf("%s-pod", dynamicServerPrefix) } return "" @@ -576,15 +581,6 @@ func (cm *haproxyConfigManager) isManagedPoolRoute(route *routeapi.Route) bool { return route.Namespace == blueprintRoutePoolNamespace } -// isDynamicBackendServer indicates if a backend server is a dynamic server. -func (cm *haproxyConfigManager) isDynamicBackendServer(server BackendServerInfo) bool { - if len(cm.dynamicServerPrefix) == 0 { - return false - } - - return strings.HasPrefix(server.Name, cm.dynamicServerPrefix) -} - // provisionBackendPools pre-allocates pools of backends based on the // different blueprint routes. func (cm *haproxyConfigManager) provisionBackendPools() { @@ -756,7 +752,7 @@ func (entry *routeBackendEntry) BuildMapAssociations(route *routeapi.Route) { name := entry.BackendName() // Do the path specific regular expression usage first. - pathRE := templaterouter.GenerateRouteRegexp(hostspec, pathspec, entry.wildcard) + pathRE := templateutil.GenerateRouteRegexp(hostspec, pathspec, entry.wildcard) if policy == routeapi.InsecureEdgeTerminationPolicyRedirect { associate("os_route_http_redirect.map", pathRE, name) } @@ -778,7 +774,7 @@ func (entry *routeBackendEntry) BuildMapAssociations(route *routeapi.Route) { } // And then handle the host specific regular expression usage. 
- hostRE := templaterouter.GenerateRouteRegexp(hostspec, "", entry.wildcard) + hostRE := templateutil.GenerateRouteRegexp(hostspec, "", entry.wildcard) if len(os.Getenv("ROUTER_ALLOW_WILDCARD_ROUTES")) > 0 && entry.wildcard { associate("os_wildcard_domain.map", hostRE, "1") } @@ -859,7 +855,7 @@ func createBlueprintRoute(routeType routeapi.TLSTerminationType) *routeapi.Route // routeBackendName returns the haproxy backend name for a route. func routeBackendName(id string, route *routeapi.Route) string { termination := routeTerminationType(route) - prefix := templaterouter.GenBackendNamePrefix(termination) + prefix := templateutil.GenerateBackendNamePrefix(termination) return fmt.Sprintf("%s:%s", prefix, id) } @@ -890,6 +886,15 @@ func routeTerminationType(route *routeapi.Route) routeapi.TLSTerminationType { return termination } +// isDynamicBackendServer indicates if a backend server is a dynamic server. +func isDynamicBackendServer(server BackendServerInfo) bool { + if len(dynamicServerPrefix) == 0 { + return false + } + + return strings.HasPrefix(server.Name, dynamicServerPrefix) +} + // applyMapAssociations applies the backend associations to a haproxy map. func applyMapAssociations(m *HAProxyMap, associations map[string]string, add bool) error { for k, v := range associations { diff --git a/pkg/router/template/configmanager/haproxy/map.go b/pkg/router/template/configmanager/haproxy/map.go index 5051983f6ca6..c5351ae3344a 100644 --- a/pkg/router/template/configmanager/haproxy/map.go +++ b/pkg/router/template/configmanager/haproxy/map.go @@ -54,9 +54,9 @@ type HAProxyMap struct { dirty bool } -// GetHAProxyMaps returns a list of loaded haproxy maps. +// buildHAProxyMaps builds and returns a list of haproxy maps. // Note: Maps are lazily populated based on their usage. 
-func GetHAProxyMaps(c *Client) ([]*HAProxyMap, error) { +func buildHAProxyMaps(c *Client) ([]*HAProxyMap, error) { entries := []*mapListEntry{} converter := NewCSVConverter(showMapListHeader, &entries, fixupMapListOutput) diff --git a/pkg/router/template/configmanager/haproxy/map_test.go b/pkg/router/template/configmanager/haproxy/map_test.go index 7a802b42e649..8e712c505e89 100644 --- a/pkg/router/template/configmanager/haproxy/map_test.go +++ b/pkg/router/template/configmanager/haproxy/map_test.go @@ -4,8 +4,8 @@ import ( "testing" ) -// TestGetHAProxyMaps tests haproxy maps. -func TestGetHAProxyMaps(t *testing.T) { +// TestBuildHAProxyMaps tests haproxy maps. +func TestBuildHAProxyMaps(t *testing.T) { server := startFakeServerForTest(t) defer server.Stop() @@ -34,22 +34,22 @@ func TestGetHAProxyMaps(t *testing.T) { for _, tc := range testCases { client := NewClient(tc.sockFile, 0) if client == nil { - t.Errorf("TestGetHAProxyMaps test case %s failed with no client.", tc.name) + t.Errorf("TestBuildHAProxyMaps test case %s failed with no client.", tc.name) } - haproxyMaps, err := GetHAProxyMaps(client) + haproxyMaps, err := buildHAProxyMaps(client) if tc.failureExpected { if err == nil { - t.Errorf("TestGetHAProxyMaps test case %s expected an error but got none.", tc.name) + t.Errorf("TestBuildHAProxyMaps test case %s expected an error but got none.", tc.name) } continue } if err != nil { - t.Errorf("TestGetHAProxyMaps test case %s expected no error but got: %v", tc.name, err) + t.Errorf("TestBuildHAProxyMaps test case %s expected no error but got: %v", tc.name, err) } if len(haproxyMaps) == 0 { - t.Errorf("TestGetHAProxyMaps test case %s expected to get maps", tc.name) + t.Errorf("TestBuildHAProxyMaps test case %s expected to get maps", tc.name) } } } diff --git a/pkg/router/template/router.go b/pkg/router/template/router.go index c2f231b0d419..51832b9c764f 100644 --- a/pkg/router/template/router.go +++ b/pkg/router/template/router.go @@ -590,8 +590,7 @@ func (r 
*templateRouter) dynamicallyAddRoute(backendKey string, route *routeapi. } glog.V(4).Infof("Dynamically adding route backend %s", backendKey) - wildcard := backend.IsWildcard - r.dynamicConfigManager.Register(backendKey, route, wildcard) + r.dynamicConfigManager.Register(backendKey, route) // If no initial sync was done, don't try to dynamically add the // route as we will need a reload anyway. @@ -599,9 +598,9 @@ func (r *templateRouter) dynamicallyAddRoute(backendKey string, route *routeapi. return false } - err := r.dynamicConfigManager.AddRoute(backendKey, route, wildcard) + err := r.dynamicConfigManager.AddRoute(backendKey, route) if err != nil { - glog.Warningf("Router will reload as there was an error dynamically adding route backend %s: %v", backendKey, err) + glog.V(4).Infof("Router will reload as the ConfigManager could not dynamically add route for backend %s: %v", backendKey, err) return false } @@ -620,7 +619,7 @@ func (r *templateRouter) dynamicallyAddRoute(backendKey string, route *routeapi. weight = 0 } if err := r.dynamicConfigManager.ReplaceRouteEndpoints(backendKey, oldEndpoints, newEndpoints, weight); err != nil { - glog.Warningf("Router will reload as there was an error dynamically replacing endpoints for new route backend %s, service %s: %v", + glog.V(4).Infof("Router will reload as the ConfigManager could not dynamically replace endpoints for route backend %s, service %s: %v", backendKey, key, err) return false } @@ -634,15 +633,15 @@ func (r *templateRouter) dynamicallyAddRoute(backendKey string, route *routeapi. // dynamicallyRemoveRoute attempts to dynamically remove a route. // Note: The config should have been synced at least once initially and // the caller needs to acquire a lock [and release it]. 
-func (r *templateRouter) dynamicallyRemoveRoute(backendKey string, route *routeapi.Route, backend ServiceAliasConfig) bool { +func (r *templateRouter) dynamicallyRemoveRoute(backendKey string, route *routeapi.Route) bool { if r.dynamicConfigManager == nil || !r.synced { return false } glog.V(4).Infof("Dynamically removing route backend %s", backendKey) - if err := r.dynamicConfigManager.RemoveRoute(backendKey, route, backend.IsWildcard); err != nil { - glog.Warningf("Router will reload as there was an error dynamically removing a route backend %s: %v", backendKey, err) + if err := r.dynamicConfigManager.RemoveRoute(backendKey, route); err != nil { + glog.V(4).Infof("Router will reload as the ConfigManager could not dynamically remove route backend %s: %v", backendKey, err) return false } @@ -681,7 +680,7 @@ func (r *templateRouter) dynamicallyReplaceEndpoints(id string, service ServiceU glog.V(4).Infof("Dynamically replacing endpoints for associated backend %s", backendKey) if err := r.dynamicConfigManager.ReplaceRouteEndpoints(backendKey, oldEndpoints, newEndpoints, weight); err != nil { // Error dynamically modifying the config, so return false to cause a reload to happen. - glog.Warningf("Router will reload as dynamic endpoint replacement for service id %s (backend=%s, weight=%v) failed: %v", id, backendKey, weight, err) + glog.V(4).Infof("Router will reload as the ConfigManager could not dynamically replace endpoints for service id %s (backend=%s, weight=%v): %v", id, backendKey, weight, err) return false } } @@ -708,7 +707,7 @@ func (r *templateRouter) dynamicallyRemoveEndpoints(service ServiceUnit, endpoin glog.V(4).Infof("Dynamically removing endpoints for associated backend %s", backendKey) if err := r.dynamicConfigManager.RemoveRouteEndpoints(backendKey, endpoints); err != nil { // Error dynamically modifying the config, so return false to cause a reload to happen. 
- glog.Warningf("Router will reload as dynamic endpoint removal for backend %s failed: %v", backendKey, err) + glog.V(4).Infof("Router will reload as the ConfigManager could not dynamically remove endpoints for backend %s: %v", backendKey, err) return false } } @@ -910,7 +909,7 @@ func (r *templateRouter) removeRouteInternal(route *routeapi.Route) { return } - configChanged := r.dynamicallyRemoveRoute(backendKey, route, serviceAliasConfig) + configChanged := r.dynamicallyRemoveRoute(backendKey, route) for key := range serviceAliasConfig.ServiceUnits { r.removeServiceAliasAssociation(key, backendKey) diff --git a/pkg/router/template/template_helper.go b/pkg/router/template/template_helper.go index d5f6e12535f7..c2f899f5fabf 100644 --- a/pkg/router/template/template_helper.go +++ b/pkg/router/template/template_helper.go @@ -119,22 +119,6 @@ func genCertificateHostName(hostname string, wildcard bool) string { return templateutil.GenCertificateHostName(hostname, wildcard) } -// Generates the backend name prefix based on the termination. -func GenBackendNamePrefix(termination routeapi.TLSTerminationType) string { - prefix := "be_http" - - switch termination { - case routeapi.TLSTerminationEdge: - prefix = "be_edge_http" - case routeapi.TLSTerminationReencrypt: - prefix = "be_secure" - case routeapi.TLSTerminationPassthrough: - prefix = "be_tcp" - } - - return prefix -} - // processEndpointsForAlias returns the list of endpoints for the given route's service // action argument further processes the list e.g. 
shuffle // The default action is in-order traversal of internal data structure that stores @@ -275,10 +259,10 @@ var helperFunctions = template.FuncMap{ "isInteger": isInteger, //determines if a given variable is an integer "matchValues": matchValues, //compares a given string to a list of allowed strings - "genSubdomainWildcardRegexp": genSubdomainWildcardRegexp, //generates a regular expression matching the subdomain for hosts (and paths) with a wildcard policy - "generateRouteRegexp": generateRouteRegexp, //generates a regular expression matching the route hosts (and paths) - "genCertificateHostName": genCertificateHostName, //generates host name to use for serving/matching certificates - "genBackendNamePrefix": GenBackendNamePrefix, //generates the prefix for the backend name + "genSubdomainWildcardRegexp": genSubdomainWildcardRegexp, //generates a regular expression matching the subdomain for hosts (and paths) with a wildcard policy + "generateRouteRegexp": generateRouteRegexp, //generates a regular expression matching the route hosts (and paths) + "genCertificateHostName": genCertificateHostName, //generates host name to use for serving/matching certificates + "genBackendNamePrefix": templateutil.GenerateBackendNamePrefix, //generates the prefix for the backend name "isTrue": isTrue, //determines if a given variable is a true value "firstMatch": firstMatch, //anchors provided regular expression and evaluates against given strings, returns the first matched string or "" diff --git a/pkg/router/template/types.go b/pkg/router/template/types.go index c0cc440732ff..b09fbc4b9347 100644 --- a/pkg/router/template/types.go +++ b/pkg/router/template/types.go @@ -160,30 +160,34 @@ type ConfigManagerOptions struct { // router.openshift.io/pool-size BlueprintRoutePoolSize int - // DynamicServerPrefix is the prefix used for naming the dynamic - // servers associated with a route. 
These dynamic servers are used to - // quickly modify the router config for any endpoint changes. - DynamicServerPrefix string - // MaxDynamicServers is the maximum number of dynamic servers we // will allocate on a per-route basis. MaxDynamicServers int + + // WildcardRoutesAllowed indicates if wildcard routes are allowed. + WildcardRoutesAllowed bool } // ConfigManager is used by the router to make configuration changes using // the template router's dynamic configuration API (if any). +// Please note that the code calling the ConfigManager interface methods +// needs to ensure that a lock is acquired and released in order to +// guarantee Config Manager consistency. +// The haproxy specific implementation of the ConfigManager itself does +// guarantee consistency with internal locks but it is not a hard +// requirement for a ConfigManager "provider". type ConfigManager interface { // Initialize initializes the config manager. Initialize(router RouterInterface, certPath string) // Register registers an id to be associated with a route. - Register(id string, route *routeapi.Route, wildcard bool) + Register(id string, route *routeapi.Route) // AddRoute adds a new route or updates an existing route. - AddRoute(id string, route *routeapi.Route, wildcard bool) error + AddRoute(id string, route *routeapi.Route) error // RemoveRoute removes a route. - RemoveRoute(id string, route *routeapi.Route, wildcard bool) error + RemoveRoute(id string, route *routeapi.Route) error // ReplaceRouteEndpoints replaces a subset (the ones associated with // a single service unit) of a route endpoints. 
diff --git a/pkg/router/template/util/util_test.go b/pkg/router/template/util/util_test.go index 22e7474f444a..beaa568084fa 100644 --- a/pkg/router/template/util/util_test.go +++ b/pkg/router/template/util/util_test.go @@ -246,3 +246,44 @@ func TestGenerateBackendNamePrefix(t *testing.T) { } } } + +func TestGenerateBackendNamePrefix(t *testing.T) { + testPrefixes := []struct { + name string + termination routeapi.TLSTerminationType + expectedPrefix string + }{ + { + name: "http route", + termination: routeapi.TLSTerminationType(""), + expectedPrefix: "be_http", + }, + { + name: "edge secured route", + termination: routeapi.TLSTerminationEdge, + expectedPrefix: "be_edge_http", + }, + { + name: "reencrypt route", + termination: routeapi.TLSTerminationReencrypt, + expectedPrefix: "be_secure", + }, + { + name: "passthrough route", + termination: routeapi.TLSTerminationPassthrough, + expectedPrefix: "be_tcp", + }, + { + name: "unknown route", + termination: routeapi.TLSTerminationType("foo"), + expectedPrefix: "be_http", + }, + } + + for _, tc := range testPrefixes { + prefix := GenerateBackendNamePrefix(tc.termination) + if prefix != tc.expectedPrefix { + t.Errorf("%s: expected %s to get %s, but got %s", tc.name, tc.expectedPrefix, prefix) + } + } +} diff --git a/test/end-to-end/router_test.go b/test/end-to-end/router_test.go index 805c7d5ebe27..733d74361415 100644 --- a/test/end-to-end/router_test.go +++ b/test/end-to-end/router_test.go @@ -1323,9 +1323,6 @@ func createAndStartRouterContainerExtended(dockerCli *dockerClient.Client, maste fmt.Sprintf("ROUTER_BIND_PORTS_AFTER_SYNC=%s", strconv.FormatBool(bindPortsAfterSync)), fmt.Sprintf("NAMESPACE_LABELS=%s", namespaceLabels), fmt.Sprintf("ROUTER_CONFIG_MANAGER=haproxy-manager"), - fmt.Sprintf("ROUTER_DYNAMIC_SERVER_PREFIX=_test-dynamic"), - fmt.Sprintf("ROUTER_MAX_DYNAMIC_SERVERS=3"), - fmt.Sprintf("ROUTER_BLUEPRINT_ROUTE_POOL_SIZE=5"), } reloadIntVar := fmt.Sprintf("RELOAD_INTERVAL=%ds", reloadInterval) From 
d599bfb69c485eb5dcd73354a6b50279e10f5a65 Mon Sep 17 00:00:00 2001 From: ramr Date: Mon, 2 Apr 2018 14:55:03 -0700 Subject: [PATCH 3/9] Add support for a new plugin to manage blueprint routes and configure the manager appropriately. And some more changes as per review comments. --- hack/lib/start.sh | 7 + pkg/cmd/infra/router/template.go | 10 +- pkg/oc/admin/router/router.go | 13 +- .../configmanager/haproxy/blueprint_plugin.go | 55 +++++ .../haproxy/blueprint_plugin_test.go | 212 ++++++++++++++++++ .../template/configmanager/haproxy/manager.go | 166 +++++++++++--- pkg/router/template/types.go | 6 + test/end-to-end/router_test.go | 2 +- 8 files changed, 431 insertions(+), 40 deletions(-) create mode 100644 pkg/router/template/configmanager/haproxy/blueprint_plugin.go create mode 100644 pkg/router/template/configmanager/haproxy/blueprint_plugin_test.go diff --git a/hack/lib/start.sh b/hack/lib/start.sh index a09d502bfffe..c0853928d869 100644 --- a/hack/lib/start.sh +++ b/hack/lib/start.sh @@ -613,6 +613,13 @@ function os::start::router() { else oc adm router --config="${ADMIN_KUBECONFIG}" --images="${USE_IMAGES}" --service-account=router fi + + # Note that when the haproxy config manager is set based on router type, + # the env entry may need to be always set or removed (if defaulted). 
+ if [[ -n "${ROUTER_HAPROXY_CONFIG_MANAGER:-}" ]]; then + os::log::debug "Changing the router DC to enable the haproxy config manager" + oc set env dc/router -c router ROUTER_HAPROXY_CONFIG_MANAGER=true + fi } readonly -f os::start::router diff --git a/pkg/cmd/infra/router/template.go b/pkg/cmd/infra/router/template.go index ac9f000cc9a2..b647a8503d07 100644 --- a/pkg/cmd/infra/router/template.go +++ b/pkg/cmd/infra/router/template.go @@ -122,7 +122,7 @@ type TemplateRouter struct { } type TemplateRouterConfigManager struct { - ConfigManagerName string + UseHAProxyConfigManager bool CommitInterval time.Duration BlueprintRouteNamespace string BlueprintRouteLabelSelector string @@ -162,7 +162,7 @@ func (o *TemplateRouter) Bind(flag *pflag.FlagSet) { flag.StringVar(&o.Ciphers, "ciphers", util.Env("ROUTER_CIPHERS", ""), "Specifies the cipher suites to use. You can choose a predefined cipher set ('modern', 'intermediate', or 'old') or specify exact cipher suites by passing a : separated list.") flag.BoolVar(&o.StrictSNI, "strict-sni", isTrue(util.Env("ROUTER_STRICT_SNI", "")), "Use strict-sni bind processing (do not use default cert).") flag.StringVar(&o.MetricsType, "metrics-type", util.Env("ROUTER_METRICS_TYPE", ""), "Specifies the type of metrics to gather. Supports 'haproxy'.") - flag.StringVar(&o.ConfigManagerName, "config-manager", util.Env("ROUTER_CONFIG_MANAGER", ""), "Specifies the manager to use for dynamically configuring changes with the underlying router. Supports 'haproxy-manager'.") + flag.BoolVar(&o.UseHAProxyConfigManager, "haproxy-config-manager", isTrue(util.Env("ROUTER_HAPROXY_CONFIG_MANAGER", "")), "Use the haproxy config manager (and dynamic configuration API) to configure route and endpoint changes. 
Reduces the number of haproxy reloads needed on configuration changes.") flag.DurationVar(&o.CommitInterval, "commit-interval", getIntervalFromEnv("COMMIT_INTERVAL", defaultCommitInterval), "Controls how often to commit (to the actual config) all the changes made using the router specific dynamic configuration manager.") flag.StringVar(&o.BlueprintRouteNamespace, "blueprint-route-namespace", util.Env("ROUTER_BLUEPRINT_ROUTE_NAMESPACE", ""), "Specifies the namespace which contains the routes that serve as blueprints for the dynamic configuration manager.") flag.StringVar(&o.BlueprintRouteLabelSelector, "blueprint-route-labels", util.Env("ROUTER_BLUEPRINT_ROUTE_LABELS", ""), "A label selector to apply to the routes in the blueprint route namespace. These selected routes will serve as blueprints for the dynamic dynamic configuration manager.") @@ -455,7 +455,8 @@ func (o *TemplateRouterOptions) Run() error { } var cfgManager templateplugin.ConfigManager - if o.ConfigManagerName == "haproxy-manager" { + var blueprintPlugin router.Plugin + if o.UseHAProxyConfigManager { blueprintRoutes, err := o.blueprintRoutes(routeclient) if err != nil { return err @@ -469,6 +470,9 @@ func (o *TemplateRouterOptions) Run() error { WildcardRoutesAllowed: o.AllowWildcardRoutes, } cfgManager = haproxyconfigmanager.NewHAProxyConfigManager(cmopts) + if len(o.BlueprintRouteNamespace) > 0 { + blueprintPlugin = haproxyconfigmanager.NewBlueprintPlugin(cfgManager) + } } pluginCfg := templateplugin.TemplatePluginConfig{ diff --git a/pkg/oc/admin/router/router.go b/pkg/oc/admin/router/router.go index fa63a75c84c0..1f33b09a1134 100644 --- a/pkg/oc/admin/router/router.go +++ b/pkg/oc/admin/router/router.go @@ -806,14 +806,11 @@ func RunCmdRouter(f kcmdutil.Factory, cmd *cobra.Command, out, errout io.Writer, env["ROUTER_CANONICAL_HOSTNAME"] = cfg.RouterCanonicalHostname } // automatically start the internal metrics agent if we are handling a known type - if cfg.Type == "haproxy-router" { - 
env["ROUTER_CONFIG_MANAGER"] = "haproxy-manager" - if cfg.StatsPort != 0 { - env["ROUTER_LISTEN_ADDR"] = fmt.Sprintf("0.0.0.0:%d", cfg.StatsPort) - env["ROUTER_METRICS_TYPE"] = "haproxy" - env["ROUTER_METRICS_TLS_CERT_FILE"] = "/etc/pki/tls/metrics/tls.crt" - env["ROUTER_METRICS_TLS_KEY_FILE"] = "/etc/pki/tls/metrics/tls.key" - } + if cfg.Type == "haproxy-router" && cfg.StatsPort != 0 { + env["ROUTER_LISTEN_ADDR"] = fmt.Sprintf("0.0.0.0:%d", cfg.StatsPort) + env["ROUTER_METRICS_TYPE"] = "haproxy" + env["ROUTER_METRICS_TLS_CERT_FILE"] = "/etc/pki/tls/metrics/tls.crt" + env["ROUTER_METRICS_TLS_KEY_FILE"] = "/etc/pki/tls/metrics/tls.key" } mtlsAuth := strings.TrimSpace(cfg.MutualTLSAuth) if len(mtlsAuth) > 0 && mtlsAuth != defaultMutualTLSAuth { diff --git a/pkg/router/template/configmanager/haproxy/blueprint_plugin.go b/pkg/router/template/configmanager/haproxy/blueprint_plugin.go new file mode 100644 index 000000000000..fe0165993aab --- /dev/null +++ b/pkg/router/template/configmanager/haproxy/blueprint_plugin.go @@ -0,0 +1,55 @@ +package haproxy + +import ( + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/watch" + kapi "k8s.io/kubernetes/pkg/apis/core" + + routeapi "github.com/openshift/origin/pkg/route/apis/route" + templaterouter "github.com/openshift/origin/pkg/router/template" +) + +// BlueprintPlugin implements the router.Plugin interface to process routes +// from the blueprint namespace for the associated config manager. +type BlueprintPlugin struct { + manager templaterouter.ConfigManager +} + +// NewBlueprintPlugin returns a new blueprint routes plugin. +func NewBlueprintPlugin(cm templaterouter.ConfigManager) *BlueprintPlugin { + return &BlueprintPlugin{manager: cm} +} + +// HandleRoute processes watch events on blueprint routes. 
+func (p *BlueprintPlugin) HandleRoute(eventType watch.EventType, route *routeapi.Route) error { + switch eventType { + case watch.Added, watch.Modified: + p.manager.AddBlueprint(route) + case watch.Deleted: + p.manager.RemoveBlueprint(route) + } + + return nil +} + +// HandleNode processes watch events on the Node resource. +func (p *BlueprintPlugin) HandleNode(eventType watch.EventType, node *kapi.Node) error { + return nil +} + +// HandleEndpoints processes watch events on the Endpoints resource. +func (p *BlueprintPlugin) HandleEndpoints(eventType watch.EventType, endpoints *kapi.Endpoints) error { + return nil +} + +// HandleNamespaces processes watch events on namespaces. +func (p *BlueprintPlugin) HandleNamespaces(namespaces sets.String) error { + return nil +} + +// Commit commits the changes made to a watched resource. +func (p *BlueprintPlugin) Commit() error { + // Nothing to do as the config manager does an automatic commit when + // any blueprint routes change. + return nil +} diff --git a/pkg/router/template/configmanager/haproxy/blueprint_plugin_test.go b/pkg/router/template/configmanager/haproxy/blueprint_plugin_test.go new file mode 100644 index 000000000000..4da5cb1a5e81 --- /dev/null +++ b/pkg/router/template/configmanager/haproxy/blueprint_plugin_test.go @@ -0,0 +1,212 @@ +package haproxy + +import ( + "fmt" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/watch" + kapi "k8s.io/kubernetes/pkg/apis/core" + + routeapi "github.com/openshift/origin/pkg/route/apis/route" + templaterouter "github.com/openshift/origin/pkg/router/template" +) + +type fakeConfigManager struct { + blueprints map[string]*routeapi.Route +} + +func newFakeConfigManager() *fakeConfigManager { + return &fakeConfigManager{ + blueprints: make(map[string]*routeapi.Route), + } +} + +func (cm *fakeConfigManager) Initialize(router templaterouter.RouterInterface, 
certPath string) { +} + +func (cm *fakeConfigManager) AddBlueprint(route *routeapi.Route) { + cm.blueprints[routeKey(route)] = route +} + +func (cm *fakeConfigManager) RemoveBlueprint(route *routeapi.Route) { + delete(cm.blueprints, routeKey(route)) +} + +func (cm *fakeConfigManager) FindBlueprint(id string) (*routeapi.Route, bool) { + route, ok := cm.blueprints[id] + return route, ok +} + +func (cm *fakeConfigManager) Register(id string, route *routeapi.Route) { +} + +func (cm *fakeConfigManager) AddRoute(id string, route *routeapi.Route) error { + return nil +} + +func (cm *fakeConfigManager) RemoveRoute(id string, route *routeapi.Route) error { + return nil +} + +func (cm *fakeConfigManager) ReplaceRouteEndpoints(id string, oldEndpoints, newEndpoints []templaterouter.Endpoint, weight int32) error { + return nil +} + +func (cm *fakeConfigManager) RemoveRouteEndpoints(id string, endpoints []templaterouter.Endpoint) error { + return nil +} + +func (cm *fakeConfigManager) Notify(event templaterouter.RouterEventType) { +} + +func (cm *fakeConfigManager) ServerTemplateName(id string) string { + return "fakeConfigManager" +} + +func (cm *fakeConfigManager) ServerTemplateSize(id string) string { + return "1" +} + +func (cm *fakeConfigManager) GenerateDynamicServerNames(id string) []string { + return []string{} +} + +func routeKey(route *routeapi.Route) string { + return fmt.Sprintf("%s:%s", route.Name, route.Namespace) +} + +// TestHandleRoute test route watch events +func TestHandleRoute(t *testing.T) { + original := metav1.Time{Time: time.Now()} + + route := &routeapi.Route{ + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: original, + Namespace: "bp", + Name: "chevron", + }, + Spec: routeapi.RouteSpec{ + Host: "www.blueprints.org", + To: routeapi.RouteTargetReference{ + Name: "TestService", + Weight: new(int32), + }, + }, + } + + cm := newFakeConfigManager() + plugin := NewBlueprintPlugin(cm) + plugin.HandleRoute(watch.Added, route) + + id := routeKey(route) + if 
_, ok := cm.FindBlueprint(id); !ok { + t.Errorf("TestHandleRoute was unable to find a blueprint %s after HandleRoute was called", id) + } + + // update a blueprint with a newer time and host + v2route := route.DeepCopy() + v2route.CreationTimestamp = metav1.Time{Time: original.Add(time.Hour)} + v2route.Spec.Host = "updated.blueprint.org" + if err := plugin.HandleRoute(watch.Added, v2route); err != nil { + t.Errorf("TestHandleRoute unexpected error after blueprint update: %v", err) + } + + blueprints := []*routeapi.Route{v2route, route} + for _, r := range blueprints { + // delete the blueprint and check that it doesn't exist. + if err := plugin.HandleRoute(watch.Deleted, v2route); err != nil { + t.Errorf("TestHandleRoute unexpected error after blueprint delete: %v", err) + } + + routeId := routeKey(r) + if _, ok := cm.FindBlueprint(routeId); ok { + t.Errorf("TestHandleRoute found a blueprint %s after it was deleted", routeId) + } + } +} + +func TestHandleNode(t *testing.T) { + node := &kapi.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"design": "blueprint"}, + }, + } + + cm := newFakeConfigManager() + plugin := NewBlueprintPlugin(cm) + + if err := plugin.HandleNode(watch.Added, node); err != nil { + t.Errorf("TestHandleNode unexpected error after node add: %v", err) + } + + if err := plugin.HandleNode(watch.Modified, node); err != nil { + t.Errorf("TestHandleNode unexpected error after node modify: %v", err) + } + + if err := plugin.HandleNode(watch.Deleted, node); err != nil { + t.Errorf("TestHandleNode unexpected error after node delete: %v", err) + } +} + +func TestHandleEndpoints(t *testing.T) { + endpoints := &kapi.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "bpe", + Name: "shell", + }, + Subsets: []kapi.EndpointSubset{{ + Addresses: []kapi.EndpointAddress{{IP: "1.1.1.1"}}, + Ports: []kapi.EndpointPort{{Port: 9876}}, + }}, + } + + v2Endpoints := &kapi.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "bpe", + Name: 
"shell", + }, + Subsets: []kapi.EndpointSubset{{ + Addresses: []kapi.EndpointAddress{{IP: "1.1.1.1"}, {IP: "2.2.2.2"}}, + Ports: []kapi.EndpointPort{{Port: 9876}, {Port: 8888}}, + }}, + } + + cm := newFakeConfigManager() + plugin := NewBlueprintPlugin(cm) + + if err := plugin.HandleEndpoints(watch.Added, endpoints); err != nil { + t.Errorf("TestHandleEndpoints unexpected error after endpoints add: %v", err) + } + + if err := plugin.HandleEndpoints(watch.Modified, v2Endpoints); err != nil { + t.Errorf("TestHandleEndpoints unexpected error after endpoints modify: %v", err) + } + + if err := plugin.HandleEndpoints(watch.Deleted, v2Endpoints); err != nil { + t.Errorf("TestHandleEndpoints unexpected error after endpoints delete: %v", err) + } +} + +func TestHandleNamespaces(t *testing.T) { + cm := newFakeConfigManager() + plugin := NewBlueprintPlugin(cm) + + if err := plugin.HandleNamespaces(sets.String{}); err != nil { + t.Errorf("TestHandleNamespaces unexpected error after empty set: %v", err) + } + + if err := plugin.HandleNamespaces(sets.NewString("76")); err != nil { + t.Errorf("TestHandleNamespaces unexpected error after set: %v", err) + } + + if err := plugin.HandleNamespaces(sets.NewString("76", "711")); err != nil { + t.Errorf("TestHandleNamespaces unexpected error after set multiple: %v", err) + } + + if err := plugin.HandleNamespaces(sets.NewString("arco")); err != nil { + t.Errorf("TestHandleNamespaces unexpected error after reset: %v", err) + } +} diff --git a/pkg/router/template/configmanager/haproxy/manager.go b/pkg/router/template/configmanager/haproxy/manager.go index 0f176b363661..b6c2686f3c02 100644 --- a/pkg/router/template/configmanager/haproxy/manager.go +++ b/pkg/router/template/configmanager/haproxy/manager.go @@ -181,13 +181,92 @@ func (cm *haproxyConfigManager) Initialize(router templaterouter.RouterInterface cm.lock.Lock() cm.router = router cm.defaultCertificate = string(certBytes) + blueprints := cm.blueprintRoutes cm.lock.Unlock() - 
cm.provisionBackendPools() + // Ensure this is done outside of the lock as the router will call + // back into the manager code for all the routes we provision. + for _, r := range blueprints { + cm.provisionRoutePool(r) + } glog.V(2).Infof("haproxy Config Manager router will flush out any dynamically configured changes within %s of each other", cm.commitInterval.String()) } +// AddBlueprint adds a new (or replaces an existing) route blueprint. +func (cm *haproxyConfigManager) AddBlueprint(route *routeapi.Route) { + newRoute := route.DeepCopy() + newRoute.Namespace = blueprintRoutePoolNamespace + newRoute.Spec.Host = "" + + cm.lock.Lock() + existingBlueprints := cm.blueprintRoutes + cm.lock.Unlock() + + routeExists := false + updated := false + blueprints := make([]*routeapi.Route, 0) + for _, r := range existingBlueprints { + if r.Namespace == newRoute.Namespace && r.Name == newRoute.Name { + // Existing route, check if anything changed, + // other than the host name. + routeExists = true + newRoute.Spec.Host = r.Spec.Host + if !reflect.DeepEqual(r, newRoute) { + updated = true + blueprints = append(blueprints, route.DeepCopy()) + continue + } + } + blueprints = append(blueprints, r) + } + + if !routeExists { + blueprints = append(blueprints, route.DeepCopy()) + updated = true + } + + if !updated { + return + } + + cm.lock.Lock() + cm.blueprintRoutes = blueprints + cm.lock.Unlock() + + cm.provisionRoutePool(route) +} + +// RemoveBlueprint removes a route blueprint. 
+func (cm *haproxyConfigManager) RemoveBlueprint(route *routeapi.Route) { + deletedRoute := route.DeepCopy() + deletedRoute.Namespace = blueprintRoutePoolNamespace + + cm.lock.Lock() + existingBlueprints := cm.blueprintRoutes + cm.lock.Unlock() + + updated := false + blueprints := make([]*routeapi.Route, 0) + for _, r := range existingBlueprints { + if r.Namespace == deletedRoute.Namespace && r.Name == deletedRoute.Name { + updated = true + } else { + blueprints = append(blueprints, r) + } + } + + if !updated { + return + } + + cm.lock.Lock() + cm.blueprintRoutes = blueprints + cm.lock.Unlock() + + cm.removeRoutePool(route) +} + // Register registers an id with an expected haproxy backend for a route. func (cm *haproxyConfigManager) Register(id string, route *routeapi.Route) { wildcard := cm.wildcardRoutesAllowed && (route.Spec.WildcardPolicy == routeapi.WildcardPolicySubdomain) @@ -226,7 +305,10 @@ func (cm *haproxyConfigManager) AddRoute(id string, route *routeapi.Route) error cm.Register(id, route) cm.lock.Lock() - defer cm.lock.Unlock() + defer func() { + cm.lock.Unlock() + cm.scheduleRouterReload() + }() slotName, err := cm.findFreeBackendPoolSlot(matchedBlueprint) if err != nil { @@ -268,7 +350,10 @@ func (cm *haproxyConfigManager) RemoveRoute(id string, route *routeapi.Route) er } cm.lock.Lock() - defer cm.lock.Unlock() + defer func() { + cm.lock.Unlock() + cm.scheduleRouterReload() + }() entry, ok := cm.backendEntries[id] if !ok { @@ -315,8 +400,14 @@ func (cm *haproxyConfigManager) ReplaceRouteEndpoints(id string, oldEndpoints, n return fmt.Errorf("Router reload in progress, cannot dynamically add endpoints for %s", id) } + configChanged := false cm.lock.Lock() - defer cm.lock.Unlock() + defer func() { + cm.lock.Unlock() + if configChanged { + cm.scheduleRouterReload() + } + }() entry, ok := cm.backendEntries[id] if !ok { @@ -355,6 +446,7 @@ func (cm *haproxyConfigManager) ReplaceRouteEndpoints(id string, oldEndpoints, n delete(modifiedEndpoints, v2ep.ID) 
} } else { + configChanged = true deletedEndpoints[ep.ID] = ep } } @@ -384,6 +476,7 @@ func (cm *haproxyConfigManager) ReplaceRouteEndpoints(id string, oldEndpoints, n } if _, ok := deletedEndpoints[relatedEndpointID]; ok { + configChanged = true glog.V(4).Infof("For deleted endpoint %s, disabling server %s", relatedEndpointID, s.Name) backend.DisableServer(s.Name) if _, ok := entry.dynamicServerMap[s.Name]; ok { @@ -394,6 +487,7 @@ func (cm *haproxyConfigManager) ReplaceRouteEndpoints(id string, oldEndpoints, n } if ep, ok := modifiedEndpoints[relatedEndpointID]; ok { + configChanged = true glog.V(4).Infof("For modified endpoint %s, setting server %s info to %s:%s with weight %d and enabling", relatedEndpointID, s.Name, ep.IP, ep.Port, weight) backend.UpdateServerInfo(s.Name, ep.IP, ep.Port, weight, weightIsRelative) @@ -419,6 +513,7 @@ func (cm *haproxyConfigManager) ReplaceRouteEndpoints(id string, oldEndpoints, n } // Add entry for the dyamic server used. + configChanged = true entry.dynamicServerMap[name] = ep.ID glog.V(4).Infof("For added endpoint %s, setting dynamic server %s info: (%s, %s, %d) and enabling", ep.ID, name, ep.IP, ep.Port, weight) @@ -447,7 +542,10 @@ func (cm *haproxyConfigManager) RemoveRouteEndpoints(id string, endpoints []temp } cm.lock.Lock() - defer cm.lock.Unlock() + defer func() { + cm.lock.Unlock() + cm.scheduleRouterReload() + }() entry, ok := cm.backendEntries[id] if !ok { @@ -503,17 +601,10 @@ func (cm *haproxyConfigManager) Notify(event templaterouter.RouterEventType) { } } -// Commit defers calling commit on the associated template router using a -// internal flush timer. +// Commit commits the configuration and reloads the associated router. 
func (cm *haproxyConfigManager) Commit() { glog.V(4).Infof("Committing dynamic config manager changes") - - cm.lock.Lock() - defer cm.lock.Unlock() - - if cm.commitTimer == nil { - cm.commitTimer = time.AfterFunc(cm.commitInterval, cm.commitRouterConfig) - } + cm.commitRouterConfig() } // ServerTemplateName returns the dynamic server template name. @@ -551,6 +642,16 @@ func (cm *haproxyConfigManager) GenerateDynamicServerNames(id string) []string { return []string{} } +// scheduleRouterReload schedules a reload by deferring commit on the +// associated template router using an internal flush timer. +func (cm *haproxyConfigManager) scheduleRouterReload() { + cm.lock.Lock() + defer cm.lock.Unlock() + if cm.commitTimer == nil { + cm.commitTimer = time.AfterFunc(cm.commitInterval, cm.commitRouterConfig) + } +} + // commitRouterConfig calls Commit on the associated template router. func (cm *haproxyConfigManager) commitRouterConfig() { cm.lock.Lock() @@ -581,20 +682,29 @@ func (cm *haproxyConfigManager) isManagedPoolRoute(route *routeapi.Route) bool { return route.Namespace == blueprintRoutePoolNamespace } -// provisionBackendPools pre-allocates pools of backends based on the -// different blueprint routes. -func (cm *haproxyConfigManager) provisionBackendPools() { - for _, r := range cm.blueprintRoutes { - poolSize := getPoolSize(r, cm.blueprintRoutePoolSize) - glog.Infof("Provisioning blueprint route pool %s/%s-[1-%d]", - r.Namespace, r.Name, poolSize) - for i := 0; i < poolSize; i++ { - route := r.DeepCopy() - route.Namespace = blueprintRoutePoolNamespace - route.Name = fmt.Sprintf("%v-%v", route.Name, i+1) - route.Spec.Host = "" - cm.router.AddRoute(route) - } +// provisionRoutePool provisions a pre-allocated pool of routes based on a blueprint. 
+func (cm *haproxyConfigManager) provisionRoutePool(blueprint *routeapi.Route) { + poolSize := getPoolSize(blueprint, cm.blueprintRoutePoolSize) + glog.Infof("Provisioning blueprint route pool %s/%s-[1-%d]", blueprint.Namespace, blueprint.Name, poolSize) + for i := 0; i < poolSize; i++ { + route := blueprint.DeepCopy() + route.Namespace = blueprintRoutePoolNamespace + route.Name = fmt.Sprintf("%v-%v", route.Name, i+1) + route.Spec.Host = "" + cm.router.AddRoute(route) + } +} + +// removeRoutePool removes a pre-allocated pool of routes based on a blueprint. +func (cm *haproxyConfigManager) removeRoutePool(blueprint *routeapi.Route) { + poolSize := getPoolSize(blueprint, cm.blueprintRoutePoolSize) + glog.Infof("Removing blueprint route pool %s/%s-[1-%d]", blueprint.Namespace, blueprint.Name, poolSize) + for i := 0; i < poolSize; i++ { + route := blueprint.DeepCopy() + route.Namespace = blueprintRoutePoolNamespace + route.Name = fmt.Sprintf("%v-%v", route.Name, i+1) + route.Spec.Host = "" + cm.router.RemoveRoute(route) } } diff --git a/pkg/router/template/types.go b/pkg/router/template/types.go index b09fbc4b9347..edebd5310c48 100644 --- a/pkg/router/template/types.go +++ b/pkg/router/template/types.go @@ -180,6 +180,12 @@ type ConfigManager interface { // Initialize initializes the config manager. Initialize(router RouterInterface, certPath string) + // AddBlueprint adds a new (or replaces an existing) route blueprint. + AddBlueprint(route *routeapi.Route) + + // RemoveBlueprint removes a route blueprint. + RemoveBlueprint(route *routeapi.Route) + // Register registers an id to be associated with a route. 
Register(id string, route *routeapi.Route) diff --git a/test/end-to-end/router_test.go b/test/end-to-end/router_test.go index 733d74361415..836e6a1514f7 100644 --- a/test/end-to-end/router_test.go +++ b/test/end-to-end/router_test.go @@ -1322,7 +1322,7 @@ func createAndStartRouterContainerExtended(dockerCli *dockerClient.Client, maste fmt.Sprintf("DEFAULT_CERTIFICATE=%s\n%s", defaultCert, defaultKey), fmt.Sprintf("ROUTER_BIND_PORTS_AFTER_SYNC=%s", strconv.FormatBool(bindPortsAfterSync)), fmt.Sprintf("NAMESPACE_LABELS=%s", namespaceLabels), - fmt.Sprintf("ROUTER_CONFIG_MANAGER=haproxy-manager"), + fmt.Sprintf("ROUTER_HAPROXY_CONFIG_MANAGER=true"), } reloadIntVar := fmt.Sprintf("RELOAD_INTERVAL=%ds", reloadInterval) From a06d009a75ac61041ae55a63bd9b7988d07d9c7c Mon Sep 17 00:00:00 2001 From: ramr Date: Tue, 5 Jun 2018 17:18:41 -0700 Subject: [PATCH 4/9] Changes due to rebase - fixup tests and fix a bug that rebase caused with passthrough routes. --- .../configmanager/haproxy/fake_haproxy.go | 2 +- pkg/router/template/template_helper_test.go | 41 ------------------- pkg/router/template/util/haproxy/map_entry.go | 2 +- .../template/util/haproxy/map_entry_test.go | 6 ++- pkg/router/template/util/util_test.go | 38 +---------------- 5 files changed, 8 insertions(+), 81 deletions(-) diff --git a/pkg/router/template/configmanager/haproxy/fake_haproxy.go b/pkg/router/template/configmanager/haproxy/fake_haproxy.go index 2d961cd283e6..f6de2432c28b 100644 --- a/pkg/router/template/configmanager/haproxy/fake_haproxy.go +++ b/pkg/router/template/configmanager/haproxy/fake_haproxy.go @@ -356,7 +356,7 @@ func (p *fakeHAProxy) showServers(name string) string { if name != p.backendName { if _, ok := onePodAndOneDynamicServerBackends[name]; ok { - return fmt.Sprintf(onePodAndOneDynamicServerBackendTemplate, name) + return fmt.Sprintf(onePodAndOneDynamicServerBackendTemplate, name, name) } if len(name) > 0 { return fmt.Sprintf("Can't find backend.\n") diff --git 
a/pkg/router/template/template_helper_test.go b/pkg/router/template/template_helper_test.go index cd2a0a0efc13..dc01be92a0a4 100644 --- a/pkg/router/template/template_helper_test.go +++ b/pkg/router/template/template_helper_test.go @@ -680,44 +680,3 @@ func TestGetPrimaryAliasKey(t *testing.T) { } } } - -func TestGenBackendNamePrefix(t *testing.T) { - testPrefixes := []struct { - name string - termination routeapi.TLSTerminationType - expectedPrefix string - }{ - { - name: "http route", - termination: routeapi.TLSTerminationType(""), - expectedPrefix: "be_http", - }, - { - name: "edge secured route", - termination: routeapi.TLSTerminationEdge, - expectedPrefix: "be_edge_http", - }, - { - name: "reencrypt route", - termination: routeapi.TLSTerminationReencrypt, - expectedPrefix: "be_secure", - }, - { - name: "passthrough route", - termination: routeapi.TLSTerminationPassthrough, - expectedPrefix: "be_tcp", - }, - { - name: "unknown route", - termination: routeapi.TLSTerminationType("foo"), - expectedPrefix: "be_http", - }, - } - - for _, tc := range testPrefixes { - prefix := GenBackendNamePrefix(tc.termination) - if prefix != tc.expectedPrefix { - t.Errorf("%s: expected %s to get %s, but got %s", tc.name, tc.expectedPrefix, prefix) - } - } -} diff --git a/pkg/router/template/util/haproxy/map_entry.go b/pkg/router/template/util/haproxy/map_entry.go index d3594a1c2f18..b7c9541da3a3 100644 --- a/pkg/router/template/util/haproxy/map_entry.go +++ b/pkg/router/template/util/haproxy/map_entry.go @@ -73,7 +73,7 @@ func generateTCPMapEntry(cfg *BackendConfig) *HAProxyMapEntry { if len(cfg.Host) > 0 && len(cfg.Path) == 0 && (cfg.Termination == routeapi.TLSTerminationPassthrough || cfg.Termination == routeapi.TLSTerminationReencrypt) { return &HAProxyMapEntry{ Key: templateutil.GenerateRouteRegexp(cfg.Host, "", cfg.IsWildcard), - Value: cfg.Name, + Value: fmt.Sprintf("%s:%s", templateutil.GenerateBackendNamePrefix(cfg.Termination), cfg.Name), } } diff --git 
a/pkg/router/template/util/haproxy/map_entry_test.go b/pkg/router/template/util/haproxy/map_entry_test.go index 279fcb7a8a15..e685432db60b 100644 --- a/pkg/router/template/util/haproxy/map_entry_test.go +++ b/pkg/router/template/util/haproxy/map_entry_test.go @@ -579,11 +579,15 @@ func TestGenerateTCPMapEntry(t *testing.T) { testCases := []*testCase{} for _, termination := range getTestTerminations() { for _, policy := range getTestInsecurePolicies() { + backendKey := fmt.Sprintf("be_secure:%s", tt.backendKey) + if termination == routeapi.TLSTerminationPassthrough { + backendKey = fmt.Sprintf("be_tcp:%s", tt.backendKey) + } testCases = append(testCases, &testCase{ name: fmt.Sprintf("%s:termination=%s:policy=%s", tt.name, termination, policy), cfg: testBackendConfig(tt.backendKey, tt.hostname, tt.path, tt.wildcard, termination, policy, false), - expectation: buildTestExpectation(tt.backendKey, tt.expectedKey, termination), + expectation: buildTestExpectation(backendKey, tt.expectedKey, termination), }) } } diff --git a/pkg/router/template/util/util_test.go b/pkg/router/template/util/util_test.go index beaa568084fa..8a008d0fad6f 100644 --- a/pkg/router/template/util/util_test.go +++ b/pkg/router/template/util/util_test.go @@ -211,42 +211,6 @@ func TestGenCertificateHostName(t *testing.T) { } } -func TestGenerateBackendNamePrefix(t *testing.T) { - tests := []struct { - name string - termination routeapi.TLSTerminationType - expected string - }{ - { - name: "empty termination", - termination: routeapi.TLSTerminationType(""), - expected: "be_http", - }, - { - name: "edge termination", - termination: routeapi.TLSTerminationEdge, - expected: "be_edge_http", - }, - { - name: "reencrypt termination", - termination: routeapi.TLSTerminationReencrypt, - expected: "be_secure", - }, - { - name: "passthru termination", - termination: routeapi.TLSTerminationPassthrough, - expected: "be_tcp", - }, - } - - for _, tc := range tests { - prefix := 
GenerateBackendNamePrefix(tc.termination) - if prefix != tc.expected { - t.Errorf("%s: expected %s to match %s, but didn't", tc.name, tc.expected, prefix) - } - } -} - func TestGenerateBackendNamePrefix(t *testing.T) { testPrefixes := []struct { name string @@ -283,7 +247,7 @@ func TestGenerateBackendNamePrefix(t *testing.T) { for _, tc := range testPrefixes { prefix := GenerateBackendNamePrefix(tc.termination) if prefix != tc.expectedPrefix { - t.Errorf("%s: expected %s to get %s, but got %s", tc.name, tc.expectedPrefix, prefix) + t.Errorf("TestGenerateBackendNamePrefix: expected %s to get %s, but got %s", tc.name, tc.expectedPrefix, prefix) } } } From 5f45fc00d2d254c90a88cff0ba9aed4baf1295a1 Mon Sep 17 00:00:00 2001 From: ramr Date: Thu, 14 Jun 2018 15:43:19 -0700 Subject: [PATCH 5/9] Bug fix after rebase to handle http routes without reloading. --- pkg/router/template/configmanager/haproxy/manager.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/pkg/router/template/configmanager/haproxy/manager.go b/pkg/router/template/configmanager/haproxy/manager.go index b6c2686f3c02..55ccbe5ddb7f 100644 --- a/pkg/router/template/configmanager/haproxy/manager.go +++ b/pkg/router/template/configmanager/haproxy/manager.go @@ -811,12 +811,16 @@ func (cm *haproxyConfigManager) findMatchingBlueprint(route *routeapi.Route) *ro if route.Spec.TLS == nil && candidate.Spec.TLS == nil { return candidate } - if route.Spec.TLS != nil && candidate.Spec.TLS != nil { + tlsSpec := route.Spec.TLS + if tlsSpec == nil { + tlsSpec = &routeapi.TLSConfig{Termination: routeapi.TLSTerminationType("")} + } + if tlsSpec != nil && candidate.Spec.TLS != nil { // So we need compare the TLS fields but don't care // if InsecureEdgeTerminationPolicy doesn't match. 
candidateCopy := candidate.DeepCopy() - candidateCopy.Spec.TLS.InsecureEdgeTerminationPolicy = route.Spec.TLS.InsecureEdgeTerminationPolicy - if reflect.DeepEqual(route.Spec.TLS, candidateCopy.Spec.TLS) { + candidateCopy.Spec.TLS.InsecureEdgeTerminationPolicy = tlsSpec.InsecureEdgeTerminationPolicy + if reflect.DeepEqual(tlsSpec, candidateCopy.Spec.TLS) { return candidateCopy } } From 9ddee4fc630c4aba8eb1cce610ccb33851dfa0fa Mon Sep 17 00:00:00 2001 From: ramr Date: Wed, 20 Jun 2018 22:30:31 -0700 Subject: [PATCH 6/9] Add mini stress test for the different types of routes. --- test/extended/router/config-manager.go | 177 +++++++++++ test/extended/router/headers.go | 2 +- test/extended/router/metrics.go | 2 +- test/extended/router/reencrypt.go | 2 +- test/extended/router/router.go | 2 +- test/extended/router/scoped.go | 2 +- test/extended/router/stress.go | 2 +- test/extended/router/unprivileged.go | 2 +- test/extended/router/weighted.go | 2 +- test/extended/testdata/bindata.go | 294 ++++++++++++++++++ .../testdata/router-config-manager.yaml | 274 ++++++++++++++++ 11 files changed, 753 insertions(+), 8 deletions(-) create mode 100644 test/extended/router/config-manager.go create mode 100644 test/extended/testdata/router-config-manager.yaml diff --git a/test/extended/router/config-manager.go b/test/extended/router/config-manager.go new file mode 100644 index 000000000000..cec5b1cba503 --- /dev/null +++ b/test/extended/router/config-manager.go @@ -0,0 +1,177 @@ +package router + +import ( + "fmt" + "os" + "strings" + "time" + + g "github.com/onsi/ginkgo" + o "github.com/onsi/gomega" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + e2e "k8s.io/kubernetes/test/e2e/framework" + + routeclientset "github.com/openshift/client-go/route/clientset/versioned" + exutil "github.com/openshift/origin/test/extended/util" +) + +const timeoutSeconds = 3 * 60 + +var _ = 
g.Describe("[Conformance][Area:Networking][Feature:Router]", func() { + defer g.GinkgoRecover() + var ( + configPath = exutil.FixturePath("testdata", "router-config-manager.yaml") + oc *exutil.CLI + ns string + ) + + // this hook must be registered before the framework namespace teardown + // hook + g.AfterEach(func() { + if g.CurrentGinkgoTestDescription().Failed { + client := routeclientset.NewForConfigOrDie(oc.AdminConfig()).Route().Routes(ns) + if routes, _ := client.List(metav1.ListOptions{}); routes != nil { + outputIngress(routes.Items...) + } + exutil.DumpPodLogsStartingWith("router-", oc) + } + }) + + oc = exutil.NewCLI("router-config-manager", exutil.KubeConfigPath()) + + g.BeforeEach(func() { + ns = oc.Namespace() + + image := os.Getenv("OS_IMAGE_PREFIX") + if len(image) == 0 { + image = "openshift/origin" + } + image += "-haproxy-router" + + if dc, err := oc.AdminAppsClient().Apps().DeploymentConfigs("default").Get("router", metav1.GetOptions{}); err == nil { + if len(dc.Spec.Template.Spec.Containers) > 0 && dc.Spec.Template.Spec.Containers[0].Image != "" { + image = dc.Spec.Template.Spec.Containers[0].Image + } + } + + err := oc.AsAdmin().Run("new-app").Args("-f", configPath, "-p", "IMAGE="+image).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + }) + + g.Describe("The HAProxy router", func() { + g.It("should serve the correct routes when running with the haproxy config manager", func() { + + ns := oc.KubeFramework().Namespace.Name + execPodName := exutil.CreateExecPodOrFail(oc.AdminKubeClient().CoreV1(), ns, "execpod") + defer func() { oc.AdminKubeClient().CoreV1().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }() + + g.By(fmt.Sprintf("creating a router with haproxy config manager from a config file %q", configPath)) + + var routerIP string + err := wait.Poll(time.Second, timeoutSeconds*time.Second, func() (bool, error) { + pod, err := 
oc.KubeFramework().ClientSet.CoreV1().Pods(oc.KubeFramework().Namespace.Name).Get("router-haproxy-cfgmgr", metav1.GetOptions{}) + if err != nil { + return false, err + } + if len(pod.Status.PodIP) == 0 { + return false, nil + } + routerIP = pod.Status.PodIP + return true, nil + }) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("waiting for the healthz endpoint to respond") + healthzURI := fmt.Sprintf("http://%s:1936/healthz", routerIP) + err = waitForRouterOKResponseExec(ns, execPodName, healthzURI, routerIP, timeoutSeconds) + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("waiting for the valid routes to respond") + err = waitForRouteToRespond(ns, execPodName, "http", "insecure.hapcm.test", "/", routerIP, 0) + o.Expect(err).NotTo(o.HaveOccurred()) + + for _, host := range []string{"edge.allow.hapcm.test", "reencrypt.hapcm.test", "passthrough.hapcm.test"} { + err = waitForRouteToRespond(ns, execPodName, "https", host, "/", routerIP, 0) + o.Expect(err).NotTo(o.HaveOccurred()) + } + + g.By("mini stress test by adding (and removing) different routes and checking that they are exposed") + for i := 0; i < 16; i++ { + name := fmt.Sprintf("hapcm-stress-insecure-%d", i) + hostName := fmt.Sprintf("stress.insecure-%d.hapcm.test", i) + err := oc.AsAdmin().Run("expose").Args("service", "insecure-service", "--name", name, "--hostname", hostName, "--labels", "select=haproxy-cfgmgr").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + err = waitForRouteToRespond(ns, execPodName, "http", hostName, "/", routerIP, 0) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = oc.AsAdmin().Run("delete").Args("route", name).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + routeTypes := []string{"edge", "reencrypt", "passthrough"} + for _, t := range routeTypes { + name := fmt.Sprintf("hapcm-stress-%s-%d", t, i) + hostName := fmt.Sprintf("stress.%s-%d.hapcm.test", t, i) + serviceName := "secure-service" + if t == "edge" { + serviceName = "insecure-service" + } + + err := 
oc.AsAdmin().Run("create").Args("route", t, name, "--service", serviceName, "--hostname", hostName).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + err = oc.AsAdmin().Run("label").Args("route", name, "select=haproxy-cfgmgr").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + err = waitForRouteToRespond(ns, execPodName, "https", hostName, "/", routerIP, 0) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = oc.AsAdmin().Run("delete").Args("route", name).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + } + } + }) + }) +}) + +func waitForRouteToRespond(ns, execPodName, proto, host, abspath, ipaddr string, port int) error { + if port == 0 { + switch proto { + case "http": + port = 80 + case "https": + port = 443 + default: + port = 80 + } + } + uri := fmt.Sprintf("%s://%s:%d%s", proto, host, port, abspath) + cmd := fmt.Sprintf(` + set -e + for i in $(seq 1 %d); do + code=$( curl -k -s -o /dev/null -w '%%{http_code}\n' --resolve %s:%d:%s %q ) || rc=$? + if [[ "${rc:-0}" -eq 0 ]]; then + echo $code + if [[ $code -eq 200 ]]; then + exit 0 + fi + if [[ $code -ne 503 ]]; then + exit 1 + fi + else + echo "error ${rc}" 1>&2 + fi + sleep 1 + done + `, timeoutSeconds, host, port, ipaddr, uri) + output, err := e2e.RunHostCmd(ns, execPodName, cmd) + if err != nil { + return fmt.Errorf("host command failed: %v\n%s", err, output) + } + lines := strings.Split(strings.TrimSpace(output), "\n") + if lines[len(lines)-1] != "200" { + return fmt.Errorf("last response from server was not 200:\n%s", output) + } + return nil +} diff --git a/test/extended/router/headers.go b/test/extended/router/headers.go index 75166304eee0..f9e03cc915b3 100644 --- a/test/extended/router/headers.go +++ b/test/extended/router/headers.go @@ -1,4 +1,4 @@ -package images +package router import ( "bufio" diff --git a/test/extended/router/metrics.go b/test/extended/router/metrics.go index 7055b6084415..4759f666fe4a 100644 --- a/test/extended/router/metrics.go +++ b/test/extended/router/metrics.go @@ -1,4 
+1,4 @@ -package images +package router import ( "bytes" diff --git a/test/extended/router/reencrypt.go b/test/extended/router/reencrypt.go index 7961c74ad5d3..ee123e77a1ad 100644 --- a/test/extended/router/reencrypt.go +++ b/test/extended/router/reencrypt.go @@ -1,4 +1,4 @@ -package images +package router import ( "fmt" diff --git a/test/extended/router/router.go b/test/extended/router/router.go index e614ddadf8be..26a790c99081 100644 --- a/test/extended/router/router.go +++ b/test/extended/router/router.go @@ -1,4 +1,4 @@ -package images +package router import ( "net/http" diff --git a/test/extended/router/scoped.go b/test/extended/router/scoped.go index 2a7290dea945..d6ba2340d822 100644 --- a/test/extended/router/scoped.go +++ b/test/extended/router/scoped.go @@ -1,4 +1,4 @@ -package images +package router import ( "fmt" diff --git a/test/extended/router/stress.go b/test/extended/router/stress.go index 42dbb4b81458..115de0bd6128 100644 --- a/test/extended/router/stress.go +++ b/test/extended/router/stress.go @@ -1,4 +1,4 @@ -package images +package router import ( "bytes" diff --git a/test/extended/router/unprivileged.go b/test/extended/router/unprivileged.go index 9ad3543cf0e3..3e03e6d1a710 100644 --- a/test/extended/router/unprivileged.go +++ b/test/extended/router/unprivileged.go @@ -1,4 +1,4 @@ -package images +package router import ( "fmt" diff --git a/test/extended/router/weighted.go b/test/extended/router/weighted.go index 438f3a01630b..1ce29e090c22 100644 --- a/test/extended/router/weighted.go +++ b/test/extended/router/weighted.go @@ -1,4 +1,4 @@ -package images +package router import ( "encoding/csv" diff --git a/test/extended/testdata/bindata.go b/test/extended/testdata/bindata.go index 565b03bdee70..4ce4fd03184e 100644 --- a/test/extended/testdata/bindata.go +++ b/test/extended/testdata/bindata.go @@ -178,6 +178,7 @@ // test/extended/testdata/roles/empty-role.yaml // test/extended/testdata/roles/policy-clusterroles.yaml // 
test/extended/testdata/roles/policy-roles.yaml +// test/extended/testdata/router-config-manager.yaml // test/extended/testdata/router-http-echo-server.yaml // test/extended/testdata/router-metrics.yaml // test/extended/testdata/run_policy/parallel-bc.yaml @@ -10079,6 +10080,297 @@ func testExtendedTestdataRolesPolicyRolesYaml() (*asset, error) { return a, nil } +var _testExtendedTestdataRouterConfigManagerYaml = []byte(`apiVersion: v1 +kind: Template +parameters: +- name: IMAGE + value: openshift/origin-haproxy-router:latest +objects: +- apiVersion: v1 + kind: Pod + metadata: + name: router-haproxy-cfgmgr + labels: + test: router-haproxy-cfgmgr + spec: + terminationGracePeriodSeconds: 1 + containers: + - name: router + image: ${IMAGE} + imagePullPolicy: IfNotPresent + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: ["--namespace=$(POD_NAMESPACE)", "--loglevel=4", "--haproxy-config-manager=true", "--blueprint-route-labels=select=hapcm-blueprint", "--labels=select=haproxy-cfgmgr", "--stats-password=password", "--stats-port=1936", "--stats-user=admin"] + hostNetwork: false + ports: + - containerPort: 80 + - containerPort: 443 + - containerPort: 1936 + name: stats + protocol: TCP + serviceAccountName: default + +# ensure the router can access routes and endpoints +- apiVersion: v1 + kind: RoleBinding + metadata: + name: system-router + subjects: + - kind: ServiceAccount + name: default + roleRef: + name: system:router + +# blueprints for edge, reencrypt and passthrough routes with annotation(s) +- apiVersion: v1 + kind: Route + metadata: + name: edge-blueprint + labels: + test: router + select: hapcm-blueprint + annotations: + router.openshift.io/cookie_name: empire + spec: + tls: + termination: edge + host: edge.blueprint.hapcm.test + to: + name: insecure-service + kind: Service + ports: + - targetPort: 8080 +- apiVersion: v1 + kind: Route + metadata: + name: reencrypt-blueprint + labels: + test: router + select: 
hapcm-blueprint + annotations: + ren: stimpy + spec: + tls: + termination: reencrypt + host: reencrypt.blueprint.hapcm.test + to: + name: secure-service + kind: Service + ports: + - targetPort: 8443 +- apiVersion: v1 + kind: Route + metadata: + name: passthrough-blueprint + labels: + test: router + select: hapcm-blueprint + annotations: + test: ptcruiser + foo: bar + spec: + tls: + termination: passthrough + host: passthrough.blueprint.hapcm.test + to: + name: secure-service + kind: Service + +# config map for nginx +- apiVersion: v1 + kind: ConfigMap + metadata: + name: serving-cert + data: + nginx.conf: | + daemon off; + events { } + http { + server { + listen 8443; + ssl on; + ssl_certificate /etc/serving-cert/tls.crt; + ssl_certificate_key /etc/serving-cert/tls.key; + server_name "*.svc"; + location / { + root /usr/share/nginx/html; + index index.html index.htm; + } + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + } + } + +# pods that service http[s] requests +- apiVersion: v1 + kind: Pod + metadata: + name: insecure-endpoint + labels: + test: haproxy-cfgmgr + endpoints: insecure-endpoint + spec: + terminationGracePeriodSeconds: 1 + containers: + - name: test + image: openshift/hello-openshift + ports: + - containerPort: 8080 + name: http + - containerPort: 100 + protocol: UDP +- apiVersion: v1 + kind: Pod + metadata: + name: secure-endpoint + labels: + app: secure-endpoint + spec: + containers: + - image: nginx:latest + name: serve + command: + - /usr/sbin/nginx + args: + - -c + - /etc/nginx/nginx.conf + ports: + - containerPort: 8443 + protocol: TCP + volumeMounts: + - name: cert + mountPath: /etc/serving-cert + - name: conf + mountPath: /etc/nginx + - name: tmp + mountPath: /var/cache/nginx + - name: tmp + mountPath: /var/run + volumes: + - name: conf + configMap: + name: serving-cert + - name: cert + secret: + secretName: serving-cert + - name: tmp + emptyDir: {} + - name: tmp2 + emptyDir: {} + +# services 
that can be routed to +- apiVersion: v1 + kind: Service + metadata: + name: insecure-service + labels: + test: router + spec: + selector: + test: haproxy-cfgmgr + endpoints: insecure-endpoint + ports: + - port: 8080 +- apiVersion: v1 + kind: Service + metadata: + name: secure-service + annotations: + service.alpha.openshift.io/serving-cert-secret-name: serving-cert + spec: + selector: + app: secure-endpoint + ports: + - port: 443 + name: https + targetPort: 8443 + protocol: TCP + + +# insecure, edge secured, reencrypt and passthrough routes +- apiVersion: v1 + kind: Route + metadata: + name: insecure-route + labels: + test: haproxy-cfgmgr + select: haproxy-cfgmgr + spec: + host: insecure.hapcm.test + to: + name: insecure-service + kind: Service + ports: + - targetPort: 8080 +- apiVersion: v1 + kind: Route + metadata: + name: edge-allow-http-route + labels: + test: haproxy-cfgmgr + select: haproxy-cfgmgr + spec: + tls: + termination: edge + insecureEdgeTerminationPolicy: Allow + host: edge.allow.hapcm.test + to: + name: insecure-service + kind: Service + ports: + - targetPort: 8080 +- apiVersion: v1 + kind: Route + metadata: + name: reencrypt-route + labels: + test: haproxy-cfgmgr + select: haproxy-cfgmgr + spec: + tls: + termination: reencrypt + host: reencrypt.hapcm.test + to: + name: secure-service + kind: Service + ports: + - targetPort: 8443 +- apiVersion: v1 + kind: Route + metadata: + name: passthrough-route + labels: + test: haproxy-cfgmgr + select: haproxy-cfgmgr + spec: + tls: + termination: passthrough + host: passthrough.hapcm.test + to: + name: secure-service + kind: Service +`) + +func testExtendedTestdataRouterConfigManagerYamlBytes() ([]byte, error) { + return _testExtendedTestdataRouterConfigManagerYaml, nil +} + +func testExtendedTestdataRouterConfigManagerYaml() (*asset, error) { + bytes, err := testExtendedTestdataRouterConfigManagerYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: 
"test/extended/testdata/router-config-manager.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + var _testExtendedTestdataRouterHttpEchoServerYaml = []byte(`apiVersion: v1 kind: List metadata: {} @@ -33739,6 +34031,7 @@ var _bindata = map[string]func() (*asset, error){ "test/extended/testdata/roles/empty-role.yaml": testExtendedTestdataRolesEmptyRoleYaml, "test/extended/testdata/roles/policy-clusterroles.yaml": testExtendedTestdataRolesPolicyClusterrolesYaml, "test/extended/testdata/roles/policy-roles.yaml": testExtendedTestdataRolesPolicyRolesYaml, + "test/extended/testdata/router-config-manager.yaml": testExtendedTestdataRouterConfigManagerYaml, "test/extended/testdata/router-http-echo-server.yaml": testExtendedTestdataRouterHttpEchoServerYaml, "test/extended/testdata/router-metrics.yaml": testExtendedTestdataRouterMetricsYaml, "test/extended/testdata/run_policy/parallel-bc.yaml": testExtendedTestdataRun_policyParallelBcYaml, @@ -34273,6 +34566,7 @@ var _bintree = &bintree{nil, map[string]*bintree{ "policy-clusterroles.yaml": &bintree{testExtendedTestdataRolesPolicyClusterrolesYaml, map[string]*bintree{}}, "policy-roles.yaml": &bintree{testExtendedTestdataRolesPolicyRolesYaml, map[string]*bintree{}}, }}, + "router-config-manager.yaml": &bintree{testExtendedTestdataRouterConfigManagerYaml, map[string]*bintree{}}, "router-http-echo-server.yaml": &bintree{testExtendedTestdataRouterHttpEchoServerYaml, map[string]*bintree{}}, "router-metrics.yaml": &bintree{testExtendedTestdataRouterMetricsYaml, map[string]*bintree{}}, "run_policy": &bintree{nil, map[string]*bintree{ diff --git a/test/extended/testdata/router-config-manager.yaml b/test/extended/testdata/router-config-manager.yaml new file mode 100644 index 000000000000..fc151c00908e --- /dev/null +++ b/test/extended/testdata/router-config-manager.yaml @@ -0,0 +1,274 @@ +apiVersion: v1 +kind: Template +parameters: +- name: IMAGE + value: 
openshift/origin-haproxy-router:latest +objects: +- apiVersion: v1 + kind: Pod + metadata: + name: router-haproxy-cfgmgr + labels: + test: router-haproxy-cfgmgr + spec: + terminationGracePeriodSeconds: 1 + containers: + - name: router + image: ${IMAGE} + imagePullPolicy: IfNotPresent + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: ["--namespace=$(POD_NAMESPACE)", "--loglevel=4", "--haproxy-config-manager=true", "--blueprint-route-labels=select=hapcm-blueprint", "--labels=select=haproxy-cfgmgr", "--stats-password=password", "--stats-port=1936", "--stats-user=admin"] + hostNetwork: false + ports: + - containerPort: 80 + - containerPort: 443 + - containerPort: 1936 + name: stats + protocol: TCP + serviceAccountName: default + +# ensure the router can access routes and endpoints +- apiVersion: v1 + kind: RoleBinding + metadata: + name: system-router + subjects: + - kind: ServiceAccount + name: default + roleRef: + name: system:router + +# blueprints for edge, reencrypt and passthrough routes with annotation(s) +- apiVersion: v1 + kind: Route + metadata: + name: edge-blueprint + labels: + test: router + select: hapcm-blueprint + annotations: + router.openshift.io/cookie_name: empire + spec: + tls: + termination: edge + host: edge.blueprint.hapcm.test + to: + name: insecure-service + kind: Service + ports: + - targetPort: 8080 +- apiVersion: v1 + kind: Route + metadata: + name: reencrypt-blueprint + labels: + test: router + select: hapcm-blueprint + annotations: + ren: stimpy + spec: + tls: + termination: reencrypt + host: reencrypt.blueprint.hapcm.test + to: + name: secure-service + kind: Service + ports: + - targetPort: 8443 +- apiVersion: v1 + kind: Route + metadata: + name: passthrough-blueprint + labels: + test: router + select: hapcm-blueprint + annotations: + test: ptcruiser + foo: bar + spec: + tls: + termination: passthrough + host: passthrough.blueprint.hapcm.test + to: + name: secure-service + kind: Service + +# 
config map for nginx +- apiVersion: v1 + kind: ConfigMap + metadata: + name: serving-cert + data: + nginx.conf: | + daemon off; + events { } + http { + server { + listen 8443; + ssl on; + ssl_certificate /etc/serving-cert/tls.crt; + ssl_certificate_key /etc/serving-cert/tls.key; + server_name "*.svc"; + location / { + root /usr/share/nginx/html; + index index.html index.htm; + } + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + } + } + +# pods that service http[s] requests +- apiVersion: v1 + kind: Pod + metadata: + name: insecure-endpoint + labels: + test: haproxy-cfgmgr + endpoints: insecure-endpoint + spec: + terminationGracePeriodSeconds: 1 + containers: + - name: test + image: openshift/hello-openshift + ports: + - containerPort: 8080 + name: http + - containerPort: 100 + protocol: UDP +- apiVersion: v1 + kind: Pod + metadata: + name: secure-endpoint + labels: + app: secure-endpoint + spec: + containers: + - image: nginx:latest + name: serve + command: + - /usr/sbin/nginx + args: + - -c + - /etc/nginx/nginx.conf + ports: + - containerPort: 8443 + protocol: TCP + volumeMounts: + - name: cert + mountPath: /etc/serving-cert + - name: conf + mountPath: /etc/nginx + - name: tmp + mountPath: /var/cache/nginx + - name: tmp + mountPath: /var/run + volumes: + - name: conf + configMap: + name: serving-cert + - name: cert + secret: + secretName: serving-cert + - name: tmp + emptyDir: {} + - name: tmp2 + emptyDir: {} + +# services that can be routed to +- apiVersion: v1 + kind: Service + metadata: + name: insecure-service + labels: + test: router + spec: + selector: + test: haproxy-cfgmgr + endpoints: insecure-endpoint + ports: + - port: 8080 +- apiVersion: v1 + kind: Service + metadata: + name: secure-service + annotations: + service.alpha.openshift.io/serving-cert-secret-name: serving-cert + spec: + selector: + app: secure-endpoint + ports: + - port: 443 + name: https + targetPort: 8443 + protocol: TCP + + +# insecure, 
edge secured, reencrypt and passthrough routes +- apiVersion: v1 + kind: Route + metadata: + name: insecure-route + labels: + test: haproxy-cfgmgr + select: haproxy-cfgmgr + spec: + host: insecure.hapcm.test + to: + name: insecure-service + kind: Service + ports: + - targetPort: 8080 +- apiVersion: v1 + kind: Route + metadata: + name: edge-allow-http-route + labels: + test: haproxy-cfgmgr + select: haproxy-cfgmgr + spec: + tls: + termination: edge + insecureEdgeTerminationPolicy: Allow + host: edge.allow.hapcm.test + to: + name: insecure-service + kind: Service + ports: + - targetPort: 8080 +- apiVersion: v1 + kind: Route + metadata: + name: reencrypt-route + labels: + test: haproxy-cfgmgr + select: haproxy-cfgmgr + spec: + tls: + termination: reencrypt + host: reencrypt.hapcm.test + to: + name: secure-service + kind: Service + ports: + - targetPort: 8443 +- apiVersion: v1 + kind: Route + metadata: + name: passthrough-route + labels: + test: haproxy-cfgmgr + select: haproxy-cfgmgr + spec: + tls: + termination: passthrough + host: passthrough.hapcm.test + to: + name: secure-service + kind: Service From 9f322068e2864d5204d5398f0a8e04f9e5bcf0a1 Mon Sep 17 00:00:00 2001 From: ramr Date: Wed, 6 Jun 2018 20:38:25 -0700 Subject: [PATCH 7/9] Add gocsv + go-haproxy dependencies for haproxy dynamic config api changes. 
--- glide.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/glide.yaml b/glide.yaml index 75e358ff1fa8..234d79cce30d 100644 --- a/glide.yaml +++ b/glide.yaml @@ -182,6 +182,11 @@ import: # router metrics - package: github.com/cockroachdb/cmux version: b64f5908f4945f4b11ed4a0a9d3cc1e23350866d +# router dynamic reloads +- package: github.com/gocarina/gocsv + version: a5c9099e2484f1551abb9433885e158610a25f4b +- package: github.com/bcicen/go-haproxy + version: ff5824fe38bede761b873cab6e247a530e89236a # keep us up to date with latest DNS security fixes - package: github.com/miekg/dns version: ~v1 From 831c11415d4bb117a7e50607439a530b8044492d Mon Sep 17 00:00:00 2001 From: ramr Date: Mon, 9 Jul 2018 15:17:40 -0700 Subject: [PATCH 8/9] bump(*) --- glide.lock | 6 + .../github.com/bcicen/go-haproxy/.gitignore | 24 + vendor/github.com/bcicen/go-haproxy/LICENSE | 21 + vendor/github.com/bcicen/go-haproxy/README.md | 28 + vendor/github.com/bcicen/go-haproxy/client.go | 78 +++ .../bcicen/go-haproxy/client_test.go | 52 ++ vendor/github.com/bcicen/go-haproxy/info.go | 73 +++ .../bcicen/go-haproxy/kvcodec/types.go | 193 ++++++ .../bcicen/go-haproxy/kvcodec/unmarshal.go | 124 ++++ vendor/github.com/bcicen/go-haproxy/stat.go | 102 +++ vendor/github.com/gocarina/gocsv/.gitignore | 1 + vendor/github.com/gocarina/gocsv/.travis.yml | 1 + vendor/github.com/gocarina/gocsv/LICENSE | 21 + vendor/github.com/gocarina/gocsv/README.md | 170 +++++ vendor/github.com/gocarina/gocsv/csv.go | 324 ++++++++++ vendor/github.com/gocarina/gocsv/decode.go | 368 +++++++++++ .../github.com/gocarina/gocsv/decode_test.go | 596 ++++++++++++++++++ vendor/github.com/gocarina/gocsv/encode.go | 139 ++++ 
.../github.com/gocarina/gocsv/encode_test.go | 295 +++++++++ vendor/github.com/gocarina/gocsv/reflect.go | 107 ++++ vendor/github.com/gocarina/gocsv/safe_csv.go | 32 + .../gocarina/gocsv/sample_structs_test.go | 50 ++ vendor/github.com/gocarina/gocsv/types.go | 456 ++++++++++++++ .../github.com/gocarina/gocsv/types_test.go | 91 +++ .../github.com/gocarina/gocsv/unmarshaller.go | 115 ++++ 25 files changed, 3467 insertions(+) create mode 100644 vendor/github.com/bcicen/go-haproxy/.gitignore create mode 100644 vendor/github.com/bcicen/go-haproxy/LICENSE create mode 100644 vendor/github.com/bcicen/go-haproxy/README.md create mode 100644 vendor/github.com/bcicen/go-haproxy/client.go create mode 100644 vendor/github.com/bcicen/go-haproxy/client_test.go create mode 100644 vendor/github.com/bcicen/go-haproxy/info.go create mode 100644 vendor/github.com/bcicen/go-haproxy/kvcodec/types.go create mode 100644 vendor/github.com/bcicen/go-haproxy/kvcodec/unmarshal.go create mode 100644 vendor/github.com/bcicen/go-haproxy/stat.go create mode 100644 vendor/github.com/gocarina/gocsv/.gitignore create mode 100644 vendor/github.com/gocarina/gocsv/.travis.yml create mode 100644 vendor/github.com/gocarina/gocsv/LICENSE create mode 100644 vendor/github.com/gocarina/gocsv/README.md create mode 100644 vendor/github.com/gocarina/gocsv/csv.go create mode 100644 vendor/github.com/gocarina/gocsv/decode.go create mode 100644 vendor/github.com/gocarina/gocsv/decode_test.go create mode 100644 vendor/github.com/gocarina/gocsv/encode.go create mode 100644 vendor/github.com/gocarina/gocsv/encode_test.go create mode 100644 
vendor/github.com/gocarina/gocsv/reflect.go create mode 100644 vendor/github.com/gocarina/gocsv/safe_csv.go create mode 100644 vendor/github.com/gocarina/gocsv/sample_structs_test.go create mode 100644 vendor/github.com/gocarina/gocsv/types.go create mode 100644 vendor/github.com/gocarina/gocsv/types_test.go create mode 100644 vendor/github.com/gocarina/gocsv/unmarshaller.go diff --git a/glide.lock b/glide.lock index 9355cfc4d14c..b5d30c9671e0 100644 --- a/glide.lock +++ b/glide.lock @@ -85,6 +85,10 @@ imports: - autorest/date - autorest/to - autorest/validation +- name: github.com/bcicen/go-haproxy + version: ff5824fe38bede761b873cab6e247a530e89236a + subpackages: + - kvcodec - name: github.com/beorn7/perks version: 3ac7bf7a47d159a033b107610db8a1b6575507a4 subpackages: @@ -429,6 +433,8 @@ imports: version: f3f9494671f93fcff853e3c6e9e948b3eb71e590 - name: github.com/go-openapi/validate version: d509235108fcf6ab4913d2dcb3a2260c0db2108e +- name: github.com/gocarina/gocsv + version: a5c9099e2484f1551abb9433885e158610a25f4b - name: github.com/godbus/dbus version: c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f - name: github.com/gogo/protobuf diff --git a/vendor/github.com/bcicen/go-haproxy/.gitignore b/vendor/github.com/bcicen/go-haproxy/.gitignore new file mode 100644 index 000000000000..daf913b1b347 --- /dev/null +++ b/vendor/github.com/bcicen/go-haproxy/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/bcicen/go-haproxy/LICENSE 
b/vendor/github.com/bcicen/go-haproxy/LICENSE new file mode 100644 index 000000000000..b9911fa2fbca --- /dev/null +++ b/vendor/github.com/bcicen/go-haproxy/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 bradley + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/bcicen/go-haproxy/README.md b/vendor/github.com/bcicen/go-haproxy/README.md new file mode 100644 index 000000000000..9a8dde698068 --- /dev/null +++ b/vendor/github.com/bcicen/go-haproxy/README.md @@ -0,0 +1,28 @@ +[![GoDoc](https://godoc.org/github.com/bcicen/go-haproxy?status.svg)](https://godoc.org/github.com/bcicen/go-haproxy) +[![codebeat badge](https://codebeat.co/badges/f947c19e-0d7b-47d0-87b4-4e2e555ba806)](https://codebeat.co/projects/github-com-bcicen-go-haproxy) + +# go-haproxy +Go library for interacting with HAProxys stats socket. 
+ +## Usage + +Initialize a client object. Supported address schemas are `tcp://` and `unix:///` +```go +client := &haproxy.HAProxyClient{ + Addr: "tcp://localhost:9999", +} +``` + +Fetch results for a built in command(currently supports `show stats` and `show info`): +```go +stats, err := client.Stats() +for _, i := range stats { + fmt.Printf("%s: %s\n", i.SvName, i.Status) +} +``` + +Or retrieve the result body from an arbitrary command string: +```go +result, err := h.RunCommand("show info") +fmt.Println(result.String()) +``` diff --git a/vendor/github.com/bcicen/go-haproxy/client.go b/vendor/github.com/bcicen/go-haproxy/client.go new file mode 100644 index 000000000000..db7898358584 --- /dev/null +++ b/vendor/github.com/bcicen/go-haproxy/client.go @@ -0,0 +1,78 @@ +// Package haproxy provides a minimal client for communicating with, and issuing commands to, HAproxy over a network or file socket. +package haproxy + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "time" +) + +const ( + socketSchema = "unix://" + tcpSchema = "tcp://" +) + +// HAProxyClient is the main structure of the library. +type HAProxyClient struct { + Addr string + Timeout int + conn net.Conn +} + +// RunCommand is the entrypoint to the client. Sends an arbitray command string to HAProxy. 
+func (h *HAProxyClient) RunCommand(cmd string) (*bytes.Buffer, error) { + err := h.dial() + if err != nil { + return nil, err + } + defer h.conn.Close() + + result := bytes.NewBuffer(nil) + + _, err = h.conn.Write([]byte(cmd + "\n")) + if err != nil { + return nil, err + } + + _, err = io.Copy(result, h.conn) + if err != nil { + return nil, err + } + + if strings.HasPrefix(result.String(), "Unknown command") { + return nil, fmt.Errorf("Unknown command: %s", cmd) + } + + return result, nil +} + +func (h *HAProxyClient) dial() (err error) { + if h.Timeout == 0 { + h.Timeout = 30 + } + + timeout := time.Duration(h.Timeout) * time.Second + + switch h.schema() { + case "socket": + h.conn, err = net.DialTimeout("unix", strings.Replace(h.Addr, socketSchema, "", 1), timeout) + case "tcp": + h.conn, err = net.DialTimeout("tcp", strings.Replace(h.Addr, tcpSchema, "", 1), timeout) + default: + return fmt.Errorf("unknown schema") + } + return err +} + +func (h *HAProxyClient) schema() string { + if strings.HasPrefix(h.Addr, socketSchema) { + return "socket" + } + if strings.HasPrefix(h.Addr, tcpSchema) { + return "tcp" + } + return "" +} diff --git a/vendor/github.com/bcicen/go-haproxy/client_test.go b/vendor/github.com/bcicen/go-haproxy/client_test.go new file mode 100644 index 000000000000..eb8f6e965e35 --- /dev/null +++ b/vendor/github.com/bcicen/go-haproxy/client_test.go @@ -0,0 +1,52 @@ +package haproxy_test + +import ( + "fmt" + + "github.com/bcicen/go-haproxy" +) + +func ExampleHAProxyClient_Stats() { + client := &haproxy.HAProxyClient{ + Addr: "unix:///var/run/haproxy.sock", + } + stats, err := client.Stats() + if err != nil { + fmt.Println(err) + return + } + for _, s := range stats { + fmt.Printf("%s: %s\n", s.SvName, s.Status) + } + // Output: + //static: DOWN + //app1: UP + //app2: UP + //... 
+} + +func ExampleHAProxyClient_Info() { + client := &haproxy.HAProxyClient{ + Addr: "unix:///var/run/haproxy.sock", + } + info, err := client.Info() + if err != nil { + fmt.Println(err) + return + } + fmt.Printf("%s version %s\n", info.Name, info.Version) + // Output: + //HAProxy version 1.6.3 +} + +func ExampleHAProxyClient_RunCommand() { + client := &haproxy.HAProxyClient{ + Addr: "unix:///var/run/haproxy.sock", + } + result, err := client.RunCommand("show info") + if err != nil { + fmt.Println(err) + return + } + fmt.Println(result.String()) +} diff --git a/vendor/github.com/bcicen/go-haproxy/info.go b/vendor/github.com/bcicen/go-haproxy/info.go new file mode 100644 index 000000000000..f64e63db2ab6 --- /dev/null +++ b/vendor/github.com/bcicen/go-haproxy/info.go @@ -0,0 +1,73 @@ +package haproxy + +import ( + "fmt" + + "github.com/bcicen/go-haproxy/kvcodec" +) + +// Response from HAProxy "show info" command. +type Info struct { + Name string `kv:"Name"` + Version string `kv:"Version"` + ReleaseDate string `kv:"Release_date"` + Nbproc uint64 `kv:"Nbproc"` + ProcessNum uint64 `kv:"Process_num"` + Pid uint64 `kv:"Pid"` + Uptime string `kv:"Uptime"` + UptimeSec uint64 `kv:"Uptime_sec"` + MemMaxMB uint64 `kv:"Memmax_MB"` + UlimitN uint64 `kv:"Ulimit-n"` + Maxsock uint64 `kv:"Maxsock"` + Maxconn uint64 `kv:"Maxconn"` + HardMaxconn uint64 `kv:"Hard_maxconn"` + CurrConns uint64 `kv:"CurrConns"` + CumConns uint64 `kv:"CumConns"` + CumReq uint64 `kv:"CumReq"` + MaxSslConns uint64 `kv:"MaxSslConns"` + CurrSslConns uint64 `kv:"CurrSslConns"` + CumSslConns uint64 `kv:"CumSslConns"` + Maxpipes uint64 `kv:"Maxpipes"` + PipesUsed uint64 `kv:"PipesUsed"` + PipesFree uint64 `kv:"PipesFree"` + ConnRate uint64 `kv:"ConnRate"` + ConnRateLimit uint64 `kv:"ConnRateLimit"` + MaxConnRate uint64 `kv:"MaxConnRate"` + SessRate uint64 `kv:"SessRate"` + SessRateLimit uint64 `kv:"SessRateLimit"` + MaxSessRate uint64 
`kv:"MaxSessRate"` + SslRate uint64 `kv:"SslRate"` + SslRateLimit uint64 `kv:"SslRateLimit"` + MaxSslRate uint64 `kv:"MaxSslRate"` + SslFrontendKeyRate uint64 `kv:"SslFrontendKeyRate"` + SslFrontendMaxKeyRate uint64 `kv:"SslFrontendMaxKeyRate"` + SslFrontendSessionReusePct uint64 `kv:"SslFrontendSessionReuse_pct"` + SslBackendKeyRate uint64 `kv:"SslBackendKeyRate"` + SslBackendMaxKeyRate uint64 `kv:"SslBackendMaxKeyRate"` + SslCacheLookups uint64 `kv:"SslCacheLookups"` + SslCacheMisses uint64 `kv:"SslCacheMisses"` + CompressBpsIn uint64 `kv:"CompressBpsIn"` + CompressBpsOut uint64 `kv:"CompressBpsOut"` + CompressBpsRateLim uint64 `kv:"CompressBpsRateLim"` + ZlibMemUsage uint64 `kv:"ZlibMemUsage"` + MaxZlibMemUsage uint64 `kv:"MaxZlibMemUsage"` + Tasks uint64 `kv:"Tasks"` + RunQueue uint64 `kv:"Run_queue"` + IdlePct uint64 `kv:"Idle_pct"` + Node string `kv:"node"` + Description string `kv:"description"` +} + +// Equivalent to HAProxy "show info" command. +func (h *HAProxyClient) Info() (*Info, error) { + res, err := h.RunCommand("show info") + if err != nil { + return nil, err + } + info := &Info{} + err = kvcodec.Unmarshal(res, info) + if err != nil { + return nil, fmt.Errorf("error decoding response: %s", err) + } + return info, nil +} diff --git a/vendor/github.com/bcicen/go-haproxy/kvcodec/types.go b/vendor/github.com/bcicen/go-haproxy/kvcodec/types.go new file mode 100644 index 000000000000..d4733dfe647f --- /dev/null +++ b/vendor/github.com/bcicen/go-haproxy/kvcodec/types.go @@ -0,0 +1,193 @@ +package kvcodec + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +func toString(in interface{}) (string, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + return inValue.String(), nil + case reflect.Bool: + b := inValue.Bool() + if b { + return "true", nil + } + return "false", nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return 
fmt.Sprintf("%v", inValue.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fmt.Sprintf("%v", inValue.Uint()), nil + case reflect.Float32: + return strconv.FormatFloat(inValue.Float(), byte('f'), -1, 32), nil + case reflect.Float64: + return strconv.FormatFloat(inValue.Float(), byte('f'), -1, 64), nil + } + return "", fmt.Errorf("unable to cast " + inValue.Type().String() + " to string") +} + +func toBool(in interface{}) (bool, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := inValue.String() + switch s { + case "yes": + return true, nil + case "no", "": + return false, nil + default: + return strconv.ParseBool(s) + } + case reflect.Bool: + return inValue.Bool(), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i := inValue.Int() + if i != 0 { + return true, nil + } + return false, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + i := inValue.Uint() + if i != 0 { + return true, nil + } + return false, nil + case reflect.Float32, reflect.Float64: + f := inValue.Float() + if f != 0 { + return true, nil + } + return false, nil + } + return false, fmt.Errorf("unable to cast " + inValue.Type().String() + " to bool") +} + +func toInt(in interface{}) (int64, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := strings.TrimSpace(inValue.String()) + if s == "" { + return 0, nil + } + return strconv.ParseInt(s, 0, 64) + case reflect.Bool: + if inValue.Bool() { + return 1, nil + } + return 0, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return inValue.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return int64(inValue.Uint()), nil + case reflect.Float32, reflect.Float64: + return int64(inValue.Float()), nil + } + return 0, fmt.Errorf("unable to cast " + 
inValue.Type().String() + " to int") +} + +func toUint(in interface{}) (uint64, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := strings.TrimSpace(inValue.String()) + if s == "" { + return 0, nil + } + + // float input + if strings.Contains(s, ".") { + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return 0, err + } + return uint64(f), nil + } + return strconv.ParseUint(s, 0, 64) + case reflect.Bool: + if inValue.Bool() { + return 1, nil + } + return 0, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return uint64(inValue.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return inValue.Uint(), nil + case reflect.Float32, reflect.Float64: + return uint64(inValue.Float()), nil + } + return 0, fmt.Errorf("unable to cast " + inValue.Type().String() + " to uint") +} + +func toFloat(in interface{}) (float64, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := strings.TrimSpace(inValue.String()) + if s == "" { + return 0, nil + } + return strconv.ParseFloat(s, 64) + case reflect.Bool: + if inValue.Bool() { + return 1, nil + } + return 0, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(inValue.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return float64(inValue.Uint()), nil + case reflect.Float32, reflect.Float64: + return inValue.Float(), nil + } + return 0, fmt.Errorf("unable to cast " + inValue.Type().String() + " to float") +} + +func setField(field reflect.Value, value string) error { + switch field.Kind() { + case reflect.String: + s, err := toString(value) + if err != nil { + return err + } + field.SetString(s) + case reflect.Bool: + b, err := toBool(value) + if err != nil { + return err + } + field.SetBool(b) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64: + i, err := toInt(value) + if err != nil { + return err + } + field.SetInt(i) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ui, err := toUint(value) + if err != nil { + return err + } + field.SetUint(ui) + case reflect.Float32, reflect.Float64: + f, err := toFloat(value) + if err != nil { + return err + } + field.SetFloat(f) + default: + err := fmt.Errorf("unable to set field of type %s", field.Kind()) + return err + } + return nil +} diff --git a/vendor/github.com/bcicen/go-haproxy/kvcodec/unmarshal.go b/vendor/github.com/bcicen/go-haproxy/kvcodec/unmarshal.go new file mode 100644 index 000000000000..bff131f6ef8b --- /dev/null +++ b/vendor/github.com/bcicen/go-haproxy/kvcodec/unmarshal.go @@ -0,0 +1,124 @@ +package kvcodec + +import ( + "bufio" + "fmt" + "io" + "reflect" + "regexp" + "strings" + "sync" +) + +const ( + tagLabel = "kv" + kvDelim = ":" +) + +type structFields map[string]fieldMeta + +type fieldMeta struct { + Key string + Name string + OmitAlways bool + OmitEmpty bool +} + +func newfieldMeta(field reflect.StructField) (meta fieldMeta) { + meta = fieldMeta{ + Name: field.Name, + } + fieldTags := strings.Split(field.Tag.Get(tagLabel), ",") + for _, tag := range fieldTags { + if tag == "-" { + meta.OmitAlways = true + return meta + } + if tag == "omitempty" { + meta.OmitEmpty = true + } else if tag != "" { + meta.Key = tag + } else { + meta.Key = field.Name + } + } + return meta +} + +var err error +var structMap = make(map[reflect.Type]structFields) +var structMapMutex sync.RWMutex + +func getStructFields(rType reflect.Type) (structFields, error) { + structMapMutex.RLock() + stInfo, ok := structMap[rType] + if !ok { + stInfo, err = newStructFields(rType) + if err != nil { + return nil, err + } + structMap[rType] = stInfo + } + structMapMutex.RUnlock() + return stInfo, nil +} + +func newStructFields(rType reflect.Type) (structFields, error) { + fieldsCount 
:= rType.NumField() + fieldMap := make(structFields) + + for i := 0; i < fieldsCount; i++ { + field := rType.Field(i) + meta := newfieldMeta(field) + + if field.PkgPath != "" { + continue + } + + if field.Anonymous && field.Type.Kind() == reflect.Struct { + return nil, fmt.Errorf("embedded structs not supported") + } + + if !meta.OmitAlways { + fieldMap[meta.Key] = meta + } + } + + return fieldMap, nil +} + +func trim(s string) string { + re := regexp.MustCompile("(\\S+|\\S+)") + return strings.Join(re.FindAllString(s, -1), " ") +} + +func Unmarshal(in io.Reader, out interface{}) error { + outValue := reflect.ValueOf(out) + if outValue.Kind() == reflect.Ptr { + outValue = outValue.Elem() + } + + fields, err := getStructFields(outValue.Type()) + if fields == nil { + panic(err) + } + if err != nil { + return err + } + + scanner := bufio.NewScanner(in) + for scanner.Scan() { + if strings.Contains(scanner.Text(), kvDelim) { + s := strings.Split(scanner.Text(), kvDelim) + k, v := trim(s[0]), trim(s[1]) + if meta, ok := fields[k]; ok { + field := outValue.FieldByName(meta.Name) + err = setField(field, v) + if err != nil { + return err + } + } + } + } + return nil +} diff --git a/vendor/github.com/bcicen/go-haproxy/stat.go b/vendor/github.com/bcicen/go-haproxy/stat.go new file mode 100644 index 000000000000..392864621407 --- /dev/null +++ b/vendor/github.com/bcicen/go-haproxy/stat.go @@ -0,0 +1,102 @@ +package haproxy + +import ( + "encoding/csv" + "fmt" + + "github.com/gocarina/gocsv" +) + +// Response from HAProxy "show stat" command. 
+type Stat struct { + PxName string `csv:"# pxname"` + SvName string `csv:"svname"` + Qcur uint64 `csv:"qcur"` + Qmax uint64 `csv:"qmax"` + Scur uint64 `csv:"scur"` + Smax uint64 `csv:"smax"` + Slim uint64 `csv:"slim"` + Stot uint64 `csv:"stot"` + Bin uint64 `csv:"bin"` + Bout uint64 `csv:"bout"` + Dreq uint64 `csv:"dreq"` + Dresp uint64 `csv:"dresp"` + Ereq uint64 `csv:"ereq"` + Econ uint64 `csv:"econ"` + Eresp uint64 `csv:"eresp"` + Wretr uint64 `csv:"wretr"` + Wredis uint64 `csv:"wredis"` + Status string `csv:"status"` + Weight uint64 `csv:"weight"` + Act uint64 `csv:"act"` + Bck uint64 `csv:"bck"` + ChkFail uint64 `csv:"chkfail"` + ChkDown uint64 `csv:"chkdown"` + Lastchg uint64 `csv:"lastchg"` + Downtime uint64 `csv:"downtime"` + Qlimit uint64 `csv:"qlimit"` + Pid uint64 `csv:"pid"` + Iid uint64 `csv:"iid"` + Sid uint64 `csv:"sid"` + Throttle uint64 `csv:"throttle"` + Lbtot uint64 `csv:"lbtot"` + Tracked uint64 `csv:"tracked"` + Type uint64 `csv:"type"` + Rate uint64 `csv:"rate"` + RateLim uint64 `csv:"rate_lim"` + RateMax uint64 `csv:"rate_max"` + CheckStatus string `csv:"check_status"` + CheckCode uint64 `csv:"check_code"` + CheckDuration uint64 `csv:"check_duration"` + Hrsp1xx uint64 `csv:"hrsp_1xx"` + Hrsp2xx uint64 `csv:"hrsp_2xx"` + Hrsp3xx uint64 `csv:"hrsp_3xx"` + Hrsp4xx uint64 `csv:"hrsp_4xx"` + Hrsp5xx uint64 `csv:"hrsp_5xx"` + HrspOther uint64 `csv:"hrsp_other"` + Hanafail uint64 `csv:"hanafail"` + ReqRate uint64 `csv:"req_rate"` + ReqRateMax uint64 `csv:"req_rate_max"` + ReqTot uint64 `csv:"req_tot"` + CliAbrt uint64 `csv:"cli_abrt"` + SrvAbrt uint64 `csv:"srv_abrt"` + CompIn uint64 `csv:"comp_in"` + CompOut uint64 `csv:"comp_out"` + CompByp uint64 `csv:"comp_byp"` + CompRsp uint64 `csv:"comp_rsp"` + LastSess int64 `csv:"lastsess"` + LastChk string `csv:"last_chk"` + LastAgt uint64 `csv:"last_agt"` + Qtime uint64 `csv:"qtime"` + Ctime uint64 `csv:"ctime"` + Rtime uint64 `csv:"rtime"` + Ttime uint64 `csv:"ttime"` +} + +// Equivalent to HAProxy 
"show stat" command. +func (h *HAProxyClient) Stats() (stats []*Stat, err error) { + res, err := h.RunCommand("show stat") + if err != nil { + return nil, err + } + + reader := csv.NewReader(res) + reader.TrailingComma = true + err = gocsv.UnmarshalCSV(reader, &stats) + if err != nil { + return nil, fmt.Errorf("error reading csv: %s", err) + } + + // for _, s := range allStats { + // switch s.SvName { + // case "FRONTEND": + // services.Frontends = append(services.Frontends, s) + // case "BACKEND": + // services.Backends = append(services.Backends, s) + // default: + // services.Listeners = append(services.Listeners, s) + // } + // } + + return stats, nil +} diff --git a/vendor/github.com/gocarina/gocsv/.gitignore b/vendor/github.com/gocarina/gocsv/.gitignore new file mode 100644 index 000000000000..485dee64bcfb --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/.gitignore @@ -0,0 +1 @@ +.idea diff --git a/vendor/github.com/gocarina/gocsv/.travis.yml b/vendor/github.com/gocarina/gocsv/.travis.yml new file mode 100644 index 000000000000..4f2ee4d97338 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/gocarina/gocsv/LICENSE b/vendor/github.com/gocarina/gocsv/LICENSE new file mode 100644 index 000000000000..052a371193e4 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Jonathan Picques + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject 
to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gocarina/gocsv/README.md b/vendor/github.com/gocarina/gocsv/README.md new file mode 100644 index 000000000000..c3fae623bb50 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/README.md @@ -0,0 +1,170 @@ +Go CSV +===== + +The GoCSV package aims to provide easy serialization and deserialization functions to use CSV in Golang + +API and techniques inspired from https://godoc.org/gopkg.in/mgo.v2 + +[![GoDoc](https://godoc.org/github.com/gocarina/gocsv?status.png)](https://godoc.org/github.com/gocarina/gocsv) +[![Build Status](https://travis-ci.org/gocarina/gocsv.svg?branch=master)](https://travis-ci.org/gocarina/gocsv) + +Installation +===== + +```go get -u github.com/gocarina/gocsv``` + +Full example +===== + +Consider the following CSV file + +```csv + +client_id,client_name,client_age +1,Jose,42 +2,Daniel,26 +3,Vincent,32 + +``` + +Easy binding in Go! 
+--- + +```go + +package main + +import ( + "fmt" + "gocsv" + "os" +) + +type Client struct { // Our example struct, you can use "-" to ignore a field + Id string `csv:"client_id"` + Name string `csv:"client_name"` + Age string `csv:"client_age"` + NotUsed string `csv:"-"` +} + +func main() { + clientsFile, err := os.OpenFile("clients.csv", os.O_RDWR|os.O_CREATE, os.ModePerm) + if err != nil { + panic(err) + } + defer clientsFile.Close() + + clients := []*Client{} + + if err := gocsv.UnmarshalFile(clientsFile, &clients); err != nil { // Load clients from file + panic(err) + } + for _, client := range clients { + fmt.Println("Hello", client.Name) + } + + if _, err := clientsFile.Seek(0, 0); err != nil { // Go to the start of the file + panic(err) + } + + clients = append(clients, &Client{Id: "12", Name: "John", Age: "21"}) // Add clients + clients = append(clients, &Client{Id: "13", Name: "Fred"}) + clients = append(clients, &Client{Id: "14", Name: "James", Age: "32"}) + clients = append(clients, &Client{Id: "15", Name: "Danny"}) + csvContent, err := gocsv.MarshalString(&clients) // Get all clients as CSV string + //err = gocsv.MarshalFile(&clients, clientsFile) // Use this to save the CSV back to the file + if err != nil { + panic(err) + } + fmt.Println(csvContent) // Display all clients as CSV string + +} + +``` + +Customizable Converters +--- + +```go + +type DateTime struct { + time.Time +} + +// Convert the internal date as CSV string +func (date *DateTime) MarshalCSV() (string, error) { + return date.Time.Format("20060201"), nil +} + +// You could also use the standard Stringer interface +func (date *DateTime) String() (string) { + return date.String() // Redundant, just for example +} + +// Convert the CSV string as internal date +func (date *DateTime) UnmarshalCSV(csv string) (err error) { + date.Time, err = time.Parse("20060201", csv) + if err != nil { + return err + } + return nil +} + +type Client struct { // Our example struct with a custom type 
(DateTime) + Id string `csv:"id"` + Name string `csv:"name"` + Employed DateTime `csv:"employed"` +} + +``` + +Customizable CSV Reader / Writer +--- + +```go + +func main() { + ... + + gocsv.SetCSVReader(func(in io.Reader) gocsv.CSVReader { + r := csv.NewReader(in) + r.Comma = '|' + return r // Allows use pipe as delimiter + }) + + ... + + gocsv.SetCSVReader(func(in io.Reader) gocsv.CSVReader { + r := csv.NewReader(in) + r.LazyQuotes = true + r.Comma = '.' + return r // Allows use dot as delimiter and use quotes in CSV + }) + + ... + + gocsv.SetCSVReader(func(in io.Reader) gocsv.CSVReader { + //return csv.NewReader(in) + return gocsv.LazyCSVReader(in) // Allows use of quotes in CSV + }) + + ... + + gocsv.UnmarshalFile(file, &clients) + + ... + + gocsv.SetCSVWriter(func(out io.Writer) *SafeCSVWriter { + writer := csv.NewWriter(out) + writer.Comma = '|' + return gocsv.NewSafeCSVWriter(writer) + }) + + ... + + gocsv.MarshalFile(&clients, file) + + ... +} + +``` diff --git a/vendor/github.com/gocarina/gocsv/csv.go b/vendor/github.com/gocarina/gocsv/csv.go new file mode 100644 index 000000000000..83e2ed47fbb4 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/csv.go @@ -0,0 +1,324 @@ +// Copyright 2014 Jonathan Picques. All rights reserved. +// Use of this source code is governed by a MIT license +// The license can be found in the LICENSE file. + +// The GoCSV package aims to provide easy CSV serialization and deserialization to the golang programming language + +package gocsv + +import ( + "bytes" + "encoding/csv" + "fmt" + "io" + "os" + "reflect" + "strings" +) + +// FailIfUnmatchedStructTags indicates whether it is considered an error when there is an unmatched +// struct tag. +var FailIfUnmatchedStructTags = false + +// FailIfDoubleHeaderNames indicates whether it is considered an error when a header name is repeated +// in the csv header. 
+var FailIfDoubleHeaderNames = false + +// ShouldAlignDuplicateHeadersWithStructFieldOrder indicates whether we should align duplicate CSV +// headers per their alignment in the struct definition. +var ShouldAlignDuplicateHeadersWithStructFieldOrder = false + +// TagSeparator defines seperator string for multiple csv tags in struct fields +var TagSeparator = "," + +// -------------------------------------------------------------------------- +// CSVWriter used to format CSV + +var selfCSVWriter = DefaultCSVWriter + +// DefaultCSVWriter is the default SafeCSVWriter used to format CSV (cf. csv.NewWriter) +func DefaultCSVWriter(out io.Writer) *SafeCSVWriter { + writer := NewSafeCSVWriter(csv.NewWriter(out)) + + // As only one rune can be defined as a CSV separator, we are going to trim + // the custom tag separator and use the first rune. + if runes := []rune(strings.TrimSpace(TagSeparator)); len(runes) > 0 { + writer.Comma = runes[0] + } + + return writer +} + +// SetCSVWriter sets the SafeCSVWriter used to format CSV. +func SetCSVWriter(csvWriter func(io.Writer) *SafeCSVWriter) { + selfCSVWriter = csvWriter +} + +func getCSVWriter(out io.Writer) *SafeCSVWriter { + return selfCSVWriter(out) +} + +// -------------------------------------------------------------------------- +// CSVReader used to parse CSV + +var selfCSVReader = DefaultCSVReader + +// DefaultCSVReader is the default CSV reader used to parse CSV (cf. csv.NewReader) +func DefaultCSVReader(in io.Reader) CSVReader { + return csv.NewReader(in) +} + +// LazyCSVReader returns a lazy CSV reader, with LazyQuotes and TrimLeadingSpace. +func LazyCSVReader(in io.Reader) CSVReader { + csvReader := csv.NewReader(in) + csvReader.LazyQuotes = true + csvReader.TrimLeadingSpace = true + return csvReader +} + +// SetCSVReader sets the CSV reader used to parse CSV. 
+func SetCSVReader(csvReader func(io.Reader) CSVReader) { + selfCSVReader = csvReader +} + +func getCSVReader(in io.Reader) CSVReader { + return selfCSVReader(in) +} + +// -------------------------------------------------------------------------- +// Marshal functions + +// MarshalFile saves the interface as CSV in the file. +func MarshalFile(in interface{}, file *os.File) (err error) { + return Marshal(in, file) +} + +// MarshalString returns the CSV string from the interface. +func MarshalString(in interface{}) (out string, err error) { + bufferString := bytes.NewBufferString(out) + if err := Marshal(in, bufferString); err != nil { + return "", err + } + return bufferString.String(), nil +} + +// MarshalBytes returns the CSV bytes from the interface. +func MarshalBytes(in interface{}) (out []byte, err error) { + bufferString := bytes.NewBuffer(out) + if err := Marshal(in, bufferString); err != nil { + return nil, err + } + return bufferString.Bytes(), nil +} + +// Marshal returns the CSV in writer from the interface. +func Marshal(in interface{}, out io.Writer) (err error) { + writer := getCSVWriter(out) + return writeTo(writer, in, false) +} + +// Marshal returns the CSV in writer from the interface. +func MarshalWithoutHeaders(in interface{}, out io.Writer) (err error) { + writer := getCSVWriter(out) + return writeTo(writer, in, true) +} + +// MarshalChan returns the CSV read from the channel. +func MarshalChan(c <-chan interface{}, out *SafeCSVWriter) error { + return writeFromChan(out, c) +} + +// MarshalCSV returns the CSV in writer from the interface. +func MarshalCSV(in interface{}, out *SafeCSVWriter) (err error) { + return writeTo(out, in, false) +} + +// MarshalCSVWithoutHeaders returns the CSV in writer from the interface. 
+func MarshalCSVWithoutHeaders(in interface{}, out *SafeCSVWriter) (err error) { + return writeTo(out, in, true) +} + +// -------------------------------------------------------------------------- +// Unmarshal functions + +// UnmarshalFile parses the CSV from the file in the interface. +func UnmarshalFile(in *os.File, out interface{}) error { + return Unmarshal(in, out) +} + +// UnmarshalString parses the CSV from the string in the interface. +func UnmarshalString(in string, out interface{}) error { + return Unmarshal(strings.NewReader(in), out) +} + +// UnmarshalBytes parses the CSV from the bytes in the interface. +func UnmarshalBytes(in []byte, out interface{}) error { + return Unmarshal(bytes.NewReader(in), out) +} + +// Unmarshal parses the CSV from the reader in the interface. +func Unmarshal(in io.Reader, out interface{}) error { + return readTo(newDecoder(in), out) +} + +// UnmarshalWithoutHeaders parses the CSV from the reader in the interface. +func UnmarshalWithoutHeaders(in io.Reader, out interface{}) error { + return readToWithoutHeaders(newDecoder(in), out) +} + +// UnmarshalDecoder parses the CSV from the decoder in the interface +func UnmarshalDecoder(in Decoder, out interface{}) error { + return readTo(in, out) +} + +// UnmarshalCSV parses the CSV from the reader in the interface. +func UnmarshalCSV(in CSVReader, out interface{}) error { + return readTo(csvDecoder{in}, out) +} + +// UnmarshalToChan parses the CSV from the reader and send each value in the chan c. +// The channel must have a concrete type. +func UnmarshalToChan(in io.Reader, c interface{}) error { + if c == nil { + return fmt.Errorf("goscv: channel is %v", c) + } + return readEach(newDecoder(in), c) +} + +// UnmarshalDecoderToChan parses the CSV from the decoder and send each value in the chan c. +// The channel must have a concrete type. 
+func UnmarshalDecoderToChan(in SimpleDecoder, c interface{}) error { + if c == nil { + return fmt.Errorf("goscv: channel is %v", c) + } + return readEach(in, c) +} + +// UnmarshalStringToChan parses the CSV from the string and send each value in the chan c. +// The channel must have a concrete type. +func UnmarshalStringToChan(in string, c interface{}) error { + return UnmarshalToChan(strings.NewReader(in), c) +} + +// UnmarshalBytesToChan parses the CSV from the bytes and send each value in the chan c. +// The channel must have a concrete type. +func UnmarshalBytesToChan(in []byte, c interface{}) error { + return UnmarshalToChan(bytes.NewReader(in), c) +} + +// UnmarshalToCallback parses the CSV from the reader and send each value to the given func f. +// The func must look like func(Struct). +func UnmarshalToCallback(in io.Reader, f interface{}) error { + valueFunc := reflect.ValueOf(f) + t := reflect.TypeOf(f) + if t.NumIn() != 1 { + return fmt.Errorf("the given function must have exactly one parameter") + } + cerr := make(chan error) + c := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, t.In(0)), 0) + go func() { + cerr <- UnmarshalToChan(in, c.Interface()) + }() + for { + select { + case err := <-cerr: + return err + default: + } + v, notClosed := c.Recv() + if !notClosed || v.Interface() == nil { + break + } + valueFunc.Call([]reflect.Value{v}) + } + return nil +} + +// UnmarshalDecoderToCallback parses the CSV from the decoder and send each value to the given func f. +// The func must look like func(Struct). 
+func UnmarshalDecoderToCallback(in SimpleDecoder, f interface{}) error { + valueFunc := reflect.ValueOf(f) + t := reflect.TypeOf(f) + if t.NumIn() != 1 { + return fmt.Errorf("the given function must have exactly one parameter") + } + cerr := make(chan error) + c := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, t.In(0)), 0) + go func() { + cerr <- UnmarshalDecoderToChan(in, c.Interface()) + }() + for { + select { + case err := <-cerr: + return err + default: + } + v, notClosed := c.Recv() + if !notClosed || v.Interface() == nil { + break + } + valueFunc.Call([]reflect.Value{v}) + } + return nil +} + +// UnmarshalBytesToCallback parses the CSV from the bytes and send each value to the given func f. +// The func must look like func(Struct). +func UnmarshalBytesToCallback(in []byte, f interface{}) error { + return UnmarshalToCallback(bytes.NewReader(in), f) +} + +// UnmarshalStringToCallback parses the CSV from the string and send each value to the given func f. +// The func must look like func(Struct). +func UnmarshalStringToCallback(in string, c interface{}) (err error) { + return UnmarshalToCallback(strings.NewReader(in), c) +} + +// CSVToMap creates a simple map from a CSV of 2 columns. 
+func CSVToMap(in io.Reader) (map[string]string, error) { + decoder := newDecoder(in) + header, err := decoder.getCSVRow() + if err != nil { + return nil, err + } + if len(header) != 2 { + return nil, fmt.Errorf("maps can only be created for csv of two columns") + } + m := make(map[string]string) + for { + line, err := decoder.getCSVRow() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + m[line[0]] = line[1] + } + return m, nil +} + +// CSVToMaps takes a reader and returns an array of dictionaries, using the header row as the keys +func CSVToMaps(reader io.Reader) ([]map[string]string, error) { + r := csv.NewReader(reader) + rows := []map[string]string{} + var header []string + for { + record, err := r.Read() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + if header == nil { + header = record + } else { + dict := map[string]string{} + for i := range header { + dict[header[i]] = record[i] + } + rows = append(rows, dict) + } + } + return rows, nil +} diff --git a/vendor/github.com/gocarina/gocsv/decode.go b/vendor/github.com/gocarina/gocsv/decode.go new file mode 100644 index 000000000000..1ef3b274d1ea --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/decode.go @@ -0,0 +1,368 @@ +package gocsv + +import ( + "encoding/csv" + "errors" + "fmt" + "io" + "reflect" +) + +// Decoder . +type Decoder interface { + getCSVRows() ([][]string, error) +} + +// SimpleDecoder . 
+type SimpleDecoder interface { + getCSVRow() ([]string, error) +} + +type decoder struct { + in io.Reader + csvDecoder *csvDecoder +} + +func newDecoder(in io.Reader) *decoder { + return &decoder{in: in} +} + +func (decode *decoder) getCSVRows() ([][]string, error) { + return getCSVReader(decode.in).ReadAll() +} + +func (decode *decoder) getCSVRow() ([]string, error) { + if decode.csvDecoder == nil { + decode.csvDecoder = &csvDecoder{getCSVReader(decode.in)} + } + return decode.csvDecoder.Read() +} + +type CSVReader interface { + Read() ([]string, error) + ReadAll() ([][]string, error) +} + +type csvDecoder struct { + CSVReader +} + +func (c csvDecoder) getCSVRows() ([][]string, error) { + return c.ReadAll() +} + +func (c csvDecoder) getCSVRow() ([]string, error) { + return c.Read() +} + +func mismatchStructFields(structInfo []fieldInfo, headers []string) []string { + missing := make([]string, 0) + if len(structInfo) == 0 { + return missing + } + + headerMap := make(map[string]struct{}, len(headers)) + for idx := range headers { + headerMap[headers[idx]] = struct{}{} + } + + for _, info := range structInfo { + found := false + for _, key := range info.keys { + if _, ok := headerMap[key]; ok { + found = true + break + } + } + if !found { + missing = append(missing, info.keys...) 
+ } + } + return missing +} + +func mismatchHeaderFields(structInfo []fieldInfo, headers []string) []string { + missing := make([]string, 0) + if len(headers) == 0 { + return missing + } + + keyMap := make(map[string]struct{}, 0) + for _, info := range structInfo { + for _, key := range info.keys { + keyMap[key] = struct{}{} + } + } + + for _, header := range headers { + if _, ok := keyMap[header]; !ok { + missing = append(missing, header) + } + } + return missing +} + +func maybeMissingStructFields(structInfo []fieldInfo, headers []string) error { + missing := mismatchStructFields(structInfo, headers) + if len(missing) != 0 { + return fmt.Errorf("found unmatched struct field with tags %v", missing) + } + return nil +} + +// Check that no header name is repeated twice +func maybeDoubleHeaderNames(headers []string) error { + headerMap := make(map[string]bool, len(headers)) + for _, v := range headers { + if _, ok := headerMap[v]; ok { + return fmt.Errorf("Repeated header name: %v", v) + } + headerMap[v] = true + } + return nil +} + +func readTo(decoder Decoder, out interface{}) error { + outValue, outType := getConcreteReflectValueAndType(out) // Get the concrete type (not pointer) (Slice or Array) + if err := ensureOutType(outType); err != nil { + return err + } + outInnerWasPointer, outInnerType := getConcreteContainerInnerType(outType) // Get the concrete inner type (not pointer) (Container<"?">) + if err := ensureOutInnerType(outInnerType); err != nil { + return err + } + csvRows, err := decoder.getCSVRows() // Get the CSV csvRows + if err != nil { + return err + } + if len(csvRows) == 0 { + return errors.New("empty csv file given") + } + if err := ensureOutCapacity(&outValue, len(csvRows)); err != nil { // Ensure the container is big enough to hold the CSV content + return err + } + outInnerStructInfo := getStructInfo(outInnerType) // Get the inner struct info to get CSV annotations + if len(outInnerStructInfo.Fields) == 0 { + return errors.New("no csv struct 
tags found") + } + + headers := csvRows[0] + body := csvRows[1:] + + csvHeadersLabels := make(map[int]*fieldInfo, len(outInnerStructInfo.Fields)) // Used to store the correspondance header <-> position in CSV + + headerCount := map[string]int{} + for i, csvColumnHeader := range headers { + curHeaderCount := headerCount[csvColumnHeader] + if fieldInfo := getCSVFieldPosition(csvColumnHeader, outInnerStructInfo, curHeaderCount); fieldInfo != nil { + csvHeadersLabels[i] = fieldInfo + if ShouldAlignDuplicateHeadersWithStructFieldOrder { + curHeaderCount++ + headerCount[csvColumnHeader] = curHeaderCount + } + } + } + + if FailIfUnmatchedStructTags { + if err := maybeMissingStructFields(outInnerStructInfo.Fields, headers); err != nil { + return err + } + } + if FailIfDoubleHeaderNames { + if err := maybeDoubleHeaderNames(headers); err != nil { + return err + } + } + + for i, csvRow := range body { + outInner := createNewOutInner(outInnerWasPointer, outInnerType) + for j, csvColumnContent := range csvRow { + if fieldInfo, ok := csvHeadersLabels[j]; ok { // Position found accordingly to header name + if err := setInnerField(&outInner, outInnerWasPointer, fieldInfo.IndexChain, csvColumnContent, fieldInfo.omitEmpty); err != nil { // Set field of struct + return &csv.ParseError{ + Line: i + 2, //add 2 to account for the header & 0-indexing of arrays + Column: j + 1, + Err: err, + } + } + } + } + outValue.Index(i).Set(outInner) + } + return nil +} + +func readEach(decoder SimpleDecoder, c interface{}) error { + headers, err := decoder.getCSVRow() + if err != nil { + return err + } + outValue, outType := getConcreteReflectValueAndType(c) // Get the concrete type (not pointer) (Slice or Array) + if err := ensureOutType(outType); err != nil { + return err + } + defer outValue.Close() + outInnerWasPointer, outInnerType := getConcreteContainerInnerType(outType) // Get the concrete inner type (not pointer) (Container<"?">) + if err := ensureOutInnerType(outInnerType); err != nil { + 
return err + } + outInnerStructInfo := getStructInfo(outInnerType) // Get the inner struct info to get CSV annotations + if len(outInnerStructInfo.Fields) == 0 { + return errors.New("no csv struct tags found") + } + csvHeadersLabels := make(map[int]*fieldInfo, len(outInnerStructInfo.Fields)) // Used to store the correspondance header <-> position in CSV + headerCount := map[string]int{} + for i, csvColumnHeader := range headers { + curHeaderCount := headerCount[csvColumnHeader] + if fieldInfo := getCSVFieldPosition(csvColumnHeader, outInnerStructInfo, curHeaderCount); fieldInfo != nil { + csvHeadersLabels[i] = fieldInfo + if ShouldAlignDuplicateHeadersWithStructFieldOrder { + curHeaderCount++ + headerCount[csvColumnHeader] = curHeaderCount + } + } + } + if err := maybeMissingStructFields(outInnerStructInfo.Fields, headers); err != nil { + if FailIfUnmatchedStructTags { + return err + } + } + if FailIfDoubleHeaderNames { + if err := maybeDoubleHeaderNames(headers); err != nil { + return err + } + } + i := 0 + for { + line, err := decoder.getCSVRow() + if err == io.EOF { + break + } else if err != nil { + return err + } + outInner := createNewOutInner(outInnerWasPointer, outInnerType) + for j, csvColumnContent := range line { + if fieldInfo, ok := csvHeadersLabels[j]; ok { // Position found accordingly to header name + if err := setInnerField(&outInner, outInnerWasPointer, fieldInfo.IndexChain, csvColumnContent, fieldInfo.omitEmpty); err != nil { // Set field of struct + return &csv.ParseError{ + Line: i + 2, //add 2 to account for the header & 0-indexing of arrays + Column: j + 1, + Err: err, + } + } + } + } + outValue.Send(outInner) + i++ + } + return nil +} + +func readToWithoutHeaders(decoder Decoder, out interface{}) error { + outValue, outType := getConcreteReflectValueAndType(out) // Get the concrete type (not pointer) (Slice or Array) + if err := ensureOutType(outType); err != nil { + return err + } + outInnerWasPointer, outInnerType := 
getConcreteContainerInnerType(outType) // Get the concrete inner type (not pointer) (Container<"?">) + if err := ensureOutInnerType(outInnerType); err != nil { + return err + } + csvRows, err := decoder.getCSVRows() // Get the CSV csvRows + if err != nil { + return err + } + if len(csvRows) == 0 { + return errors.New("empty csv file given") + } + if err := ensureOutCapacity(&outValue, len(csvRows)+1); err != nil { // Ensure the container is big enough to hold the CSV content + return err + } + outInnerStructInfo := getStructInfo(outInnerType) // Get the inner struct info to get CSV annotations + if len(outInnerStructInfo.Fields) == 0 { + return errors.New("no csv struct tags found") + } + + for i, csvRow := range csvRows { + outInner := createNewOutInner(outInnerWasPointer, outInnerType) + for j, csvColumnContent := range csvRow { + fieldInfo := outInnerStructInfo.Fields[j] + if err := setInnerField(&outInner, outInnerWasPointer, fieldInfo.IndexChain, csvColumnContent, fieldInfo.omitEmpty); err != nil { // Set field of struct + return &csv.ParseError{ + Line: i + 1, + Column: j + 1, + Err: err, + } + } + } + outValue.Index(i).Set(outInner) + } + + return nil +} + +// Check if the outType is an array or a slice +func ensureOutType(outType reflect.Type) error { + switch outType.Kind() { + case reflect.Slice: + fallthrough + case reflect.Chan: + fallthrough + case reflect.Array: + return nil + } + return fmt.Errorf("cannot use " + outType.String() + ", only slice or array supported") +} + +// Check if the outInnerType is of type struct +func ensureOutInnerType(outInnerType reflect.Type) error { + switch outInnerType.Kind() { + case reflect.Struct: + return nil + } + return fmt.Errorf("cannot use " + outInnerType.String() + ", only struct supported") +} + +func ensureOutCapacity(out *reflect.Value, csvLen int) error { + switch out.Kind() { + case reflect.Array: + if out.Len() < csvLen-1 { // Array is not big enough to hold the CSV content (arrays are not addressable) + 
return fmt.Errorf("array capacity problem: cannot store %d %s in %s", csvLen-1, out.Type().Elem().String(), out.Type().String()) + } + case reflect.Slice: + if !out.CanAddr() && out.Len() < csvLen-1 { // Slice is not big enough tho hold the CSV content and is not addressable + return fmt.Errorf("slice capacity problem and is not addressable (did you forget &?)") + } else if out.CanAddr() && out.Len() < csvLen-1 { + out.Set(reflect.MakeSlice(out.Type(), csvLen-1, csvLen-1)) // Slice is not big enough, so grows it + } + } + return nil +} + +func getCSVFieldPosition(key string, structInfo *structInfo, curHeaderCount int) *fieldInfo { + matchedFieldCount := 0 + for _, field := range structInfo.Fields { + if field.matchesKey(key) { + if matchedFieldCount >= curHeaderCount { + return &field + } else { + matchedFieldCount++ + } + } + } + return nil +} + +func createNewOutInner(outInnerWasPointer bool, outInnerType reflect.Type) reflect.Value { + if outInnerWasPointer { + return reflect.New(outInnerType) + } + return reflect.New(outInnerType).Elem() +} + +func setInnerField(outInner *reflect.Value, outInnerWasPointer bool, index []int, value string, omitEmpty bool) error { + oi := *outInner + if outInnerWasPointer { + oi = outInner.Elem() + } + return setField(oi.FieldByIndex(index), value, omitEmpty) +} diff --git a/vendor/github.com/gocarina/gocsv/decode_test.go b/vendor/github.com/gocarina/gocsv/decode_test.go new file mode 100644 index 000000000000..c3872bcb8b6b --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/decode_test.go @@ -0,0 +1,596 @@ +package gocsv + +import ( + "bytes" + "encoding/csv" + "io" + "reflect" + "strconv" + "strings" + "testing" + "time" +) + +func Test_readTo(t *testing.T) { + blah := 0 + sptr := "*string" + sptr2 := "" + b := bytes.NewBufferString(`foo,BAR,Baz,Blah,SPtr,Omit +f,1,baz,,*string,*string +e,3,b,,,`) + d := &decoder{in: b} + + var samples []Sample + if err := readTo(d, &samples); err != 
nil { + t.Fatal(err) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + + expected := Sample{Foo: "f", Bar: 1, Baz: "baz", Blah: &blah, SPtr: &sptr, Omit: &sptr} + if !reflect.DeepEqual(expected, samples[0]) { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + + expected = Sample{Foo: "e", Bar: 3, Baz: "b", Blah: &blah, SPtr: &sptr2} + if !reflect.DeepEqual(expected, samples[1]) { + t.Fatalf("expected second sample %v, got %v", expected, samples[1]) + } + + b = bytes.NewBufferString(`foo,BAR,Baz +f,1,baz +e,BAD_INPUT,b`) + d = &decoder{in: b} + samples = []Sample{} + err := readTo(d, &samples) + if err == nil { + t.Fatalf("Expected error from bad input, got: %+v", samples) + } + switch actualErr := err.(type) { + case *csv.ParseError: + if actualErr.Line != 3 { + t.Fatalf("Expected csv.ParseError on line 3, got: %d", actualErr.Line) + } + if actualErr.Column != 2 { + t.Fatalf("Expected csv.ParseError in column 2, got: %d", actualErr.Column) + } + default: + t.Fatalf("incorrect error type: %T", err) + } + +} + +func Test_readTo_Time(t *testing.T) { + b := bytes.NewBufferString(`Foo +1970-01-01T03:01:00+03:00`) + d := &decoder{in: b} + + var samples []DateTime + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + + rt, _ := time.Parse(time.RFC3339, "1970-01-01T03:01:00+03:00") + + expected := DateTime{Foo: rt} + + if !reflect.DeepEqual(expected, samples[0]) { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } +} + +func Test_readTo_complex_embed(t *testing.T) { + b := bytes.NewBufferString(`first,foo,BAR,Baz,last,abc +aa,bb,11,cc,dd,ee +ff,gg,22,hh,ii,jj`) + d := &decoder{in: b} + + var samples []SkipFieldSample + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + expected := SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "aa", + Sample: Sample{ + Foo: 
"bb", + Bar: 11, + Baz: "cc", + }, + Quux: "dd", + }, + Corge: "ee", + } + if expected != samples[0] { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + expected = SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "ff", + Sample: Sample{ + Foo: "gg", + Bar: 22, + Baz: "hh", + }, + Quux: "ii", + }, + Corge: "jj", + } + if expected != samples[1] { + t.Fatalf("expected first sample %v, got %v", expected, samples[1]) + } +} + +func Test_readEach(t *testing.T) { + b := bytes.NewBufferString(`first,foo,BAR,Baz,last,abc +aa,bb,11,cc,dd,ee +ff,gg,22,hh,ii,jj`) + d := &decoder{in: b} + + c := make(chan SkipFieldSample) + var samples []SkipFieldSample + go func() { + if err := readEach(d, c); err != nil { + t.Fatal(err) + } + }() + for v := range c { + samples = append(samples, v) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + expected := SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "aa", + Sample: Sample{ + Foo: "bb", + Bar: 11, + Baz: "cc", + }, + Quux: "dd", + }, + Corge: "ee", + } + if expected != samples[0] { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + expected = SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "ff", + Sample: Sample{ + Foo: "gg", + Bar: 22, + Baz: "hh", + }, + Quux: "ii", + }, + Corge: "jj", + } + if expected != samples[1] { + t.Fatalf("expected first sample %v, got %v", expected, samples[1]) + } +} + +func Test_maybeMissingStructFields(t *testing.T) { + structTags := []fieldInfo{ + {keys: []string{"foo"}}, + {keys: []string{"bar"}}, + {keys: []string{"baz"}}, + } + badHeaders := []string{"hi", "mom", "bacon"} + goodHeaders := []string{"foo", "bar", "baz"} + + // no tags to match, expect no error + if err := maybeMissingStructFields([]fieldInfo{}, goodHeaders); err != nil { + t.Fatal(err) + } + + // bad headers, expect an error + if err := maybeMissingStructFields(structTags, badHeaders); err == nil { + t.Fatal("expected an error, but 
no error found") + } + + // good headers, expect no error + if err := maybeMissingStructFields(structTags, goodHeaders); err != nil { + t.Fatal(err) + } + + // extra headers, but all structtags match; expect no error + moarHeaders := append(goodHeaders, "qux", "quux", "corge", "grault") + if err := maybeMissingStructFields(structTags, moarHeaders); err != nil { + t.Fatal(err) + } + + // not all structTags match, but there's plenty o' headers; expect + // error + mismatchedHeaders := []string{"foo", "qux", "quux", "corgi"} + if err := maybeMissingStructFields(structTags, mismatchedHeaders); err == nil { + t.Fatal("expected an error, but no error found") + } +} + +func Test_maybeDoubleHeaderNames(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR,foo +f,1,baz +e,3,b`) + d := &decoder{in: b} + var samples []Sample + + // *** check maybeDoubleHeaderNames + if err := maybeDoubleHeaderNames([]string{"foo", "BAR", "foo"}); err == nil { + t.Fatal("maybeDoubleHeaderNames did not raise an error when a should have.") + } + + // *** check readTo + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + // Double header allowed, value should be of third row + if samples[0].Foo != "baz" { + t.Fatal("Double header allowed, value should be of third row but is not. Function called is readTo.") + } + + b = bytes.NewBufferString(`foo,BAR,foo +f,1,baz +e,3,b`) + d = &decoder{in: b} + ShouldAlignDuplicateHeadersWithStructFieldOrder = true + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + // Double header allowed, value should be of first row + if samples[0].Foo != "f" { + t.Fatal("Double header allowed, value should be of first row but is not. Function called is readTo.") + } + + ShouldAlignDuplicateHeadersWithStructFieldOrder = false + // Double header not allowed, should fail + FailIfDoubleHeaderNames = true + if err := readTo(d, &samples); err == nil { + t.Fatal("Double header not allowed but no error raised. 
Function called is readTo.") + } + + // *** check readEach + FailIfDoubleHeaderNames = false + b = bytes.NewBufferString(`foo,BAR,foo +f,1,baz +e,3,b`) + d = &decoder{in: b} + samples = samples[:0] + c := make(chan Sample) + go func() { + if err := readEach(d, c); err != nil { + t.Fatal(err) + } + }() + for v := range c { + samples = append(samples, v) + } + // Double header allowed, value should be of third row + if samples[0].Foo != "baz" { + t.Fatal("Double header allowed, value should be of third row but is not. Function called is readEach.") + } + // Double header not allowed, should fail + FailIfDoubleHeaderNames = true + b = bytes.NewBufferString(`foo,BAR,foo +f,1,baz +e,3,b`) + d = &decoder{in: b} + c = make(chan Sample) + go func() { + if err := readEach(d, c); err == nil { + t.Fatal("Double header not allowed but no error raised. Function called is readEach.") + } + }() + for v := range c { + samples = append(samples, v) + } +} + +func TestUnmarshalToCallback(t *testing.T) { + b := bytes.NewBufferString(`first,foo,BAR,Baz,last,abc +aa,bb,11,cc,dd,ee +ff,gg,22,hh,ii,jj`) + var samples []SkipFieldSample + if err := UnmarshalBytesToCallback(b.Bytes(), func(s SkipFieldSample) { + samples = append(samples, s) + }); err != nil { + t.Fatal(err) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + expected := SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "aa", + Sample: Sample{ + Foo: "bb", + Bar: 11, + Baz: "cc", + }, + Quux: "dd", + }, + Corge: "ee", + } + if expected != samples[0] { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + expected = SkipFieldSample{ + EmbedSample: EmbedSample{ + Qux: "ff", + Sample: Sample{ + Foo: "gg", + Bar: 22, + Baz: "hh", + }, + Quux: "ii", + }, + Corge: "jj", + } + if expected != samples[1] { + t.Fatalf("expected first sample %v, got %v", expected, samples[1]) + } +} + +// TestRenamedTypes tests for unmarshaling functions on redefined basic types. 
+func TestRenamedTypesUnmarshal(t *testing.T) { + b := bytes.NewBufferString(`foo;bar +1,4;1.5 +2,3;2.4`) + d := &decoder{in: b} + var samples []RenamedSample + + // Set different csv field separator to enable comma in floats + SetCSVReader(func(in io.Reader) CSVReader { + csvin := csv.NewReader(in) + csvin.Comma = ';' + return csvin + }) + // Switch back to default for tests executed after this + defer SetCSVReader(DefaultCSVReader) + + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if samples[0].RenamedFloatUnmarshaler != 1.4 { + t.Fatalf("Parsed float value wrong for renamed float64 type. Expected 1.4, got %v.", samples[0].RenamedFloatUnmarshaler) + } + if samples[0].RenamedFloatDefault != 1.5 { + t.Fatalf("Parsed float value wrong for renamed float64 type without an explicit unmarshaler function. Expected 1.5, got %v.", samples[0].RenamedFloatDefault) + } + + // Test that errors raised by UnmarshalCSV are correctly reported + b = bytes.NewBufferString(`foo;bar +4.2;2.4`) + d = &decoder{in: b} + samples = samples[:0] + if perr, _ := readTo(d, &samples).(*csv.ParseError); perr == nil { + t.Fatalf("Expected ParseError, got nil.") + } else if _, ok := perr.Err.(UnmarshalError); !ok { + t.Fatalf("Expected UnmarshalError, got %v", perr.Err) + } +} + +func (rf *RenamedFloat64Unmarshaler) UnmarshalCSV(csv string) (err error) { + // Purely for testing purposes: Raise error on specific string + if csv == "4.2" { + return UnmarshalError{"Test error: Invalid float 4.2"} + } + + // Convert , to . 
before parsing to create valid float strings + converted := strings.Replace(csv, ",", ".", -1) + var f float64 + if f, err = strconv.ParseFloat(converted, 64); err != nil { + return err + } + *rf = RenamedFloat64Unmarshaler(f) + return nil +} + +type UnmarshalError struct { + msg string +} + +func (e UnmarshalError) Error() string { + return e.msg +} + +func TestMultipleStructTags(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR,Baz +e,3,b`) + d := &decoder{in: b} + + var samples []MultiTagSample + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if samples[0].Foo != "b" { + t.Fatalf("expected second tag value 'b' in multi tag struct field, got %v", samples[0].Foo) + } + + b = bytes.NewBufferString(`foo,BAR +e,3`) + d = &decoder{in: b} + + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if samples[0].Foo != "e" { + t.Fatalf("wrong value in multi tag struct field, expected 'e', got %v", samples[0].Foo) + } + + b = bytes.NewBufferString(`BAR,Baz +3,b`) + d = &decoder{in: b} + + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + if samples[0].Foo != "b" { + t.Fatal("wrong value in multi tag struct field") + } +} + +func TestStructTagSeparator(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR,Baz +e,3,b`) + d := &decoder{in: b} + + defaultTagSeparator := TagSeparator + TagSeparator = "|" + defer func() { + TagSeparator = defaultTagSeparator + }() + + var samples []TagSeparatorSample + if err := readTo(d, &samples); err != nil { + t.Fatal(err) + } + + if samples[0].Foo != "b" { + t.Fatal("expected second tag value in multi tag struct field.") + } +} + +func TestCSVToMap(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR +4,Jose +2,Daniel +5,Vincent`) + m, err := CSVToMap(bytes.NewReader(b.Bytes())) + if err != nil { + t.Fatal(err) + } + if m["4"] != "Jose" { + t.Fatal("Expected Jose got", m["4"]) + } + if m["2"] != "Daniel" { + t.Fatal("Expected Daniel got", m["2"]) + } + if m["5"] != "Vincent" { + 
t.Fatal("Expected Vincent got", m["5"]) + } + + b = bytes.NewBufferString(`foo,BAR,Baz +e,3,b`) + _, err = CSVToMap(bytes.NewReader(b.Bytes())) + if err == nil { + t.Fatal("Something went wrong") + } + b = bytes.NewBufferString(`foo +e`) + _, err = CSVToMap(bytes.NewReader(b.Bytes())) + if err == nil { + t.Fatal("Something went wrong") + } +} + +func TestCSVToMaps(t *testing.T) { + b := bytes.NewBufferString(`foo,BAR,Baz +4,Jose,42 +2,Daniel,21 +5,Vincent,84`) + m, err := CSVToMaps(bytes.NewReader(b.Bytes())) + if err != nil { + t.Fatal(err) + } + firstRecord := m[0] + if firstRecord["foo"] != "4" { + t.Fatal("Expected 4 got", firstRecord["foo"]) + } + if firstRecord["BAR"] != "Jose" { + t.Fatal("Expected Jose got", firstRecord["BAR"]) + } + if firstRecord["Baz"] != "42" { + t.Fatal("Expected 42 got", firstRecord["Baz"]) + } + secondRecord := m[1] + if secondRecord["foo"] != "2" { + t.Fatal("Expected 2 got", secondRecord["foo"]) + } + if secondRecord["BAR"] != "Daniel" { + t.Fatal("Expected Daniel got", secondRecord["BAR"]) + } + if secondRecord["Baz"] != "21" { + t.Fatal("Expected 21 got", secondRecord["Baz"]) + } + thirdRecord := m[2] + if thirdRecord["foo"] != "5" { + t.Fatal("Expected 5 got", thirdRecord["foo"]) + } + if thirdRecord["BAR"] != "Vincent" { + t.Fatal("Expected Vincent got", thirdRecord["BAR"]) + } + if thirdRecord["Baz"] != "84" { + t.Fatal("Expected 84 got", thirdRecord["Baz"]) + } +} + +type trimDecoder struct { + csvReader CSVReader +} + +func (c *trimDecoder) getCSVRow() ([]string, error) { + recoder, err := c.csvReader.Read() + for i, r := range recoder { + recoder[i] = strings.TrimRight(r, " ") + } + return recoder, err +} + +func TestUnmarshalToDecoder(t *testing.T) { + blah := 0 + sptr := "*string" + sptr2 := "" + b := bytes.NewBufferString(`foo,BAR,Baz,Blah,SPtr +f,1,baz,, *string +e,3,b,, `) + + var samples []Sample + if err := UnmarshalDecoderToCallback(&trimDecoder{LazyCSVReader(b)}, func(s Sample) { + samples = append(samples, s) + 
}); err != nil { + t.Fatal(err) + } + if len(samples) != 2 { + t.Fatalf("expected 2 sample instances, got %d", len(samples)) + } + + expected := Sample{Foo: "f", Bar: 1, Baz: "baz", Blah: &blah, SPtr: &sptr} + if !reflect.DeepEqual(expected, samples[0]) { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + + expected = Sample{Foo: "e", Bar: 3, Baz: "b", Blah: &blah, SPtr: &sptr2} + if !reflect.DeepEqual(expected, samples[1]) { + t.Fatalf("expected second sample %v, got %v", expected, samples[1]) + } +} + +func TestUnmarshalWithoutHeader(t *testing.T) { + blah := 0 + sptr := "" + b := bytes.NewBufferString(`f,1,baz,1.66,,, +e,3,b,,,,`) + d := &decoder{in: b} + + var samples []Sample + if err := readToWithoutHeaders(d, &samples); err != nil { + t.Fatal(err) + } + + expected := Sample{Foo: "f", Bar: 1, Baz: "baz", Frop: 1.66, Blah: &blah, SPtr: &sptr} + if !reflect.DeepEqual(expected, samples[0]) { + t.Fatalf("expected first sample %v, got %v", expected, samples[0]) + } + + expected = Sample{Foo: "e", Bar: 3, Baz: "b", Frop: 0, Blah: &blah, SPtr: &sptr} + if !reflect.DeepEqual(expected, samples[1]) { + t.Fatalf("expected second sample %v, got %v", expected, samples[1]) + } +} diff --git a/vendor/github.com/gocarina/gocsv/encode.go b/vendor/github.com/gocarina/gocsv/encode.go new file mode 100644 index 000000000000..7a41f9fcb6a7 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/encode.go @@ -0,0 +1,139 @@ +package gocsv + +import ( + "fmt" + "io" + "reflect" +) + +type encoder struct { + out io.Writer +} + +func newEncoder(out io.Writer) *encoder { + return &encoder{out} +} + +func writeFromChan(writer *SafeCSVWriter, c <-chan interface{}) error { + // Get the first value. It wil determine the header structure. 
+ firstValue, ok := <-c + if !ok { + return fmt.Errorf("channel is closed") + } + inValue, inType := getConcreteReflectValueAndType(firstValue) // Get the concrete type + if err := ensureStructOrPtr(inType); err != nil { + return err + } + inInnerWasPointer := inType.Kind() == reflect.Ptr + inInnerStructInfo := getStructInfo(inType) // Get the inner struct info to get CSV annotations + csvHeadersLabels := make([]string, len(inInnerStructInfo.Fields)) + for i, fieldInfo := range inInnerStructInfo.Fields { // Used to write the header (first line) in CSV + csvHeadersLabels[i] = fieldInfo.getFirstKey() + } + if err := writer.Write(csvHeadersLabels); err != nil { + return err + } + write := func(val reflect.Value) error { + for j, fieldInfo := range inInnerStructInfo.Fields { + csvHeadersLabels[j] = "" + inInnerFieldValue, err := getInnerField(val, inInnerWasPointer, fieldInfo.IndexChain) // Get the correct field header <-> position + if err != nil { + return err + } + csvHeadersLabels[j] = inInnerFieldValue + } + if err := writer.Write(csvHeadersLabels); err != nil { + return err + } + return nil + } + if err := write(inValue); err != nil { + return err + } + for v := range c { + val, _ := getConcreteReflectValueAndType(v) // Get the concrete type (not pointer) (Slice or Array) + if err := ensureStructOrPtr(inType); err != nil { + return err + } + if err := write(val); err != nil { + return err + } + } + writer.Flush() + return writer.Error() +} + +func writeTo(writer *SafeCSVWriter, in interface{}, omitHeaders bool) error { + inValue, inType := getConcreteReflectValueAndType(in) // Get the concrete type (not pointer) (Slice or Array) + if err := ensureInType(inType); err != nil { + return err + } + inInnerWasPointer, inInnerType := getConcreteContainerInnerType(inType) // Get the concrete inner type (not pointer) (Container<"?">) + if err := ensureInInnerType(inInnerType); err != nil { + return err + } + inInnerStructInfo := getStructInfo(inInnerType) // Get the inner 
struct info to get CSV annotations + csvHeadersLabels := make([]string, len(inInnerStructInfo.Fields)) + for i, fieldInfo := range inInnerStructInfo.Fields { // Used to write the header (first line) in CSV + csvHeadersLabels[i] = fieldInfo.getFirstKey() + } + if !omitHeaders { + if err := writer.Write(csvHeadersLabels); err != nil { + return err + } + } + inLen := inValue.Len() + for i := 0; i < inLen; i++ { // Iterate over container rows + for j, fieldInfo := range inInnerStructInfo.Fields { + csvHeadersLabels[j] = "" + inInnerFieldValue, err := getInnerField(inValue.Index(i), inInnerWasPointer, fieldInfo.IndexChain) // Get the correct field header <-> position + if err != nil { + return err + } + csvHeadersLabels[j] = inInnerFieldValue + } + if err := writer.Write(csvHeadersLabels); err != nil { + return err + } + } + writer.Flush() + return writer.Error() +} + +func ensureStructOrPtr(t reflect.Type) error { + switch t.Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + return nil + } + return fmt.Errorf("cannot use " + t.String() + ", only slice or array supported") +} + +// Check if the inType is an array or a slice +func ensureInType(outType reflect.Type) error { + switch outType.Kind() { + case reflect.Slice: + fallthrough + case reflect.Array: + return nil + } + return fmt.Errorf("cannot use " + outType.String() + ", only slice or array supported") +} + +// Check if the inInnerType is of type struct +func ensureInInnerType(outInnerType reflect.Type) error { + switch outInnerType.Kind() { + case reflect.Struct: + return nil + } + return fmt.Errorf("cannot use " + outInnerType.String() + ", only struct supported") +} + +func getInnerField(outInner reflect.Value, outInnerWasPointer bool, index []int) (string, error) { + oi := outInner + if outInnerWasPointer { + oi = outInner.Elem() + } + return getFieldAsString(oi.FieldByIndex(index)) +} diff --git a/vendor/github.com/gocarina/gocsv/encode_test.go 
b/vendor/github.com/gocarina/gocsv/encode_test.go new file mode 100644 index 000000000000..85e807b4c2e1 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/encode_test.go @@ -0,0 +1,295 @@ +package gocsv + +import ( + "bytes" + "encoding/csv" + "io" + "math" + "strconv" + "strings" + "testing" + "time" +) + +func assertLine(t *testing.T, expected, actual []string) { + if len(expected) != len(actual) { + t.Fatalf("line length mismatch between expected: %d and actual: %d", len(expected), len(actual)) + } + for i := range expected { + if expected[i] != actual[i] { + t.Fatalf("mismatch on field %d at line `%s`: %s != %s", i, expected, expected[i], actual[i]) + } + } +} + +func Test_writeTo(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + blah := 2 + sptr := "*string" + s := []Sample{ + {Foo: "f", Bar: 1, Baz: "baz", Frop: 0.1, Blah: &blah, SPtr: &sptr}, + {Foo: "e", Bar: 3, Baz: "b", Frop: 6.0 / 13, Blah: nil, SPtr: nil}, + } + if err := writeTo(NewSafeCSVWriter(csv.NewWriter(e.out)), s, false); err != nil { + t.Fatal(err) + } + + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 3 { + t.Fatalf("expected 3 lines, got %d", len(lines)) + } + assertLine(t, []string{"foo", "BAR", "Baz", "Quux", "Blah", "SPtr", "Omit"}, lines[0]) + assertLine(t, []string{"f", "1", "baz", "0.1", "2", "*string", ""}, lines[1]) + assertLine(t, []string{"e", "3", "b", "0.46153846153846156", "", "", ""}, lines[2]) +} + +func Test_writeTo_Time(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + d := time.Unix(60, 0) + s := []DateTime{ + {Foo: d}, + } + if err := writeTo(NewSafeCSVWriter(csv.NewWriter(e.out)), s, true); err != nil { + t.Fatal(err) + } + + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + + ft := time.Now() + ft.UnmarshalText([]byte(lines[0][0])) + if err != nil { + t.Fatal(err) + } + if ft.Sub(d) != 0 { + t.Fatalf("Dates doesn't match: %s and 
actual: %s", d, d) + } + + m, _ := d.MarshalText() + assertLine(t, []string{string(m)}, lines[0]) +} + +func Test_writeTo_NoHeaders(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + blah := 2 + sptr := "*string" + s := []Sample{ + {Foo: "f", Bar: 1, Baz: "baz", Frop: 0.1, Blah: &blah, SPtr: &sptr}, + {Foo: "e", Bar: 3, Baz: "b", Frop: 6.0 / 13, Blah: nil, SPtr: nil}, + } + if err := writeTo(NewSafeCSVWriter(csv.NewWriter(e.out)), s, true); err != nil { + t.Fatal(err) + } + + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 2 { + t.Fatalf("expected 2 lines, got %d", len(lines)) + } + assertLine(t, []string{"f", "1", "baz", "0.1", "2", "*string", ""}, lines[0]) + assertLine(t, []string{"e", "3", "b", "0.46153846153846156", "", "", ""}, lines[1]) +} + +func Test_writeTo_multipleTags(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + s := []MultiTagSample{ + {Foo: "abc", Bar: 123}, + {Foo: "def", Bar: 234}, + } + if err := writeTo(NewSafeCSVWriter(csv.NewWriter(e.out)), s, false); err != nil { + t.Fatal(err) + } + + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 3 { + t.Fatalf("expected 3 lines, got %d", len(lines)) + } + // the first tag for each field is the encoding CSV header + assertLine(t, []string{"Baz", "BAR"}, lines[0]) + assertLine(t, []string{"abc", "123"}, lines[1]) + assertLine(t, []string{"def", "234"}, lines[2]) +} + +func Test_writeTo_embed(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + blah := 2 + sptr := "*string" + s := []EmbedSample{ + { + Qux: "aaa", + Sample: Sample{Foo: "f", Bar: 1, Baz: "baz", Frop: 0.2, Blah: &blah, SPtr: &sptr}, + Ignore: "shouldn't be marshalled", + Quux: "zzz", + Grault: math.Pi, + }, + } + if err := writeTo(NewSafeCSVWriter(csv.NewWriter(e.out)), s, false); err != nil { + t.Fatal(err) + } + + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if 
len(lines) != 2 { + t.Fatalf("expected 2 lines, got %d", len(lines)) + } + assertLine(t, []string{"first", "foo", "BAR", "Baz", "Quux", "Blah", "SPtr", "Omit", "garply", "last"}, lines[0]) + assertLine(t, []string{"aaa", "f", "1", "baz", "0.2", "2", "*string", "", "3.141592653589793", "zzz"}, lines[1]) +} + +func Test_writeTo_complex_embed(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + sptr := "*string" + sfs := []SkipFieldSample{ + { + EmbedSample: EmbedSample{ + Qux: "aaa", + Sample: Sample{ + Foo: "bbb", + Bar: 111, + Baz: "ddd", + Frop: 1.2e22, + Blah: nil, + SPtr: &sptr, + }, + Ignore: "eee", + Grault: 0.1, + Quux: "fff", + }, + MoreIgnore: "ggg", + Corge: "hhh", + }, + } + if err := writeTo(NewSafeCSVWriter(csv.NewWriter(e.out)), sfs, false); err != nil { + t.Fatal(err) + } + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 2 { + t.Fatalf("expected 2 lines, got %d", len(lines)) + } + assertLine(t, []string{"first", "foo", "BAR", "Baz", "Quux", "Blah", "SPtr", "Omit", "garply", "last", "abc"}, lines[0]) + assertLine(t, []string{"aaa", "bbb", "111", "ddd", "12000000000000000000000", "", "*string", "", "0.1", "fff", "hhh"}, lines[1]) +} + +func Test_writeToChan(t *testing.T) { + b := bytes.Buffer{} + e := &encoder{out: &b} + c := make(chan interface{}) + sptr := "*string" + go func() { + for i := 0; i < 100; i++ { + v := Sample{Foo: "f", Bar: i, Baz: "baz" + strconv.Itoa(i), Frop: float64(i), Blah: nil, SPtr: &sptr} + c <- v + } + close(c) + }() + if err := MarshalChan(c, NewSafeCSVWriter(csv.NewWriter(e.out))); err != nil { + t.Fatal(err) + } + lines, err := csv.NewReader(&b).ReadAll() + if err != nil { + t.Fatal(err) + } + if len(lines) != 101 { + t.Fatalf("expected 100 lines, got %d", len(lines)) + } + for i, l := range lines { + if i == 0 { + assertLine(t, []string{"foo", "BAR", "Baz", "Quux", "Blah", "SPtr", "Omit"}, l) + continue + } + assertLine(t, []string{"f", strconv.Itoa(i - 1), "baz" 
+ strconv.Itoa(i-1), strconv.FormatFloat(float64(i-1), 'f', -1, 64), "", "*string", ""}, l) + } +} + +// TestRenamedTypes tests for marshaling functions on redefined basic types. +func TestRenamedTypesMarshal(t *testing.T) { + samples := []RenamedSample{ + {RenamedFloatUnmarshaler: 1.4, RenamedFloatDefault: 1.5}, + {RenamedFloatUnmarshaler: 2.3, RenamedFloatDefault: 2.4}, + } + + SetCSVWriter(func(out io.Writer) *SafeCSVWriter { + csvout := NewSafeCSVWriter(csv.NewWriter(out)) + csvout.Comma = ';' + return csvout + }) + // Switch back to default for tests executed after this + defer SetCSVWriter(DefaultCSVWriter) + + csvContent, err := MarshalString(&samples) + if err != nil { + t.Fatal(err) + } + if csvContent != "foo;bar\n1,4;1.5\n2,3;2.4\n" { + t.Fatalf("Error marshaling floats with , as separator. Expected \nfoo;bar\n1,4;1.5\n2,3;2.4\ngot:\n%v", csvContent) + } + + // Test that errors raised by MarshalCSV are correctly reported + samples = []RenamedSample{ + {RenamedFloatUnmarshaler: 4.2, RenamedFloatDefault: 1.5}, + } + _, err = MarshalString(&samples) + if _, ok := err.(MarshalError); !ok { + t.Fatalf("Expected UnmarshalError, got %v", err) + } +} + +// TestCustomTagSeparatorMarshal tests for custom tag separator in marshalling. +func TestCustomTagSeparatorMarshal(t *testing.T) { + samples := []RenamedSample{ + {RenamedFloatUnmarshaler: 1.4, RenamedFloatDefault: 1.5}, + {RenamedFloatUnmarshaler: 2.3, RenamedFloatDefault: 2.4}, + } + + TagSeparator = " | " + // Switch back to default TagSeparator after this + defer func() { + TagSeparator = "," + }() + + csvContent, err := MarshalString(&samples) + if err != nil { + t.Fatal(err) + } + if csvContent != "foo|bar\n1,4|1.5\n2,3|2.4\n" { + t.Fatalf("Error marshaling floats with , as separator. 
Expected \nfoo|bar\n1,4|1.5\n2,3|2.4\ngot:\n%v", csvContent) + } +} + +func (rf *RenamedFloat64Unmarshaler) MarshalCSV() (csv string, err error) { + if *rf == RenamedFloat64Unmarshaler(4.2) { + return "", MarshalError{"Test error: Invalid float 4.2"} + } + csv = strconv.FormatFloat(float64(*rf), 'f', 1, 64) + csv = strings.Replace(csv, ".", ",", -1) + return csv, nil +} + +type MarshalError struct { + msg string +} + +func (e MarshalError) Error() string { + return e.msg +} diff --git a/vendor/github.com/gocarina/gocsv/reflect.go b/vendor/github.com/gocarina/gocsv/reflect.go new file mode 100644 index 000000000000..9217e30b9106 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/reflect.go @@ -0,0 +1,107 @@ +package gocsv + +import ( + "reflect" + "strings" + "sync" +) + +// -------------------------------------------------------------------------- +// Reflection helpers + +type structInfo struct { + Fields []fieldInfo +} + +// fieldInfo is a struct field that should be mapped to a CSV column, or vice-versa +// Each IndexChain element before the last is the index of an the embedded struct field +// that defines Key as a tag +type fieldInfo struct { + keys []string + omitEmpty bool + IndexChain []int +} + +func (f fieldInfo) getFirstKey() string { + return f.keys[0] +} + +func (f fieldInfo) matchesKey(key string) bool { + for _, k := range f.keys { + if key == k || strings.TrimSpace(key) == k { + return true + } + } + return false +} + +var structMap = make(map[reflect.Type]*structInfo) +var structMapMutex sync.RWMutex + +func getStructInfo(rType reflect.Type) *structInfo { + structMapMutex.RLock() + stInfo, ok := structMap[rType] + structMapMutex.RUnlock() + if ok { + return stInfo + } + fieldsList := getFieldInfos(rType, []int{}) + stInfo = &structInfo{fieldsList} + return stInfo +} + +func getFieldInfos(rType reflect.Type, parentIndexChain []int) []fieldInfo { + fieldsCount := rType.NumField() + fieldsList := 
make([]fieldInfo, 0, fieldsCount) + for i := 0; i < fieldsCount; i++ { + field := rType.Field(i) + if field.PkgPath != "" { + continue + } + indexChain := append(parentIndexChain, i) + // if the field is an embedded struct, create a fieldInfo for each of its fields + if field.Anonymous && field.Type.Kind() == reflect.Struct { + fieldsList = append(fieldsList, getFieldInfos(field.Type, indexChain)...) + continue + } + fieldInfo := fieldInfo{IndexChain: indexChain} + fieldTag := field.Tag.Get("csv") + fieldTags := strings.Split(fieldTag, TagSeparator) + filteredTags := []string{} + for _, fieldTagEntry := range fieldTags { + if fieldTagEntry != "omitempty" { + filteredTags = append(filteredTags, fieldTagEntry) + } else { + fieldInfo.omitEmpty = true + } + } + + if len(filteredTags) == 1 && filteredTags[0] == "-" { + continue + } else if len(filteredTags) > 0 && filteredTags[0] != "" { + fieldInfo.keys = filteredTags + } else { + fieldInfo.keys = []string{field.Name} + } + fieldsList = append(fieldsList, fieldInfo) + } + return fieldsList +} + +func getConcreteContainerInnerType(in reflect.Type) (inInnerWasPointer bool, inInnerType reflect.Type) { + inInnerType = in.Elem() + inInnerWasPointer = false + if inInnerType.Kind() == reflect.Ptr { + inInnerWasPointer = true + inInnerType = inInnerType.Elem() + } + return inInnerWasPointer, inInnerType +} + +func getConcreteReflectValueAndType(in interface{}) (reflect.Value, reflect.Type) { + value := reflect.ValueOf(in) + if value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value, value.Type() +} diff --git a/vendor/github.com/gocarina/gocsv/safe_csv.go b/vendor/github.com/gocarina/gocsv/safe_csv.go new file mode 100644 index 000000000000..4b2882f1c425 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/safe_csv.go @@ -0,0 +1,32 @@ +package gocsv + +//Wraps around SafeCSVWriter and makes it thread safe. 
+import ( + "encoding/csv" + "sync" +) + +type SafeCSVWriter struct { + *csv.Writer + m sync.Mutex +} + +func NewSafeCSVWriter(original *csv.Writer) *SafeCSVWriter { + return &SafeCSVWriter{ + Writer: original, + } +} + +//Override write +func (w *SafeCSVWriter) Write(row []string) error { + w.m.Lock() + defer w.m.Unlock() + return w.Writer.Write(row) +} + +//Override flush +func (w *SafeCSVWriter) Flush() { + w.m.Lock() + w.Writer.Flush() + w.m.Unlock() +} diff --git a/vendor/github.com/gocarina/gocsv/sample_structs_test.go b/vendor/github.com/gocarina/gocsv/sample_structs_test.go new file mode 100644 index 000000000000..e5e1b09dfb33 --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/sample_structs_test.go @@ -0,0 +1,50 @@ +package gocsv + +import "time" + +type Sample struct { + Foo string `csv:"foo"` + Bar int `csv:"BAR"` + Baz string `csv:"Baz"` + Frop float64 `csv:"Quux"` + Blah *int `csv:"Blah"` + SPtr *string `csv:"SPtr"` + Omit *string `csv:"Omit,omitempty"` +} + +type EmbedSample struct { + Qux string `csv:"first"` + Sample + Ignore string `csv:"-"` + Grault float64 `csv:"garply"` + Quux string `csv:"last"` +} + +type SkipFieldSample struct { + EmbedSample + MoreIgnore string `csv:"-"` + Corge string `csv:"abc"` +} + +// Testtype for unmarshal/marshal functions on renamed basic types +type RenamedFloat64Unmarshaler float64 +type RenamedFloat64Default float64 + +type RenamedSample struct { + RenamedFloatUnmarshaler RenamedFloat64Unmarshaler `csv:"foo"` + RenamedFloatDefault RenamedFloat64Default `csv:"bar"` +} + +type MultiTagSample struct { + Foo string `csv:"Baz,foo"` + Bar int `csv:"BAR"` +} + +type TagSeparatorSample struct { + Foo string `csv:"Baz|foo"` + Bar int `csv:"BAR"` +} + +type DateTime struct { + Foo time.Time `csv:"Foo"` +} diff --git a/vendor/github.com/gocarina/gocsv/types.go b/vendor/github.com/gocarina/gocsv/types.go new file mode 100644 index 
000000000000..50d88ce1849d --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/types.go @@ -0,0 +1,456 @@ +package gocsv + +import ( + "encoding" + "fmt" + "reflect" + "strconv" + "strings" +) + +// -------------------------------------------------------------------------- +// Conversion interfaces + +// TypeMarshaller is implemented by any value that has a MarshalCSV method +// This converter is used to convert the value to it string representation +type TypeMarshaller interface { + MarshalCSV() (string, error) +} + +// Stringer is implemented by any value that has a String method +// This converter is used to convert the value to it string representation +// This converter will be used if your value does not implement TypeMarshaller +type Stringer interface { + String() string +} + +// TypeUnmarshaller is implemented by any value that has an UnmarshalCSV method +// This converter is used to convert a string to your value representation of that string +type TypeUnmarshaller interface { + UnmarshalCSV(string) error +} + +// NoUnmarshalFuncError is the custom error type to be raised in case there is no unmarshal function defined on type +type NoUnmarshalFuncError struct { + msg string +} + +func (e NoUnmarshalFuncError) Error() string { + return e.msg +} + +// NoMarshalFuncError is the custom error type to be raised in case there is no marshal function defined on type +type NoMarshalFuncError struct { + msg string +} + +func (e NoMarshalFuncError) Error() string { + return e.msg +} + +var ( + stringerType = reflect.TypeOf((*Stringer)(nil)).Elem() + marshallerType = reflect.TypeOf((*TypeMarshaller)(nil)).Elem() + unMarshallerType = reflect.TypeOf((*TypeUnmarshaller)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnMarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// -------------------------------------------------------------------------- +// Conversion helpers + +func 
toString(in interface{}) (string, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + return inValue.String(), nil + case reflect.Bool: + b := inValue.Bool() + if b { + return "true", nil + } + return "false", nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fmt.Sprintf("%v", inValue.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fmt.Sprintf("%v", inValue.Uint()), nil + case reflect.Float32: + return strconv.FormatFloat(inValue.Float(), byte('f'), -1, 32), nil + case reflect.Float64: + return strconv.FormatFloat(inValue.Float(), byte('f'), -1, 64), nil + } + return "", fmt.Errorf("No known conversion from " + inValue.Type().String() + " to string") +} + +func toBool(in interface{}) (bool, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := inValue.String() + switch s { + case "yes": + return true, nil + case "no", "": + return false, nil + default: + return strconv.ParseBool(s) + } + case reflect.Bool: + return inValue.Bool(), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i := inValue.Int() + if i != 0 { + return true, nil + } + return false, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + i := inValue.Uint() + if i != 0 { + return true, nil + } + return false, nil + case reflect.Float32, reflect.Float64: + f := inValue.Float() + if f != 0 { + return true, nil + } + return false, nil + } + return false, fmt.Errorf("No known conversion from " + inValue.Type().String() + " to bool") +} + +func toInt(in interface{}) (int64, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := strings.TrimSpace(inValue.String()) + if s == "" { + return 0, nil + } + return strconv.ParseInt(s, 0, 64) + case reflect.Bool: + if inValue.Bool() { + return 1, nil + } + return 0, 
nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return inValue.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return int64(inValue.Uint()), nil + case reflect.Float32, reflect.Float64: + return int64(inValue.Float()), nil + } + return 0, fmt.Errorf("No known conversion from " + inValue.Type().String() + " to int") +} + +func toUint(in interface{}) (uint64, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := strings.TrimSpace(inValue.String()) + if s == "" { + return 0, nil + } + + // support the float input + if strings.Contains(s, ".") { + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return 0, err + } + return uint64(f), nil + } + return strconv.ParseUint(s, 0, 64) + case reflect.Bool: + if inValue.Bool() { + return 1, nil + } + return 0, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return uint64(inValue.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return inValue.Uint(), nil + case reflect.Float32, reflect.Float64: + return uint64(inValue.Float()), nil + } + return 0, fmt.Errorf("No known conversion from " + inValue.Type().String() + " to uint") +} + +func toFloat(in interface{}) (float64, error) { + inValue := reflect.ValueOf(in) + + switch inValue.Kind() { + case reflect.String: + s := strings.TrimSpace(inValue.String()) + if s == "" { + return 0, nil + } + return strconv.ParseFloat(s, 64) + case reflect.Bool: + if inValue.Bool() { + return 1, nil + } + return 0, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(inValue.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return float64(inValue.Uint()), nil + case reflect.Float32, reflect.Float64: + return inValue.Float(), nil + } + return 0, fmt.Errorf("No known conversion from " + 
inValue.Type().String() + " to float") +} + +func setField(field reflect.Value, value string, omitEmpty bool) error { + if field.Kind() == reflect.Ptr { + if omitEmpty && value == "" { + return nil + } + if field.IsNil() { + field.Set(reflect.New(field.Type().Elem())) + } + field = field.Elem() + } + + switch field.Interface().(type) { + case string: + s, err := toString(value) + if err != nil { + return err + } + field.SetString(s) + case bool: + b, err := toBool(value) + if err != nil { + return err + } + field.SetBool(b) + case int, int8, int16, int32, int64: + i, err := toInt(value) + if err != nil { + return err + } + field.SetInt(i) + case uint, uint8, uint16, uint32, uint64: + ui, err := toUint(value) + if err != nil { + return err + } + field.SetUint(ui) + case float32, float64: + f, err := toFloat(value) + if err != nil { + return err + } + field.SetFloat(f) + default: + // Not a native type, check for unmarshal method + if err := unmarshall(field, value); err != nil { + if _, ok := err.(NoUnmarshalFuncError); !ok { + return err + } + // Could not unmarshal, check for kind, e.g. 
renamed type from basic type + switch field.Kind() { + case reflect.String: + s, err := toString(value) + if err != nil { + return err + } + field.SetString(s) + case reflect.Bool: + b, err := toBool(value) + if err != nil { + return err + } + field.SetBool(b) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + i, err := toInt(value) + if err != nil { + return err + } + field.SetInt(i) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ui, err := toUint(value) + if err != nil { + return err + } + field.SetUint(ui) + case reflect.Float32, reflect.Float64: + f, err := toFloat(value) + if err != nil { + return err + } + field.SetFloat(f) + default: + return err + } + } else { + return nil + } + } + return nil +} + +func getFieldAsString(field reflect.Value) (str string, err error) { + switch field.Kind() { + case reflect.Interface: + case reflect.Ptr: + if field.IsNil() { + return "", nil + } + return getFieldAsString(field.Elem()) + default: + // Check if field is go native type + switch field.Interface().(type) { + case string: + return field.String(), nil + case bool: + str, err = toString(field.Bool()) + if err != nil { + return str, err + } + case int, int8, int16, int32, int64: + str, err = toString(field.Int()) + if err != nil { + return str, err + } + case uint, uint8, uint16, uint32, uint64: + str, err = toString(field.Uint()) + if err != nil { + return str, err + } + case float32: + str, err = toString(float32(field.Float())) + if err != nil { + return str, err + } + case float64: + str, err = toString(field.Float()) + if err != nil { + return str, err + } + default: + // Not a native type, check for marshal method + str, err = marshall(field) + if err != nil { + if _, ok := err.(NoMarshalFuncError); !ok { + return str, err + } + // If not marshal method, is field compatible with/renamed from native type + switch field.Kind() { + case reflect.String: + return field.String(), nil + case 
reflect.Bool: + str, err = toString(field.Bool()) + if err != nil { + return str, err + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + str, err = toString(field.Int()) + if err != nil { + return str, err + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + str, err = toString(field.Uint()) + if err != nil { + return str, err + } + case reflect.Float32: + str, err = toString(float32(field.Float())) + if err != nil { + return str, err + } + case reflect.Float64: + str, err = toString(field.Float()) + if err != nil { + return str, err + } + } + } else { + return str, nil + } + } + } + return str, nil +} + +// -------------------------------------------------------------------------- +// Un/serializations helpers + +func unmarshall(field reflect.Value, value string) error { + dupField := field + unMarshallIt := func(finalField reflect.Value) error { + if finalField.CanInterface() { + fieldIface := finalField.Interface() + + fieldTypeUnmarshaller, ok := fieldIface.(TypeUnmarshaller) + if ok { + return fieldTypeUnmarshaller.UnmarshalCSV(value) + } + + // Otherwise try to use TextUnmarshaler + fieldTextUnmarshaler, ok := fieldIface.(encoding.TextUnmarshaler) + if ok { + return fieldTextUnmarshaler.UnmarshalText([]byte(value)) + } + } + + return NoUnmarshalFuncError{"No known conversion from string to " + field.Type().String() + ", " + field.Type().String() + " does not implement TypeUnmarshaller"} + } + for dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr { + if dupField.IsNil() { + dupField = reflect.New(field.Type().Elem()) + field.Set(dupField) + return unMarshallIt(dupField) + } + dupField = dupField.Elem() + } + if dupField.CanAddr() { + return unMarshallIt(dupField.Addr()) + } + return NoUnmarshalFuncError{"No known conversion from string to " + field.Type().String() + ", " + field.Type().String() + " does not implement TypeUnmarshaller"} +} + +func marshall(field 
reflect.Value) (value string, err error) { + dupField := field + marshallIt := func(finalField reflect.Value) (string, error) { + if finalField.CanInterface() { + fieldIface := finalField.Interface() + + // Use TypeMarshaller when possible + fieldTypeMarhaller, ok := fieldIface.(TypeMarshaller) + if ok { + return fieldTypeMarhaller.MarshalCSV() + } + + // Otherwise try to use TextMarshaller + fieldTextMarshaler, ok := fieldIface.(encoding.TextMarshaler) + if ok { + text, err := fieldTextMarshaler.MarshalText() + return string(text), err + } + + // Otherwise try to use Stringer + fieldStringer, ok := fieldIface.(Stringer) + if ok { + return fieldStringer.String(), nil + } + } + + return value, NoMarshalFuncError{"No known conversion from " + field.Type().String() + " to string, " + field.Type().String() + " does not implement TypeMarshaller nor Stringer"} + } + for dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr { + if dupField.IsNil() { + return value, nil + } + dupField = dupField.Elem() + } + if dupField.CanAddr() { + return marshallIt(dupField.Addr()) + } + return value, NoMarshalFuncError{"No known conversion from " + field.Type().String() + " to string, " + field.Type().String() + " does not implement TypeMarshaller nor Stringer"} +} diff --git a/vendor/github.com/gocarina/gocsv/types_test.go b/vendor/github.com/gocarina/gocsv/types_test.go new file mode 100644 index 000000000000..5828cda16bdf --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/types_test.go @@ -0,0 +1,91 @@ +package gocsv + +import ( + "reflect" + "testing" +) + +type sampleTypeUnmarshaller struct { + val string +} + +func (s *sampleTypeUnmarshaller) UnmarshalCSV(val string) error { + s.val = val + return nil +} + +func (s sampleTypeUnmarshaller) MarshalCSV() (string, error) { + return s.val, nil +} + +type sampleTextUnmarshaller struct { + val []byte +} + +func (s *sampleTextUnmarshaller) UnmarshalText(text []byte) error { 
+ s.val = text + return nil +} + +func (s sampleTextUnmarshaller) MarshalText() ([]byte, error) { + return s.val, nil +} + +type sampleStringer string + +func (s sampleStringer) String() string { + return string(s) +} + +func Benchmark_unmarshall_TypeUnmarshaller(b *testing.B) { + sample := sampleTypeUnmarshaller{} + val := reflect.ValueOf(&sample) + for n := 0; n < b.N; n++ { + if err := unmarshall(val, "foo"); err != nil { + b.Fatalf("unmarshall error: %s", err.Error()) + } + } +} + +func Benchmark_unmarshall_TextUnmarshaller(b *testing.B) { + sample := sampleTextUnmarshaller{} + val := reflect.ValueOf(&sample) + for n := 0; n < b.N; n++ { + if err := unmarshall(val, "foo"); err != nil { + b.Fatalf("unmarshall error: %s", err.Error()) + } + } +} + +func Benchmark_marshall_TypeMarshaller(b *testing.B) { + sample := sampleTypeUnmarshaller{"foo"} + val := reflect.ValueOf(&sample) + for n := 0; n < b.N; n++ { + _, err := marshall(val) + if err != nil { + b.Fatalf("marshall error: %s", err.Error()) + } + } +} + +func Benchmark_marshall_TextMarshaller(b *testing.B) { + sample := sampleTextUnmarshaller{[]byte("foo")} + val := reflect.ValueOf(&sample) + for n := 0; n < b.N; n++ { + _, err := marshall(val) + if err != nil { + b.Fatalf("marshall error: %s", err.Error()) + } + } +} + +func Benchmark_marshall_Stringer(b *testing.B) { + sample := sampleStringer("foo") + val := reflect.ValueOf(&sample) + for n := 0; n < b.N; n++ { + _, err := marshall(val) + if err != nil { + b.Fatalf("marshall error: %s", err.Error()) + } + } +} diff --git a/vendor/github.com/gocarina/gocsv/unmarshaller.go b/vendor/github.com/gocarina/gocsv/unmarshaller.go new file mode 100644 index 000000000000..17837ea4688d --- /dev/null +++ b/vendor/github.com/gocarina/gocsv/unmarshaller.go @@ -0,0 +1,115 @@ +package gocsv + +import ( + "encoding/csv" + "errors" + "fmt" + "reflect" +) + +// Unmarshaller is a CSV to struct unmarshaller. 
+type Unmarshaller struct {
+	reader *csv.Reader
+	// headerMap maps a CSV column index to its header name.
+	headerMap map[int]string
+	// fieldInfoMap maps a CSV column index to the struct field it populates.
+	fieldInfoMap map[int]*fieldInfo
+	// MismatchedHeaders lists CSV headers for which no struct field was found.
+	MismatchedHeaders []string
+	// MismatchedStructFields lists struct fields for which no CSV header was found.
+	MismatchedStructFields []string
+	// outType is reflect.TypeOf the sample value given to NewUnmarshaller
+	// (may be a pointer type; unmarshalRow dereferences it).
+	outType reflect.Type
+}
+
+// NewUnmarshaller creates an unmarshaller from a csv.Reader and a struct.
+// It consumes the first record of the reader as the CSV header row.
+func NewUnmarshaller(reader *csv.Reader, out interface{}) (*Unmarshaller, error) {
+	headers, err := reader.Read()
+	if err != nil {
+		return nil, err
+	}
+
+	um := &Unmarshaller{reader: reader, outType: reflect.TypeOf(out)}
+	err = validate(um, out, headers)
+	if err != nil {
+		return nil, err
+	}
+	return um, nil
+}
+
+// Read returns an interface{} whose runtime type is the same as the struct that
+// was used to create the Unmarshaller.
+func (um *Unmarshaller) Read() (interface{}, error) {
+	row, err := um.reader.Read()
+	if err != nil {
+		return nil, err
+	}
+	return um.unmarshalRow(row, nil)
+}
+
+// ReadUnmatched is the same as Read(), but additionally returns a map of the
+// columns (header name -> cell content) that didn't match a field in the struct.
+func (um *Unmarshaller) ReadUnmatched() (interface{}, map[string]string, error) {
+	row, err := um.reader.Read()
+	if err != nil {
+		return nil, nil, err
+	}
+	unmatched := make(map[string]string)
+	value, err := um.unmarshalRow(row, unmatched)
+	return value, unmatched, err
+}
+
+// validate ensures that a struct was used to create the Unmarshaller, and validates
+// CSV headers against the CSV tags in the struct, populating the Unmarshaller's
+// header/field maps and mismatch lists.
+func validate(um *Unmarshaller, s interface{}, headers []string) error {
+	concreteType := reflect.TypeOf(s)
+	// Work with the element type when a pointer was supplied.
+	if concreteType.Kind() == reflect.Ptr {
+		concreteType = concreteType.Elem()
+	}
+	if err := ensureOutInnerType(concreteType); err != nil {
+		return err
+	}
+	structInfo := getStructInfo(concreteType) // Get struct info to get CSV annotations.
+	if len(structInfo.Fields) == 0 {
+		return errors.New("no csv struct tags found")
+	}
+	csvHeaders := make(map[int]string)                                   // Map of column index to header name
+	csvHeadersLabels := make(map[int]*fieldInfo, len(structInfo.Fields)) // Used to store the corresponding header <-> position in CSV
+	// headerCount tracks repeated header names so duplicate headers can be
+	// matched to successive struct fields when the package-level flag is set.
+	headerCount := map[string]int{}
+	for i, csvColumnHeader := range headers {
+		csvHeaders[i] = csvColumnHeader
+		curHeaderCount := headerCount[csvColumnHeader]
+		if fieldInfo := getCSVFieldPosition(csvColumnHeader, structInfo, curHeaderCount); fieldInfo != nil {
+			csvHeadersLabels[i] = fieldInfo
+			if ShouldAlignDuplicateHeadersWithStructFieldOrder {
+				curHeaderCount++
+				headerCount[csvColumnHeader] = curHeaderCount
+			}
+		}
+	}
+	if err := maybeDoubleHeaderNames(headers); err != nil {
+		return err
+	}
+
+	um.headerMap = csvHeaders
+	um.fieldInfoMap = csvHeadersLabels
+	um.MismatchedHeaders = mismatchHeaderFields(structInfo.Fields, headers)
+	um.MismatchedStructFields = mismatchStructFields(structInfo.Fields, headers)
+	return nil
+}
+
+// unmarshalRow converts a CSV row to a struct, based on CSV struct tags.
+// If unmatched is non nil, it is populated with any columns that don't map to a struct field.
+// The returned interface{} has the same runtime type as the value given to NewUnmarshaller.
+func (um *Unmarshaller) unmarshalRow(row []string, unmatched map[string]string) (interface{}, error) {
+	isPointer := false
+	concreteOutType := um.outType
+	if um.outType.Kind() == reflect.Ptr {
+		isPointer = true
+		concreteOutType = concreteOutType.Elem()
+	}
+	// Allocate a fresh output value for this row.
+	outValue := createNewOutInner(isPointer, concreteOutType)
+	for j, csvColumnContent := range row {
+		if fieldInfo, ok := um.fieldInfoMap[j]; ok {
+			if err := setInnerField(&outValue, isPointer, fieldInfo.IndexChain, csvColumnContent, fieldInfo.omitEmpty); err != nil { // Set field of struct
+				return nil, fmt.Errorf("cannot assign field at %v to %s through index chain %v: %v", j, outValue.Type(), fieldInfo.IndexChain, err)
+			}
+		} else if unmatched != nil {
+			// Column has no matching struct field; report it to the caller.
+			unmatched[um.headerMap[j]] = csvColumnContent
+		}
+	}
+	return outValue.Interface(), nil
+}
From 205ded1b1bc21d9aa780b6c82c2ee07fd312f998 Mon Sep 17 00:00:00 2001
From: ramr
Date: Fri, 13 Jul 2018 23:08:56 -0700
Subject: [PATCH 9/9] bump(*)

---
 glide.lock                                    | 8 +++----
 vendor/github.com/openshift/api/README.md | 29 +++++++++++++++++++++++
 2 files changed, 33 insertions(+), 4 deletions(-)

diff --git a/glide.lock b/glide.lock
index b5d30c9671e0..afc4fe962bc6 100644
--- a/glide.lock
+++ b/glide.lock
@@ -1,5 +1,5 @@
-hash: 8bd538d5b1bc5b1103529ae4af431ae78ae68173ef3ac7db06dc7c00bea05fc4
-updated: 2018-07-13T03:38:19.086677269-04:00
+hash: 424aaf9eb0319384574a012df2886bbf6cd77b479e7065462bc48a61e99a9b3f
+updated: 2018-07-12T22:59:32.292828018-07:00
 imports:
 - name: bitbucket.org/ww/goautoneg
   version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675
@@ -819,7 +819,7 @@ imports:
   - go-selinux
   - go-selinux/label
 - name: github.com/openshift/api
-  version: f8215ce525d43bfcc42aeacf0c53c519fd3d41ef
+  version: 12a230f9f9225237f1a35b86171848da610900bb
   subpackages:
   - apps/v1
   - authorization
@@ -1838,7 +1838,7 @@ imports:
   - pkg/util/proto/testing
  - 
pkg/util/proto/validation
 - name: k8s.io/kubernetes
-  version: 9a9fb880ba176e6375b5ee47ebc289fb90f974c6
+  version: cbf5b4279a751d7d4dc876149e8b2a4c3bb732c3
   repo: https://github.com/openshift/kubernetes.git
   subpackages:
   - cmd/controller-manager/app
diff --git a/vendor/github.com/openshift/api/README.md b/vendor/github.com/openshift/api/README.md
index 84755e610c9d..de662b498297 100644
--- a/vendor/github.com/openshift/api/README.md
+++ b/vendor/github.com/openshift/api/README.md
@@ -1,2 +1,31 @@
 # api
 The canonical location of the OpenShift API definition. This repo holds the API type definitions and serialization code used by [openshift/client-go](https://github.com/openshift/client-go)
+
+## pull request process
+
+Pull requests that change API types in this repo that have corresponding "internal" API objects in the
+[openshift/origin](https://github.com/openshift/origin) repo must be paired with a pull request to
+[openshift/origin](https://github.com/openshift/origin).
+
+To ensure the corresponding origin pull request is ready to merge as soon as the pull request to this repo is merged:
+1. Base your pull request to this repo on latest [openshift/api#master](https://github.com/openshift/api/commits/master) and ensure CI is green
+2. Base your pull request to openshift/origin on latest [openshift/origin#master](https://github.com/openshift/origin/commits/master)
+3. In your openshift/origin pull request:
+   1. Add a TMP commit that points [glide.yaml](https://github.com/openshift/origin/blob/master/glide.yaml#L39-L41) at your fork of openshift/api, and the branch of your pull request:
+
+      ```
+      - package: github.com/openshift/api
+        repo: https://github.com/<your-username>/api.git
+        version: "<your-branch>"
+      ```
+
+   2.
Update your `bump(*)` commit to include the result of running `hack/update-deps.sh`, which will pull in the changes from your openshift/api pull request + 3. Make sure CI is green on your openshift/origin pull request + 4. Get LGTM on your openshift/api pull request (for API changes) and your openshift/origin pull request (for code changes) + +Once both pull requests are ready, the openshift/api pull request can be merged. + +Then do the following with your openshift/origin pull request: +1. Drop the TMP commit (pointing glide back at openshift/api#master) +2. Rerun `hack/update-deps.sh` and update your `bump(*)` commit +3. It can then be tagged and merged by CI