From 2c7e43427fe4464faf4c84c1db202716e6328e993744cae5b4bc4acdaee71521 Mon Sep 17 00:00:00 2001
From: Jan Fajerski
Date: Fri, 26 Jul 2019 11:24:49 +0000
Subject: [PATCH 1/3] Accepting request 718818 from home:jcavalheiro:monitoring

Temporary patch to support SUSE Manager service discovery. It's a
non-disruptive extension to Prometheus and is expected to be resilient to
version upgrades. The plan is to remove this patch once we have it
upstreamed.

- Add support for Uyuni/SUSE Manager service discovery
  + Added 0003-Add-Uyuni-service-discovery

OBS-URL: https://build.opensuse.org/request/show/718818
OBS-URL: https://build.opensuse.org/package/show/server:monitoring/golang-github-prometheus-prometheus?expand=0&rev=15
---
 0003-Add-Uyuni-service-discovery.patch      | 1996 +++++++++++++++++++
 golang-github-prometheus-prometheus.changes |    6 +
 golang-github-prometheus-prometheus.spec    |    3 +
 3 files changed, 2005 insertions(+)
 create mode 100644 0003-Add-Uyuni-service-discovery.patch

diff --git a/0003-Add-Uyuni-service-discovery.patch b/0003-Add-Uyuni-service-discovery.patch
new file mode 100644
index 0000000..fe675b8
--- /dev/null
+++ b/0003-Add-Uyuni-service-discovery.patch
@@ -0,0 +1,1996 @@
+From 757642fdefcc3a6f08d36d5db9ff5e9b46104193 Mon Sep 17 00:00:00 2001
+From: Joao Cavalheiro
+Date: Wed, 22 May 2019 16:39:25 +0100
+Subject: [PATCH] Add Uyuni service discovery
+
+---
+ discovery/config/config.go                        |   3 +
+ discovery/manager.go                              |   6 +
+ discovery/uyuni/uyuni.go                          | 219 ++++++++++
+ vendor/github.com/kolo/xmlrpc/LICENSE             |  19 +
+ vendor/github.com/kolo/xmlrpc/README.md           |  89 ++++
+ vendor/github.com/kolo/xmlrpc/client.go           | 170 ++++++++
+ vendor/github.com/kolo/xmlrpc/client_test.go      | 141 +++++++
+ vendor/github.com/kolo/xmlrpc/decoder.go          | 473 ++++++++++++++++++++++
+ vendor/github.com/kolo/xmlrpc/decoder_test.go     | 234 +++++++++++
+ vendor/github.com/kolo/xmlrpc/encoder.go          | 171 ++++++++
+ vendor/github.com/kolo/xmlrpc/encoder_test.go     |  58 +++
+ vendor/github.com/kolo/xmlrpc/fixtures/cp1251.xml |   6 +
+ vendor/github.com/kolo/xmlrpc/request.go          |  57 +++
+ vendor/github.com/kolo/xmlrpc/response.go         |  52 +++
+ vendor/github.com/kolo/xmlrpc/response_test.go    |  84 ++++
+ vendor/github.com/kolo/xmlrpc/test_server.rb      |  25 ++
+ vendor/github.com/kolo/xmlrpc/xmlrpc.go           |  19 +
+ 17 files changed, 1826 insertions(+)
+ create mode 100644 discovery/uyuni/uyuni.go
+ create mode 100644 vendor/github.com/kolo/xmlrpc/LICENSE
+ create mode 100644 vendor/github.com/kolo/xmlrpc/README.md
+ create mode 100644 vendor/github.com/kolo/xmlrpc/client.go
+ create mode 100644 vendor/github.com/kolo/xmlrpc/client_test.go
+ create mode 100644 vendor/github.com/kolo/xmlrpc/decoder.go
+ create mode 100644 vendor/github.com/kolo/xmlrpc/decoder_test.go
+ create mode 100644 vendor/github.com/kolo/xmlrpc/encoder.go
+ create mode 100644 vendor/github.com/kolo/xmlrpc/encoder_test.go
+ create mode 100644 vendor/github.com/kolo/xmlrpc/fixtures/cp1251.xml
+ create mode 100644 vendor/github.com/kolo/xmlrpc/request.go
+ create mode 100644 vendor/github.com/kolo/xmlrpc/response.go
+ create mode 100644 vendor/github.com/kolo/xmlrpc/response_test.go
+ create mode 100644 vendor/github.com/kolo/xmlrpc/test_server.rb
+ create mode 100644 vendor/github.com/kolo/xmlrpc/xmlrpc.go
+
+diff --git a/discovery/config/config.go b/discovery/config/config.go
+index 820de1f7..27d8c0cc 100644
+--- a/discovery/config/config.go
++++ b/discovery/config/config.go
+@@ -27,6 +27,7 @@ import (
+ 	"github.com/prometheus/prometheus/discovery/openstack"
+ 	"github.com/prometheus/prometheus/discovery/targetgroup"
+ 	"github.com/prometheus/prometheus/discovery/triton"
++	"github.com/prometheus/prometheus/discovery/uyuni"
+ 	"github.com/prometheus/prometheus/discovery/zookeeper"
+ )
+ 
+@@ -58,6 +59,8 @@ type ServiceDiscoveryConfig struct {
+ 	AzureSDConfigs []*azure.SDConfig `yaml:"azure_sd_configs,omitempty"`
+ 	// List of Triton service discovery configurations.
+ 	TritonSDConfigs []*triton.SDConfig `yaml:"triton_sd_configs,omitempty"`
++	// List of Uyuni service discovery configurations.
++	UyuniSDConfigs []*uyuni.SDConfig `yaml:"uyuni_sd_configs,omitempty"`
+ }
+ 
+ // Validate validates the ServiceDiscoveryConfig.
+diff --git a/discovery/manager.go b/discovery/manager.go
+index 1dbdecc8..ac621f3e 100644
+--- a/discovery/manager.go
++++ b/discovery/manager.go
+@@ -37,6 +37,7 @@ import (
+ 	"github.com/prometheus/prometheus/discovery/marathon"
+ 	"github.com/prometheus/prometheus/discovery/openstack"
+ 	"github.com/prometheus/prometheus/discovery/triton"
++	"github.com/prometheus/prometheus/discovery/uyuni"
+ 	"github.com/prometheus/prometheus/discovery/zookeeper"
+ )
+ 
+@@ -406,6 +407,11 @@ func (m *Manager) registerProviders(cfg sd_config.ServiceDiscoveryConfig, setNam
+ 			return triton.New(log.With(m.logger, "discovery", "triton"), c)
+ 		})
+ 	}
++	for _, c := range cfg.UyuniSDConfigs {
++		add(c, func() (Discoverer, error) {
++			return uyuni.NewDiscovery(c, log.With(m.logger, "discovery", "uyuni")), nil
++		})
++	}
+ 	if len(cfg.StaticConfigs) > 0 {
+ 		add(setName, func() (Discoverer, error) {
+ 			return &StaticProvider{TargetGroups: cfg.StaticConfigs}, nil
+diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go
+new file mode 100644
+index 00000000..60f741a5
+--- /dev/null
++++ b/discovery/uyuni/uyuni.go
+@@ -0,0 +1,219 @@
++// Copyright 2019 The Prometheus Authors
++// Licensed under the Apache License, Version 2.0 (the "License");
++// you may not use this file except in compliance with the License.
++// You may obtain a copy of the License at
++//
++// http://www.apache.org/licenses/LICENSE-2.0
++//
++// Unless required by applicable law or agreed to in writing, software
++// distributed under the License is distributed on an "AS IS" BASIS,
++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++// See the License for the specific language governing permissions and
++// limitations under the License.
++
++package uyuni
++
++import (
++	"context"
++	"fmt"
++	"net/http"
++	"time"
++
++	"github.com/go-kit/kit/log"
++	"github.com/go-kit/kit/log/level"
++	"github.com/kolo/xmlrpc"
++	"github.com/pkg/errors"
++	"github.com/prometheus/common/model"
++
++	"github.com/prometheus/prometheus/discovery/refresh"
++	"github.com/prometheus/prometheus/discovery/targetgroup"
++)
++
++const (
++	uyuniLabel             = model.MetaLabelPrefix + "uyuni_"
++	uyuniLabelEntitlements = uyuniLabel + "entitlements"
++)
++
++// DefaultSDConfig is the default Uyuni SD configuration.
++var DefaultSDConfig = SDConfig{
++	RefreshInterval: model.Duration(1 * time.Minute),
++}
++
++// SDConfig is the configuration for Uyuni based service discovery.
++type SDConfig struct {
++	Host            string         `yaml:"host"`
++	User            string         `yaml:"username"`
++	Pass            string         `yaml:"password"`
++	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
++}
++
++// Uyuni API response structures
++type clientRef struct {
++	Id   int    `xmlrpc:"id"`
++	Name string `xmlrpc:"name"`
++}
++
++type clientDetail struct {
++	Id           int      `xmlrpc:"id"`
++	Hostname     string   `xmlrpc:"hostname"`
++	Entitlements []string `xmlrpc:"addon_entitlements"`
++}
++
++type exporterConfig struct {
++	Enabled bool `xmlrpc:"enabled"`
++}
++
++type formulaData struct {
++	NodeExporter     exporterConfig `xmlrpc:"node_exporter"`
++	PostgresExporter exporterConfig `xmlrpc:"postgres_exporter"`
++}
++
++// UnmarshalYAML implements the yaml.Unmarshaler interface.
++func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
++	*c = DefaultSDConfig
++	type plain SDConfig
++	err := unmarshal((*plain)(c))
++	if err != nil {
++		return err
++	}
++	if c.Host == "" {
++		return errors.New("Uyuni SD configuration requires a host")
++	}
++	if c.User == "" {
++		return errors.New("Uyuni SD configuration requires a username")
++	}
++	if c.Pass == "" {
++		return errors.New("Uyuni SD configuration requires a password")
++	}
++	if c.RefreshInterval <= 0 {
++		return errors.New("Uyuni SD configuration requires a positive refresh interval")
++	}
++	return nil
++}
++
++// Login attempts to log in to the SUSE Manager API and returns an auth token.
++func Login(rpcclient *xmlrpc.Client, user string, pass string) (string, error) {
++	var result string
++	err := rpcclient.Call("auth.login", []interface{}{user, pass}, &result)
++	return result, err
++}
++
++// Logout closes the given SUSE Manager API session.
++func Logout(rpcclient *xmlrpc.Client, token string) error {
++	return rpcclient.Call("auth.logout", token, nil)
++}
++
++// ListSystems returns the list of systems registered in SUSE Manager.
++func ListSystems(rpcclient *xmlrpc.Client, token string) ([]clientRef, error) {
++	var result []clientRef
++	err := rpcclient.Call("system.listSystems", token, &result)
++	return result, err
++}
++
++// GetSystemDetails returns the details of the given system.
++func GetSystemDetails(rpcclient *xmlrpc.Client, token string, systemId int) (clientDetail, error) {
++	var result clientDetail
++	err := rpcclient.Call("system.getDetails", []interface{}{token, systemId}, &result)
++	return result, err
++}
++
++// ListSystemFQDNs returns the FQDNs of the given system.
++func ListSystemFQDNs(rpcclient *xmlrpc.Client, token string, systemId int) ([]string, error) {
++	var result []string
++	err := rpcclient.Call("system.listFqdns", []interface{}{token, systemId}, &result)
++	return result, err
++}
++
++// getSystemFormulaData returns the formula data of the given system.
++func getSystemFormulaData(rpcclient *xmlrpc.Client, token string, systemId int, formulaName string) (formulaData, error) {
++	var result formulaData
++	err := rpcclient.Call("formula.getSystemFormulaData", []interface{}{token, systemId, formulaName}, &result)
++	return result, err
++}
++
++// Discovery periodically performs Uyuni API requests. It implements
++// the Discoverer interface.
++type Discovery struct {
++	*refresh.Discovery
++	client   *http.Client
++	interval time.Duration
++	sdConfig *SDConfig
++	logger   log.Logger
++}
++
++// NewDiscovery returns a new Uyuni discovery for the given configuration.
++func NewDiscovery(conf *SDConfig, logger log.Logger) *Discovery {
++	d := &Discovery{
++		interval: time.Duration(conf.RefreshInterval),
++		sdConfig: conf,
++		logger:   logger,
++	}
++	d.Discovery = refresh.NewDiscovery(
++		logger,
++		"uyuni",
++		time.Duration(conf.RefreshInterval),
++		d.refresh,
++	)
++	return d
++}
++
++func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
++	config := d.sdConfig
++	apiURL := config.Host + "/rpc/api"
++
++	rpcclient, err := xmlrpc.NewClient(apiURL, nil)
++	if err != nil {
++		return nil, errors.Wrap(err, "unable to create XML-RPC client")
++	}
++
++	token, err := Login(rpcclient, config.User, config.Pass)
++	if err != nil {
++		return nil, errors.Wrap(err, "unable to log in to SUSE Manager API")
++	}
++
++	clientList, err := ListSystems(rpcclient, token)
++	if err != nil {
++		return nil, errors.Wrap(err, "unable to get list of systems")
++	}
++
++	tg := &targetgroup.Group{
++		Source: config.Host,
++	}
++
++	if len(clientList) == 0 {
++		level.Debug(d.logger).Log("msg", "Found 0 systems")
++	} else {
++		for _, client := range clientList {
++			fqdns := []string{}
++			formulas := formulaData{}
++			details, err := GetSystemDetails(rpcclient, token, client.Id)
++			if err != nil {
++				level.Error(d.logger).Log("msg", "Unable to get system details", "clientId", client.Id, "err", err)
++				continue
++			}
++			// Check if system is to be monitored
++			for _, v := range details.Entitlements {
++				if v == "monitoring_entitled" {
++					fqdns, err = ListSystemFQDNs(rpcclient, token, client.Id)
++					if err != nil || len(fqdns) == 0 {
++						level.Error(d.logger).Log("msg", "Unable to get system FQDNs", "clientId", client.Id, "err", err)
++						continue
++					}
++					formulas, err = getSystemFormulaData(rpcclient, token, client.Id, "prometheus-exporters")
++					if err != nil {
++						level.Error(d.logger).Log("msg", "Unable to get formula data", "clientId", client.Id, "err", err)
++						continue
++					}
++					if formulas.NodeExporter.Enabled {
++						labels := model.LabelSet{}
++						addr := fmt.Sprintf("%s:%d", fqdns[len(fqdns)-1], 9100)
++						labels[model.AddressLabel] = model.LabelValue(addr)
++						tg.Targets = append(tg.Targets, labels)
++					}
++					if formulas.PostgresExporter.Enabled {
++						labels := model.LabelSet{}
++						addr := fmt.Sprintf("%s:%d", fqdns[len(fqdns)-1], 9187)
++						labels[model.AddressLabel] = model.LabelValue(addr)
++						tg.Targets = append(tg.Targets, labels)
++					}
++				}
++			}
++
++			level.Debug(d.logger).Log("msg", "Found system", "host", details.Hostname, "entitlements", fmt.Sprintf("%+v", details.Entitlements), "FQDN", fmt.Sprintf("%+v", fqdns), "formulas", fmt.Sprintf("%+v", formulas))
++		}
++	}
++	if err := Logout(rpcclient, token); err != nil {
++		level.Warn(d.logger).Log("msg", "Failed to log out from SUSE Manager API", "err", err)
++	}
++	return []*targetgroup.Group{tg}, nil
++}
+diff --git a/vendor/github.com/kolo/xmlrpc/LICENSE b/vendor/github.com/kolo/xmlrpc/LICENSE
+new file mode 100644
+index 00000000..8103dd13
+--- /dev/null
++++ b/vendor/github.com/kolo/xmlrpc/LICENSE
+@@ -0,0 +1,19 @@
++Copyright (C) 2012 Dmitry Maksimov
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE ++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++THE SOFTWARE. +diff --git a/vendor/github.com/kolo/xmlrpc/README.md b/vendor/github.com/kolo/xmlrpc/README.md +new file mode 100644 +index 00000000..8113cfcc +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/README.md +@@ -0,0 +1,89 @@ ++[![GoDoc](https://godoc.org/github.com/kolo/xmlrpc?status.svg)](https://godoc.org/github.com/kolo/xmlrpc) ++ ++## Overview ++ ++xmlrpc is an implementation of client side part of XMLRPC protocol in Go language. ++ ++## Status ++ ++This project is in minimal maintenance mode with no further development. Bug fixes ++are accepted, but it might take some time until they will be merged. ++ ++## Installation ++ ++To install xmlrpc package run `go get github.com/kolo/xmlrpc`. To use ++it in application add `"github.com/kolo/xmlrpc"` string to `import` ++statement. ++ ++## Usage ++ ++ client, _ := xmlrpc.NewClient("https://bugzilla.mozilla.org/xmlrpc.cgi", nil) ++ result := struct{ ++ Version string `xmlrpc:"version"` ++ }{} ++ client.Call("Bugzilla.version", nil, &result) ++ fmt.Printf("Version: %s\n", result.Version) // Version: 4.2.7+ ++ ++Second argument of NewClient function is an object that implements ++[http.RoundTripper](http://golang.org/pkg/net/http/#RoundTripper) ++interface, it can be used to get more control over connection options. ++By default it initialized by http.DefaultTransport object. ++ ++### Arguments encoding ++ ++xmlrpc package supports encoding of native Go data types to method ++arguments. ++ ++Data types encoding rules: ++ ++* int, int8, int16, int32, int64 encoded to int; ++* float32, float64 encoded to double; ++* bool encoded to boolean; ++* string encoded to string; ++* time.Time encoded to datetime.iso8601; ++* xmlrpc.Base64 encoded to base64; ++* slice encoded to array; ++ ++Structs decoded to struct by following rules: ++ ++* all public field become struct members; ++* field name become member name; ++* if field has xmlrpc tag, its value become member name. ++ ++Server method can accept few arguments, to handle this case there is ++special approach to handle slice of empty interfaces (`[]interface{}`). ++Each value of such slice encoded as separate argument. ++ ++### Result decoding ++ ++Result of remote function is decoded to native Go data type. ++ ++Data types decoding rules: ++ ++* int, i4 decoded to int, int8, int16, int32, int64; ++* double decoded to float32, float64; ++* boolean decoded to bool; ++* string decoded to string; ++* array decoded to slice; ++* structs decoded following the rules described in previous section; ++* datetime.iso8601 decoded as time.Time data type; ++* base64 decoded to string. ++ ++## Implementation details ++ ++xmlrpc package contains clientCodec type, that implements [rpc.ClientCodec](http://golang.org/pkg/net/rpc/#ClientCodec) ++interface of [net/rpc](http://golang.org/pkg/net/rpc) package. ++ ++xmlrpc package works over HTTP protocol, but some internal functions ++and data type were made public to make it easier to create another ++implementation of xmlrpc that works over another protocol. To encode ++request body there is EncodeMethodCall function. To decode server ++response Response data type can be used. ++ ++## Contribution ++ ++See [project status](#status). 
++ ++## Authors ++ ++Dmitry Maksimov (dmtmax@gmail.com) +diff --git a/vendor/github.com/kolo/xmlrpc/client.go b/vendor/github.com/kolo/xmlrpc/client.go +new file mode 100644 +index 00000000..3aa86ce2 +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/client.go +@@ -0,0 +1,170 @@ ++package xmlrpc ++ ++import ( ++ "errors" ++ "fmt" ++ "io/ioutil" ++ "net/http" ++ "net/http/cookiejar" ++ "net/rpc" ++ "net/url" ++ "sync" ++) ++ ++type Client struct { ++ *rpc.Client ++} ++ ++// clientCodec is rpc.ClientCodec interface implementation. ++type clientCodec struct { ++ // url presents url of xmlrpc service ++ url *url.URL ++ ++ // httpClient works with HTTP protocol ++ httpClient *http.Client ++ ++ // cookies stores cookies received on last request ++ cookies http.CookieJar ++ ++ // responses presents map of active requests. It is required to return request id, that ++ // rpc.Client can mark them as done. ++ responses map[uint64]*http.Response ++ mutex sync.Mutex ++ ++ response *Response ++ ++ // ready presents channel, that is used to link request and it`s response. ++ ready chan uint64 ++ ++ // close notifies codec is closed. ++ close chan uint64 ++} ++ ++func (codec *clientCodec) WriteRequest(request *rpc.Request, args interface{}) (err error) { ++ httpRequest, err := NewRequest(codec.url.String(), request.ServiceMethod, args) ++ ++ if codec.cookies != nil { ++ for _, cookie := range codec.cookies.Cookies(codec.url) { ++ httpRequest.AddCookie(cookie) ++ } ++ } ++ ++ if err != nil { ++ return err ++ } ++ ++ var httpResponse *http.Response ++ httpResponse, err = codec.httpClient.Do(httpRequest) ++ ++ if err != nil { ++ return err ++ } ++ ++ if codec.cookies != nil { ++ codec.cookies.SetCookies(codec.url, httpResponse.Cookies()) ++ } ++ ++ codec.mutex.Lock() ++ codec.responses[request.Seq] = httpResponse ++ codec.mutex.Unlock() ++ ++ codec.ready <- request.Seq ++ ++ return nil ++} ++ ++func (codec *clientCodec) ReadResponseHeader(response *rpc.Response) (err error) { ++ var seq uint64 ++ ++ select { ++ case seq = <-codec.ready: ++ case <-codec.close: ++ return errors.New("codec is closed") ++ } ++ ++ codec.mutex.Lock() ++ httpResponse := codec.responses[seq] ++ codec.mutex.Unlock() ++ ++ if httpResponse.StatusCode < 200 || httpResponse.StatusCode >= 300 { ++ return fmt.Errorf("request error: bad status code - %d", httpResponse.StatusCode) ++ } ++ ++ respData, err := ioutil.ReadAll(httpResponse.Body) ++ ++ if err != nil { ++ return err ++ } ++ ++ httpResponse.Body.Close() ++ ++ resp := NewResponse(respData) ++ ++ if resp.Failed() { ++ response.Error = fmt.Sprintf("%v", resp.Err()) ++ } ++ ++ codec.response = resp ++ ++ response.Seq = seq ++ ++ codec.mutex.Lock() ++ delete(codec.responses, seq) ++ codec.mutex.Unlock() ++ ++ return nil ++} ++ ++func (codec *clientCodec) ReadResponseBody(v interface{}) (err error) { ++ if v == nil { ++ return nil ++ } ++ ++ if err = codec.response.Unmarshal(v); err != nil { ++ return err ++ } ++ ++ return nil ++} ++ ++func (codec *clientCodec) Close() error { ++ if transport, ok := codec.httpClient.Transport.(*http.Transport); ok { ++ transport.CloseIdleConnections() ++ } ++ ++ close(codec.close) ++ ++ return nil ++} ++ ++// NewClient returns instance of rpc.Client object, that is used to send request to xmlrpc service. 
++func NewClient(requrl string, transport http.RoundTripper) (*Client, error) { ++ if transport == nil { ++ transport = http.DefaultTransport ++ } ++ ++ httpClient := &http.Client{Transport: transport} ++ ++ jar, err := cookiejar.New(nil) ++ ++ if err != nil { ++ return nil, err ++ } ++ ++ u, err := url.Parse(requrl) ++ ++ if err != nil { ++ return nil, err ++ } ++ ++ codec := clientCodec{ ++ url: u, ++ httpClient: httpClient, ++ close: make(chan uint64), ++ ready: make(chan uint64), ++ responses: make(map[uint64]*http.Response), ++ cookies: jar, ++ } ++ ++ return &Client{rpc.NewClientWithCodec(&codec)}, nil ++} +diff --git a/vendor/github.com/kolo/xmlrpc/client_test.go b/vendor/github.com/kolo/xmlrpc/client_test.go +new file mode 100644 +index 00000000..b429d4f8 +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/client_test.go +@@ -0,0 +1,141 @@ ++// +build integration ++ ++package xmlrpc ++ ++import ( ++ "context" ++ "runtime" ++ "sync" ++ "testing" ++ "time" ++) ++ ++func Test_CallWithoutArgs(t *testing.T) { ++ client := newClient(t) ++ defer client.Close() ++ ++ var result time.Time ++ if err := client.Call("service.time", nil, &result); err != nil { ++ t.Fatalf("service.time call error: %v", err) ++ } ++} ++ ++func Test_CallWithOneArg(t *testing.T) { ++ client := newClient(t) ++ defer client.Close() ++ ++ var result string ++ if err := client.Call("service.upcase", "xmlrpc", &result); err != nil { ++ t.Fatalf("service.upcase call error: %v", err) ++ } ++ ++ if result != "XMLRPC" { ++ t.Fatalf("Unexpected result of service.upcase: %s != %s", "XMLRPC", result) ++ } ++} ++ ++func Test_CallWithTwoArgs(t *testing.T) { ++ client := newClient(t) ++ defer client.Close() ++ ++ var sum int ++ if err := client.Call("service.sum", []interface{}{2, 3}, &sum); err != nil { ++ t.Fatalf("service.sum call error: %v", err) ++ } ++ ++ if sum != 5 { ++ t.Fatalf("Unexpected result of service.sum: %d != %d", 5, sum) ++ } ++} ++ ++func Test_TwoCalls(t *testing.T) { ++ client := newClient(t) ++ defer client.Close() ++ ++ var upcase string ++ if err := client.Call("service.upcase", "xmlrpc", &upcase); err != nil { ++ t.Fatalf("service.upcase call error: %v", err) ++ } ++ ++ var sum int ++ if err := client.Call("service.sum", []interface{}{2, 3}, &sum); err != nil { ++ t.Fatalf("service.sum call error: %v", err) ++ } ++ ++} ++ ++func Test_FailedCall(t *testing.T) { ++ client := newClient(t) ++ defer client.Close() ++ ++ var result int ++ if err := client.Call("service.error", nil, &result); err == nil { ++ t.Fatal("expected service.error returns error, but it didn't") ++ } ++} ++ ++func Test_ConcurrentCalls(t *testing.T) { ++ client := newClient(t) ++ ++ call := func() { ++ var result time.Time ++ client.Call("service.time", nil, &result) ++ } ++ ++ var wg sync.WaitGroup ++ for i := 0; i < 100; i++ { ++ wg.Add(1) ++ go func() { ++ call() ++ wg.Done() ++ }() ++ } ++ ++ wg.Wait() ++ client.Close() ++} ++ ++func Test_CloseMemoryLeak(t *testing.T) { ++ expected := runtime.NumGoroutine() ++ ++ for i := 0; i < 3; i++ { ++ client := newClient(t) ++ client.Call("service.time", nil, nil) ++ client.Close() ++ } ++ ++ var actual int ++ ++ // It takes some time to stop running goroutinges. This function checks number of ++ // running goroutines. It finishes execution if number is same as expected or timeout ++ // has been reached. 
++ func() { ++ ctx, cancel := context.WithTimeout(context.Background(), time.Second) ++ defer cancel() ++ ++ for { ++ select { ++ case <-ctx.Done(): ++ return ++ default: ++ actual = runtime.NumGoroutine() ++ if actual == expected { ++ return ++ } ++ } ++ } ++ }() ++ ++ if actual != expected { ++ t.Errorf("expected number of running goroutines to be %d, but got %d", expected, actual) ++ } ++} ++ ++func newClient(t *testing.T) *Client { ++ client, err := NewClient("http://localhost:5001", nil) ++ if err != nil { ++ t.Fatalf("Can't create client: %v", err) ++ } ++ ++ return client ++} +diff --git a/vendor/github.com/kolo/xmlrpc/decoder.go b/vendor/github.com/kolo/xmlrpc/decoder.go +new file mode 100644 +index 00000000..d4dcb19a +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/decoder.go +@@ -0,0 +1,473 @@ ++package xmlrpc ++ ++import ( ++ "bytes" ++ "encoding/xml" ++ "errors" ++ "fmt" ++ "io" ++ "reflect" ++ "strconv" ++ "strings" ++ "time" ++) ++ ++const ( ++ iso8601 = "20060102T15:04:05" ++ iso8601Z = "20060102T15:04:05Z07:00" ++ iso8601Hyphen = "2006-01-02T15:04:05" ++ iso8601HyphenZ = "2006-01-02T15:04:05Z07:00" ++) ++ ++var ( ++ // CharsetReader is a function to generate reader which converts a non UTF-8 ++ // charset into UTF-8. ++ CharsetReader func(string, io.Reader) (io.Reader, error) ++ ++ timeLayouts = []string{iso8601, iso8601Z, iso8601Hyphen, iso8601HyphenZ} ++ invalidXmlError = errors.New("invalid xml") ++) ++ ++type TypeMismatchError string ++ ++func (e TypeMismatchError) Error() string { return string(e) } ++ ++type decoder struct { ++ *xml.Decoder ++} ++ ++func unmarshal(data []byte, v interface{}) (err error) { ++ dec := &decoder{xml.NewDecoder(bytes.NewBuffer(data))} ++ ++ if CharsetReader != nil { ++ dec.CharsetReader = CharsetReader ++ } ++ ++ var tok xml.Token ++ for { ++ if tok, err = dec.Token(); err != nil { ++ return err ++ } ++ ++ if t, ok := tok.(xml.StartElement); ok { ++ if t.Name.Local == "value" { ++ val := reflect.ValueOf(v) ++ if val.Kind() != reflect.Ptr { ++ return errors.New("non-pointer value passed to unmarshal") ++ } ++ if err = dec.decodeValue(val.Elem()); err != nil { ++ return err ++ } ++ ++ break ++ } ++ } ++ } ++ ++ // read until end of document ++ err = dec.Skip() ++ if err != nil && err != io.EOF { ++ return err ++ } ++ ++ return nil ++} ++ ++func (dec *decoder) decodeValue(val reflect.Value) error { ++ var tok xml.Token ++ var err error ++ ++ if val.Kind() == reflect.Ptr { ++ if val.IsNil() { ++ val.Set(reflect.New(val.Type().Elem())) ++ } ++ val = val.Elem() ++ } ++ ++ var typeName string ++ for { ++ if tok, err = dec.Token(); err != nil { ++ return err ++ } ++ ++ if t, ok := tok.(xml.EndElement); ok { ++ if t.Name.Local == "value" { ++ return nil ++ } else { ++ return invalidXmlError ++ } ++ } ++ ++ if t, ok := tok.(xml.StartElement); ok { ++ typeName = t.Name.Local ++ break ++ } ++ ++ // Treat value data without type identifier as string ++ if t, ok := tok.(xml.CharData); ok { ++ if value := strings.TrimSpace(string(t)); value != "" { ++ if err = checkType(val, reflect.String); err != nil { ++ return err ++ } ++ ++ val.SetString(value) ++ return nil ++ } ++ } ++ } ++ ++ switch typeName { ++ case "struct": ++ ismap := false ++ pmap := val ++ valType := val.Type() ++ ++ if err = checkType(val, reflect.Struct); err != nil { ++ if checkType(val, reflect.Map) == nil { ++ if valType.Key().Kind() != reflect.String { ++ return fmt.Errorf("only maps with string key type can be unmarshalled") ++ } ++ ismap = true ++ } else if checkType(val, 
reflect.Interface) == nil && val.IsNil() { ++ var dummy map[string]interface{} ++ valType = reflect.TypeOf(dummy) ++ pmap = reflect.New(valType).Elem() ++ val.Set(pmap) ++ ismap = true ++ } else { ++ return err ++ } ++ } ++ ++ var fields map[string]reflect.Value ++ ++ if !ismap { ++ fields = make(map[string]reflect.Value) ++ ++ for i := 0; i < valType.NumField(); i++ { ++ field := valType.Field(i) ++ fieldVal := val.FieldByName(field.Name) ++ ++ if fieldVal.CanSet() { ++ if fn := field.Tag.Get("xmlrpc"); fn != "" { ++ fields[fn] = fieldVal ++ } else { ++ fields[field.Name] = fieldVal ++ } ++ } ++ } ++ } else { ++ // Create initial empty map ++ pmap.Set(reflect.MakeMap(valType)) ++ } ++ ++ // Process struct members. ++ StructLoop: ++ for { ++ if tok, err = dec.Token(); err != nil { ++ return err ++ } ++ switch t := tok.(type) { ++ case xml.StartElement: ++ if t.Name.Local != "member" { ++ return invalidXmlError ++ } ++ ++ tagName, fieldName, err := dec.readTag() ++ if err != nil { ++ return err ++ } ++ if tagName != "name" { ++ return invalidXmlError ++ } ++ ++ var fv reflect.Value ++ ok := true ++ ++ if !ismap { ++ fv, ok = fields[string(fieldName)] ++ } else { ++ fv = reflect.New(valType.Elem()) ++ } ++ ++ if ok { ++ for { ++ if tok, err = dec.Token(); err != nil { ++ return err ++ } ++ if t, ok := tok.(xml.StartElement); ok && t.Name.Local == "value" { ++ if err = dec.decodeValue(fv); err != nil { ++ return err ++ } ++ ++ // ++ if err = dec.Skip(); err != nil { ++ return err ++ } ++ ++ break ++ } ++ } ++ } ++ ++ // ++ if err = dec.Skip(); err != nil { ++ return err ++ } ++ ++ if ismap { ++ pmap.SetMapIndex(reflect.ValueOf(string(fieldName)), reflect.Indirect(fv)) ++ val.Set(pmap) ++ } ++ case xml.EndElement: ++ break StructLoop ++ } ++ } ++ case "array": ++ slice := val ++ if checkType(val, reflect.Interface) == nil && val.IsNil() { ++ slice = reflect.ValueOf([]interface{}{}) ++ } else if err = checkType(val, reflect.Slice); err != nil { ++ return err ++ } ++ ++ ArrayLoop: ++ for { ++ if tok, err = dec.Token(); err != nil { ++ return err ++ } ++ ++ switch t := tok.(type) { ++ case xml.StartElement: ++ var index int ++ if t.Name.Local != "data" { ++ return invalidXmlError ++ } ++ DataLoop: ++ for { ++ if tok, err = dec.Token(); err != nil { ++ return err ++ } ++ ++ switch tt := tok.(type) { ++ case xml.StartElement: ++ if tt.Name.Local != "value" { ++ return invalidXmlError ++ } ++ ++ if index < slice.Len() { ++ v := slice.Index(index) ++ if v.Kind() == reflect.Interface { ++ v = v.Elem() ++ } ++ if v.Kind() != reflect.Ptr { ++ return errors.New("error: cannot write to non-pointer array element") ++ } ++ if err = dec.decodeValue(v); err != nil { ++ return err ++ } ++ } else { ++ v := reflect.New(slice.Type().Elem()) ++ if err = dec.decodeValue(v); err != nil { ++ return err ++ } ++ slice = reflect.Append(slice, v.Elem()) ++ } ++ ++ // ++ if err = dec.Skip(); err != nil { ++ return err ++ } ++ index++ ++ case xml.EndElement: ++ val.Set(slice) ++ break DataLoop ++ } ++ } ++ case xml.EndElement: ++ break ArrayLoop ++ } ++ } ++ default: ++ if tok, err = dec.Token(); err != nil { ++ return err ++ } ++ ++ var data []byte ++ ++ switch t := tok.(type) { ++ case xml.EndElement: ++ return nil ++ case xml.CharData: ++ data = []byte(t.Copy()) ++ default: ++ return invalidXmlError ++ } ++ ++ switch typeName { ++ case "int", "i4", "i8": ++ if checkType(val, reflect.Interface) == nil && val.IsNil() { ++ i, err := strconv.ParseInt(string(data), 10, 64) ++ if err != nil { ++ return err ++ } ++ ++ pi := 
reflect.New(reflect.TypeOf(i)).Elem() ++ pi.SetInt(i) ++ val.Set(pi) ++ } else if err = checkType(val, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64); err != nil { ++ return err ++ } else { ++ i, err := strconv.ParseInt(string(data), 10, val.Type().Bits()) ++ if err != nil { ++ return err ++ } ++ ++ val.SetInt(i) ++ } ++ case "string", "base64": ++ str := string(data) ++ if checkType(val, reflect.Interface) == nil && val.IsNil() { ++ pstr := reflect.New(reflect.TypeOf(str)).Elem() ++ pstr.SetString(str) ++ val.Set(pstr) ++ } else if err = checkType(val, reflect.String); err != nil { ++ return err ++ } else { ++ val.SetString(str) ++ } ++ case "dateTime.iso8601": ++ var t time.Time ++ var err error ++ ++ for _, layout := range timeLayouts { ++ t, err = time.Parse(layout, string(data)) ++ if err == nil { ++ break ++ } ++ } ++ if err != nil { ++ return err ++ } ++ ++ if checkType(val, reflect.Interface) == nil && val.IsNil() { ++ ptime := reflect.New(reflect.TypeOf(t)).Elem() ++ ptime.Set(reflect.ValueOf(t)) ++ val.Set(ptime) ++ } else if _, ok := val.Interface().(time.Time); !ok { ++ return TypeMismatchError(fmt.Sprintf("error: type mismatch error - can't decode %v to time", val.Kind())) ++ } else { ++ val.Set(reflect.ValueOf(t)) ++ } ++ case "boolean": ++ v, err := strconv.ParseBool(string(data)) ++ if err != nil { ++ return err ++ } ++ ++ if checkType(val, reflect.Interface) == nil && val.IsNil() { ++ pv := reflect.New(reflect.TypeOf(v)).Elem() ++ pv.SetBool(v) ++ val.Set(pv) ++ } else if err = checkType(val, reflect.Bool); err != nil { ++ return err ++ } else { ++ val.SetBool(v) ++ } ++ case "double": ++ if checkType(val, reflect.Interface) == nil && val.IsNil() { ++ i, err := strconv.ParseFloat(string(data), 64) ++ if err != nil { ++ return err ++ } ++ ++ pdouble := reflect.New(reflect.TypeOf(i)).Elem() ++ pdouble.SetFloat(i) ++ val.Set(pdouble) ++ } else if err = checkType(val, reflect.Float32, reflect.Float64); err != nil { ++ return err ++ } else { ++ i, err := strconv.ParseFloat(string(data), val.Type().Bits()) ++ if err != nil { ++ return err ++ } ++ ++ val.SetFloat(i) ++ } ++ default: ++ return errors.New("unsupported type") ++ } ++ ++ // ++ if err = dec.Skip(); err != nil { ++ return err ++ } ++ } ++ ++ return nil ++} ++ ++func (dec *decoder) readTag() (string, []byte, error) { ++ var tok xml.Token ++ var err error ++ ++ var name string ++ for { ++ if tok, err = dec.Token(); err != nil { ++ return "", nil, err ++ } ++ ++ if t, ok := tok.(xml.StartElement); ok { ++ name = t.Name.Local ++ break ++ } ++ } ++ ++ value, err := dec.readCharData() ++ if err != nil { ++ return "", nil, err ++ } ++ ++ return name, value, dec.Skip() ++} ++ ++func (dec *decoder) readCharData() ([]byte, error) { ++ var tok xml.Token ++ var err error ++ ++ if tok, err = dec.Token(); err != nil { ++ return nil, err ++ } ++ ++ if t, ok := tok.(xml.CharData); ok { ++ return []byte(t.Copy()), nil ++ } else { ++ return nil, invalidXmlError ++ } ++} ++ ++func checkType(val reflect.Value, kinds ...reflect.Kind) error { ++ if len(kinds) == 0 { ++ return nil ++ } ++ ++ if val.Kind() == reflect.Ptr { ++ val = val.Elem() ++ } ++ ++ match := false ++ ++ for _, kind := range kinds { ++ if val.Kind() == kind { ++ match = true ++ break ++ } ++ } ++ ++ if !match { ++ return TypeMismatchError(fmt.Sprintf("error: type mismatch - can't unmarshal %v to %v", ++ val.Kind(), kinds[0])) ++ } ++ ++ return nil ++} +diff --git a/vendor/github.com/kolo/xmlrpc/decoder_test.go 
b/vendor/github.com/kolo/xmlrpc/decoder_test.go +new file mode 100644 +index 00000000..3701d50a +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/decoder_test.go +@@ -0,0 +1,234 @@ ++package xmlrpc ++ ++import ( ++ "fmt" ++ "io" ++ "io/ioutil" ++ "reflect" ++ "testing" ++ "time" ++ ++ "golang.org/x/text/encoding/charmap" ++ "golang.org/x/text/transform" ++) ++ ++type book struct { ++ Title string ++ Amount int ++} ++ ++type bookUnexported struct { ++ title string ++ amount int ++} ++ ++var unmarshalTests = []struct { ++ value interface{} ++ ptr interface{} ++ xml string ++}{ ++ // int, i4, i8 ++ {0, new(*int), ""}, ++ {100, new(*int), "100"}, ++ {389451, new(*int), "389451"}, ++ {int64(45659074), new(*int64), "45659074"}, ++ ++ // string ++ {"Once upon a time", new(*string), "Once upon a time"}, ++ {"Mike & Mick ", new(*string), "Mike & Mick <London, UK>"}, ++ {"Once upon a time", new(*string), "Once upon a time"}, ++ ++ // base64 ++ {"T25jZSB1cG9uIGEgdGltZQ==", new(*string), "T25jZSB1cG9uIGEgdGltZQ=="}, ++ ++ // boolean ++ {true, new(*bool), "1"}, ++ {false, new(*bool), "0"}, ++ ++ // double ++ {12.134, new(*float32), "12.134"}, ++ {-12.134, new(*float32), "-12.134"}, ++ ++ // datetime.iso8601 ++ {_time("2013-12-09T21:00:12Z"), new(*time.Time), "20131209T21:00:12"}, ++ {_time("2013-12-09T21:00:12Z"), new(*time.Time), "20131209T21:00:12Z"}, ++ {_time("2013-12-09T21:00:12-01:00"), new(*time.Time), "20131209T21:00:12-01:00"}, ++ {_time("2013-12-09T21:00:12+01:00"), new(*time.Time), "20131209T21:00:12+01:00"}, ++ {_time("2013-12-09T21:00:12Z"), new(*time.Time), "2013-12-09T21:00:12"}, ++ {_time("2013-12-09T21:00:12Z"), new(*time.Time), "2013-12-09T21:00:12Z"}, ++ {_time("2013-12-09T21:00:12-01:00"), new(*time.Time), "2013-12-09T21:00:12-01:00"}, ++ {_time("2013-12-09T21:00:12+01:00"), new(*time.Time), "2013-12-09T21:00:12+01:00"}, ++ ++ // array ++ {[]int{1, 5, 7}, new(*[]int), "157"}, ++ {[]interface{}{"A", "5"}, new(interface{}), "A5"}, ++ {[]interface{}{"A", int64(5)}, new(interface{}), "A5"}, ++ ++ // struct ++ {book{"War and Piece", 20}, new(*book), "TitleWar and PieceAmount20"}, ++ {bookUnexported{}, new(*bookUnexported), "titleWar and Pieceamount20"}, ++ {map[string]interface{}{"Name": "John Smith"}, new(interface{}), "NameJohn Smith"}, ++ {map[string]interface{}{}, new(interface{}), ""}, ++} ++ ++func _time(s string) time.Time { ++ t, err := time.Parse(time.RFC3339, s) ++ if err != nil { ++ panic(fmt.Sprintf("time parsing error: %v", err)) ++ } ++ return t ++} ++ ++func Test_unmarshal(t *testing.T) { ++ for _, tt := range unmarshalTests { ++ v := reflect.New(reflect.TypeOf(tt.value)) ++ if err := unmarshal([]byte(tt.xml), v.Interface()); err != nil { ++ t.Fatalf("unmarshal error: %v", err) ++ } ++ ++ v = v.Elem() ++ ++ if v.Kind() == reflect.Slice { ++ vv := reflect.ValueOf(tt.value) ++ if vv.Len() != v.Len() { ++ t.Fatalf("unmarshal error:\nexpected: %v\n got: %v", tt.value, v.Interface()) ++ } ++ for i := 0; i < v.Len(); i++ { ++ if v.Index(i).Interface() != vv.Index(i).Interface() { ++ t.Fatalf("unmarshal error:\nexpected: %v\n got: %v", tt.value, v.Interface()) ++ } ++ } ++ } else { ++ a1 := v.Interface() ++ a2 := interface{}(tt.value) ++ ++ if !reflect.DeepEqual(a1, a2) { ++ t.Fatalf("unmarshal error:\nexpected: %v\n got: %v", tt.value, v.Interface()) ++ } ++ } ++ } ++} ++ ++func Test_unmarshalToNil(t *testing.T) { ++ for _, tt := range unmarshalTests { ++ if err := unmarshal([]byte(tt.xml), tt.ptr); err != nil { ++ t.Fatalf("unmarshal error: %v", err) ++ } ++ } ++} ++ ++func 
Test_typeMismatchError(t *testing.T) { ++ var s string ++ ++ encoded := "100" ++ var err error ++ ++ if err = unmarshal([]byte(encoded), &s); err == nil { ++ t.Fatal("unmarshal error: expected error, but didn't get it") ++ } ++ ++ if _, ok := err.(TypeMismatchError); !ok { ++ t.Fatal("unmarshal error: expected type mistmatch error, but didn't get it") ++ } ++} ++ ++func Test_unmarshalEmptyValueTag(t *testing.T) { ++ var v int ++ ++ if err := unmarshal([]byte(""), &v); err != nil { ++ t.Fatalf("unmarshal error: %v", err) ++ } ++} ++ ++const structEmptyXML = ` ++ ++ ++ ++ ++` ++ ++func Test_unmarshalEmptyStruct(t *testing.T) { ++ var v interface{} ++ if err := unmarshal([]byte(structEmptyXML), &v); err != nil { ++ t.Fatal(err) ++ } ++ if v == nil { ++ t.Fatalf("got nil map") ++ } ++} ++ ++const arrayValueXML = ` ++ ++ ++ ++ 234 ++ 1 ++ Hello World ++ Extra Value ++ ++ ++ ++` ++ ++func Test_unmarshalExistingArray(t *testing.T) { ++ ++ var ( ++ v1 int ++ v2 bool ++ v3 string ++ ++ v = []interface{}{&v1, &v2, &v3} ++ ) ++ if err := unmarshal([]byte(arrayValueXML), &v); err != nil { ++ t.Fatal(err) ++ } ++ ++ // check pre-existing values ++ if want := 234; v1 != want { ++ t.Fatalf("want %d, got %d", want, v1) ++ } ++ if want := true; v2 != want { ++ t.Fatalf("want %t, got %t", want, v2) ++ } ++ if want := "Hello World"; v3 != want { ++ t.Fatalf("want %s, got %s", want, v3) ++ } ++ // check the appended result ++ if n := len(v); n != 4 { ++ t.Fatalf("missing appended result") ++ } ++ if got, ok := v[3].(string); !ok || got != "Extra Value" { ++ t.Fatalf("got %s, want %s", got, "Extra Value") ++ } ++} ++ ++func Test_decodeNonUTF8Response(t *testing.T) { ++ data, err := ioutil.ReadFile("fixtures/cp1251.xml") ++ if err != nil { ++ t.Fatal(err) ++ } ++ ++ CharsetReader = decode ++ ++ var s string ++ if err = unmarshal(data, &s); err != nil { ++ fmt.Println(err) ++ t.Fatal("unmarshal error: cannot decode non utf-8 response") ++ } ++ ++ expected := "Л.Н. 
Толстой - Война и Мир" ++ ++ if s != expected { ++ t.Fatalf("unmarshal error:\nexpected: %v\n got: %v", expected, s) ++ } ++ ++ CharsetReader = nil ++} ++ ++func decode(charset string, input io.Reader) (io.Reader, error) { ++ if charset != "cp1251" { ++ return nil, fmt.Errorf("unsupported charset") ++ } ++ ++ return transform.NewReader(input, charmap.Windows1251.NewDecoder()), nil ++} +diff --git a/vendor/github.com/kolo/xmlrpc/encoder.go b/vendor/github.com/kolo/xmlrpc/encoder.go +new file mode 100644 +index 00000000..d585a7d3 +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/encoder.go +@@ -0,0 +1,171 @@ ++package xmlrpc ++ ++import ( ++ "bytes" ++ "encoding/xml" ++ "fmt" ++ "reflect" ++ "sort" ++ "strconv" ++ "time" ++) ++ ++type encodeFunc func(reflect.Value) ([]byte, error) ++ ++func marshal(v interface{}) ([]byte, error) { ++ if v == nil { ++ return []byte{}, nil ++ } ++ ++ val := reflect.ValueOf(v) ++ return encodeValue(val) ++} ++ ++func encodeValue(val reflect.Value) ([]byte, error) { ++ var b []byte ++ var err error ++ ++ if val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface { ++ if val.IsNil() { ++ return []byte(""), nil ++ } ++ ++ val = val.Elem() ++ } ++ ++ switch val.Kind() { ++ case reflect.Struct: ++ switch val.Interface().(type) { ++ case time.Time: ++ t := val.Interface().(time.Time) ++ b = []byte(fmt.Sprintf("%s", t.Format(iso8601))) ++ default: ++ b, err = encodeStruct(val) ++ } ++ case reflect.Map: ++ b, err = encodeMap(val) ++ case reflect.Slice: ++ b, err = encodeSlice(val) ++ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ++ b = []byte(fmt.Sprintf("%s", strconv.FormatInt(val.Int(), 10))) ++ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: ++ b = []byte(fmt.Sprintf("%s", strconv.FormatUint(val.Uint(), 10))) ++ case reflect.Float32, reflect.Float64: ++ b = []byte(fmt.Sprintf("%s", ++ strconv.FormatFloat(val.Float(), 'f', -1, val.Type().Bits()))) ++ case reflect.Bool: ++ if val.Bool() { ++ b = []byte("1") ++ } else { ++ b = []byte("0") ++ } ++ case reflect.String: ++ var buf bytes.Buffer ++ ++ xml.Escape(&buf, []byte(val.String())) ++ ++ if _, ok := val.Interface().(Base64); ok { ++ b = []byte(fmt.Sprintf("%s", buf.String())) ++ } else { ++ b = []byte(fmt.Sprintf("%s", buf.String())) ++ } ++ default: ++ return nil, fmt.Errorf("xmlrpc encode error: unsupported type") ++ } ++ ++ if err != nil { ++ return nil, err ++ } ++ ++ return []byte(fmt.Sprintf("%s", string(b))), nil ++} ++ ++func encodeStruct(val reflect.Value) ([]byte, error) { ++ var b bytes.Buffer ++ ++ b.WriteString("") ++ ++ t := val.Type() ++ for i := 0; i < t.NumField(); i++ { ++ b.WriteString("") ++ f := t.Field(i) ++ ++ name := f.Tag.Get("xmlrpc") ++ if name == "" { ++ name = f.Name ++ } ++ b.WriteString(fmt.Sprintf("%s", name)) ++ ++ p, err := encodeValue(val.FieldByName(f.Name)) ++ if err != nil { ++ return nil, err ++ } ++ b.Write(p) ++ ++ b.WriteString("") ++ } ++ ++ b.WriteString("") ++ ++ return b.Bytes(), nil ++} ++ ++var sortMapKeys bool ++ ++func encodeMap(val reflect.Value) ([]byte, error) { ++ var t = val.Type() ++ ++ if t.Key().Kind() != reflect.String { ++ return nil, fmt.Errorf("xmlrpc encode error: only maps with string keys are supported") ++ } ++ ++ var b bytes.Buffer ++ ++ b.WriteString("") ++ ++ keys := val.MapKeys() ++ ++ if sortMapKeys { ++ sort.Slice(keys, func(i, j int) bool { return keys[i].String() < keys[j].String() }) ++ } ++ ++ for i := 0; i < val.Len(); i++ { ++ key := keys[i] ++ kval := 
val.MapIndex(key) ++ ++ b.WriteString("") ++ b.WriteString(fmt.Sprintf("%s", key.String())) ++ ++ p, err := encodeValue(kval) ++ ++ if err != nil { ++ return nil, err ++ } ++ ++ b.Write(p) ++ b.WriteString("") ++ } ++ ++ b.WriteString("") ++ ++ return b.Bytes(), nil ++} ++ ++func encodeSlice(val reflect.Value) ([]byte, error) { ++ var b bytes.Buffer ++ ++ b.WriteString("") ++ ++ for i := 0; i < val.Len(); i++ { ++ p, err := encodeValue(val.Index(i)) ++ if err != nil { ++ return nil, err ++ } ++ ++ b.Write(p) ++ } ++ ++ b.WriteString("") ++ ++ return b.Bytes(), nil ++} +diff --git a/vendor/github.com/kolo/xmlrpc/encoder_test.go b/vendor/github.com/kolo/xmlrpc/encoder_test.go +new file mode 100644 +index 00000000..ca4ac706 +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/encoder_test.go +@@ -0,0 +1,58 @@ ++package xmlrpc ++ ++import ( ++ "testing" ++ "time" ++) ++ ++var marshalTests = []struct { ++ value interface{} ++ xml string ++}{ ++ {100, "100"}, ++ {"Once upon a time", "Once upon a time"}, ++ {"Mike & Mick ", "Mike & Mick <London, UK>"}, ++ {Base64("T25jZSB1cG9uIGEgdGltZQ=="), "T25jZSB1cG9uIGEgdGltZQ=="}, ++ {true, "1"}, ++ {false, "0"}, ++ {12.134, "12.134"}, ++ {-12.134, "-12.134"}, ++ {738777323.0, "738777323"}, ++ {time.Unix(1386622812, 0).UTC(), "20131209T21:00:12"}, ++ {[]interface{}{1, "one"}, "1one"}, ++ {&struct { ++ Title string ++ Amount int ++ }{"War and Piece", 20}, "TitleWar and PieceAmount20"}, ++ {&struct { ++ Value interface{} `xmlrpc:"value"` ++ }{}, "value"}, ++ { ++ map[string]interface{}{"title": "War and Piece", "amount": 20}, ++ "amount20titleWar and Piece", ++ }, ++ { ++ map[string]interface{}{ ++ "Name": "John Smith", ++ "Age": 6, ++ "Wight": []float32{66.67, 100.5}, ++ "Dates": map[string]interface{}{"Birth": time.Date(1829, time.November, 10, 23, 0, 0, 0, time.UTC), "Death": time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)}}, ++ "Age6DatesBirth18291110T23:00:00Death20091110T23:00:00NameJohn SmithWight66.67100.5", ++ }, ++} ++ ++func Test_marshal(t *testing.T) { ++ sortMapKeys = true ++ ++ for _, tt := range marshalTests { ++ b, err := marshal(tt.value) ++ if err != nil { ++ t.Fatalf("unexpected marshal error: %v", err) ++ } ++ ++ if string(b) != tt.xml { ++ t.Fatalf("marshal error:\nexpected: %s\n got: %s", tt.xml, string(b)) ++ } ++ ++ } ++} +diff --git a/vendor/github.com/kolo/xmlrpc/fixtures/cp1251.xml b/vendor/github.com/kolo/xmlrpc/fixtures/cp1251.xml +new file mode 100644 +index 00000000..1d5e9bfc +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/fixtures/cp1251.xml +@@ -0,0 +1,6 @@ ++ ++ ++ ++ .. - ++ ++ +\ No newline at end of file +diff --git a/vendor/github.com/kolo/xmlrpc/request.go b/vendor/github.com/kolo/xmlrpc/request.go +new file mode 100644 +index 00000000..acb8251b +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/request.go +@@ -0,0 +1,57 @@ ++package xmlrpc ++ ++import ( ++ "bytes" ++ "fmt" ++ "net/http" ++) ++ ++func NewRequest(url string, method string, args interface{}) (*http.Request, error) { ++ var t []interface{} ++ var ok bool ++ if t, ok = args.([]interface{}); !ok { ++ if args != nil { ++ t = []interface{}{args} ++ } ++ } ++ ++ body, err := EncodeMethodCall(method, t...) 
++ if err != nil { ++ return nil, err ++ } ++ ++ request, err := http.NewRequest("POST", url, bytes.NewReader(body)) ++ if err != nil { ++ return nil, err ++ } ++ ++ request.Header.Set("Content-Type", "text/xml") ++ request.Header.Set("Content-Length", fmt.Sprintf("%d", len(body))) ++ ++ return request, nil ++} ++ ++func EncodeMethodCall(method string, args ...interface{}) ([]byte, error) { ++ var b bytes.Buffer ++ b.WriteString(``) ++ b.WriteString(fmt.Sprintf("%s", method)) ++ ++ if args != nil { ++ b.WriteString("") ++ ++ for _, arg := range args { ++ p, err := marshal(arg) ++ if err != nil { ++ return nil, err ++ } ++ ++ b.WriteString(fmt.Sprintf("%s", string(p))) ++ } ++ ++ b.WriteString("") ++ } ++ ++ b.WriteString("") ++ ++ return b.Bytes(), nil ++} +diff --git a/vendor/github.com/kolo/xmlrpc/response.go b/vendor/github.com/kolo/xmlrpc/response.go +new file mode 100644 +index 00000000..6742a1c7 +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/response.go +@@ -0,0 +1,52 @@ ++package xmlrpc ++ ++import ( ++ "regexp" ++) ++ ++var ( ++ faultRx = regexp.MustCompile(`(\s|\S)+`) ++) ++ ++type failedResponse struct { ++ Code int `xmlrpc:"faultCode"` ++ Error string `xmlrpc:"faultString"` ++} ++ ++func (r *failedResponse) err() error { ++ return &xmlrpcError{ ++ code: r.Code, ++ err: r.Error, ++ } ++} ++ ++type Response struct { ++ data []byte ++} ++ ++func NewResponse(data []byte) *Response { ++ return &Response{ ++ data: data, ++ } ++} ++ ++func (r *Response) Failed() bool { ++ return faultRx.Match(r.data) ++} ++ ++func (r *Response) Err() error { ++ failedResp := new(failedResponse) ++ if err := unmarshal(r.data, failedResp); err != nil { ++ return err ++ } ++ ++ return failedResp.err() ++} ++ ++func (r *Response) Unmarshal(v interface{}) error { ++ if err := unmarshal(r.data, v); err != nil { ++ return err ++ } ++ ++ return nil ++} +diff --git a/vendor/github.com/kolo/xmlrpc/response_test.go b/vendor/github.com/kolo/xmlrpc/response_test.go +new file mode 100644 +index 00000000..55095c24 +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/response_test.go +@@ -0,0 +1,84 @@ ++package xmlrpc ++ ++import ( ++ "testing" ++) ++ ++const faultRespXml = ` ++ ++ ++ ++ ++ ++ ++ faultString ++ ++ You must log in before using this part of Bugzilla. ++ ++ ++ ++ faultCode ++ ++ 410 ++ ++ ++ ++ ++ ++` ++ ++func Test_failedResponse(t *testing.T) { ++ resp := NewResponse([]byte(faultRespXml)) ++ ++ if !resp.Failed() { ++ t.Fatal("Failed() error: expected true, got false") ++ } ++ ++ if resp.Err() == nil { ++ t.Fatal("Err() error: expected error, got nil") ++ } ++ ++ err := resp.Err().(*xmlrpcError) ++ if err.code != 410 && err.err != "You must log in before using this part of Bugzilla." 
{ ++ t.Fatal("Err() error: got wrong error") ++ } ++} ++ ++const emptyValResp = ` ++ ++ ++ ++ ++ ++ ++ ++ user ++ Joe Smith ++ ++ ++ token ++ ++ ++ ++ ++ ++ ++` ++ ++ ++func Test_responseWithEmptyValue(t *testing.T) { ++ resp := NewResponse([]byte(emptyValResp)) ++ ++ result := struct{ ++ User string `xmlrpc:"user"` ++ Token string `xmlrpc:"token"` ++ }{} ++ ++ if err := resp.Unmarshal(&result); err != nil { ++ t.Fatalf("unmarshal error: %v", err) ++ } ++ ++ if result.User != "Joe Smith" || result.Token != "" { ++ t.Fatalf("unexpected result: %v", result) ++ } ++} +diff --git a/vendor/github.com/kolo/xmlrpc/test_server.rb b/vendor/github.com/kolo/xmlrpc/test_server.rb +new file mode 100644 +index 00000000..1b1ff876 +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/test_server.rb +@@ -0,0 +1,25 @@ ++# encoding: utf-8 ++ ++require "xmlrpc/server" ++ ++class Service ++ def time ++ Time.now ++ end ++ ++ def upcase(s) ++ s.upcase ++ end ++ ++ def sum(x, y) ++ x + y ++ end ++ ++ def error ++ raise XMLRPC::FaultException.new(500, "Server error") ++ end ++end ++ ++server = XMLRPC::Server.new 5001, 'localhost' ++server.add_handler "service", Service.new ++server.serve +diff --git a/vendor/github.com/kolo/xmlrpc/xmlrpc.go b/vendor/github.com/kolo/xmlrpc/xmlrpc.go +new file mode 100644 +index 00000000..8766403a +--- /dev/null ++++ b/vendor/github.com/kolo/xmlrpc/xmlrpc.go +@@ -0,0 +1,19 @@ ++package xmlrpc ++ ++import ( ++ "fmt" ++) ++ ++// xmlrpcError represents errors returned on xmlrpc request. ++type xmlrpcError struct { ++ code int ++ err string ++} ++ ++// Error() method implements Error interface ++func (e *xmlrpcError) Error() string { ++ return fmt.Sprintf("error: \"%s\" code: %d", e.err, e.code) ++} ++ ++// Base64 represents value in base64 encoding ++type Base64 string +-- +2.16.4 + diff --git a/golang-github-prometheus-prometheus.changes b/golang-github-prometheus-prometheus.changes index 3778109..978179c 100644 --- a/golang-github-prometheus-prometheus.changes +++ b/golang-github-prometheus-prometheus.changes @@ -1,3 +1,9 @@ +------------------------------------------------------------------- +Thu Jul 25 16:34:29 UTC 2019 - Joao Cavalheiro + +- Add support for Uyuni/SUSE Manager service discovery + + Added 0003-Add-Uyuni-service-discovery + ------------------------------------------------------------------- Thu Jul 18 01:06:13 UTC 2019 - Simon Crute diff --git a/golang-github-prometheus-prometheus.spec b/golang-github-prometheus-prometheus.spec index ebcff3d..517e916 100644 --- a/golang-github-prometheus-prometheus.spec +++ b/golang-github-prometheus-prometheus.spec @@ -32,6 +32,8 @@ Source2: prometheus.yml Patch1: 0001-Do-not-force-the-pure-Go-name-resolver.patch # Lifted from Debian's prometheus package Patch2: 0002-Default-settings.patch +# Uyuni service discovery support +Patch3: 0003-Add-Uyuni-service-discovery.patch %ifarch aarch64 # For some reason the aarch64 build fails with: # + promu build @@ -69,6 +71,7 @@ Prometheus's main features are: %setup -q -n prometheus-%{version} %patch1 -p 1 %patch2 -p 1 +%patch3 -p 1 %build %goprep github.com/prometheus/prometheus From e4a9cf08d9d5d11d132f09fb5faf84147b729f3bf04b16d11199a01ce92ac53c Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Fri, 2 Aug 2019 11:20:08 +0000 Subject: [PATCH 2/3] Accepting request 720580 from home:gladiac:branches:server:monitoring Uses $ARGS in sysconfig and service file now. 
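With this change extra command line flags for the daemon can be supplied
through /etc/sysconfig/prometheus, for example (value purely illustrative):

    ARGS="--web.listen-address=0.0.0.0:9090"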
OBS-URL: https://build.opensuse.org/request/show/720580 OBS-URL: https://build.opensuse.org/package/show/server:monitoring/golang-github-prometheus-prometheus?expand=0&rev=16 --- golang-github-prometheus-prometheus.changes | 14 ++++ golang-github-prometheus-prometheus.spec | 80 +++++++++++++-------- prometheus-buildmode-pie.patch | 13 ++++ prometheus.firewall.xml | 6 ++ prometheus.sysconfig | 9 +++ 5 files changed, 91 insertions(+), 31 deletions(-) create mode 100644 prometheus-buildmode-pie.patch create mode 100644 prometheus.firewall.xml create mode 100644 prometheus.sysconfig diff --git a/golang-github-prometheus-prometheus.changes b/golang-github-prometheus-prometheus.changes index 978179c..02713c0 100644 --- a/golang-github-prometheus-prometheus.changes +++ b/golang-github-prometheus-prometheus.changes @@ -1,3 +1,17 @@ +------------------------------------------------------------------- +Wed Jul 31 10:46:13 UTC 2019 - Andreas Schneider + +- Build with PIE + + Added prometheus-buildmode-pie.patch + +------------------------------------------------------------------- +Wed Jul 31 06:47:55 UTC 2019 - Andreas Schneider + +- Only package required files (reduces rpm size by 4 MB) +- Add sysconfig file +- Add firewall config file +- Use variables for defining user and group + ------------------------------------------------------------------- Thu Jul 25 16:34:29 UTC 2019 - Joao Cavalheiro diff --git a/golang-github-prometheus-prometheus.spec b/golang-github-prometheus-prometheus.spec index 517e916..a8d4977 100644 --- a/golang-github-prometheus-prometheus.spec +++ b/golang-github-prometheus-prometheus.spec @@ -17,6 +17,9 @@ # +%global prometheus_user prometheus +%global prometheus_group %{prometheus_user} + %{go_nostrip} Name: golang-github-prometheus-prometheus @@ -29,25 +32,19 @@ Url: https://prometheus.io/ Source: prometheus-%{version}.tar.xz Source1: prometheus.service Source2: prometheus.yml +Source3: prometheus.sysconfig +Source4: prometheus.firewall.xml Patch1: 0001-Do-not-force-the-pure-Go-name-resolver.patch # Lifted from Debian's prometheus package Patch2: 0002-Default-settings.patch # Uyuni service discovery support Patch3: 0003-Add-Uyuni-service-discovery.patch -%ifarch aarch64 -# For some reason the aarch64 build fails with: -# + promu build -# > prometheus -# # github.com/prometheus/prometheus/cmd/prometheus -# /usr/lib64/go/pkg/tool/linux_arm64/link: running gcc failed: exit status 1 -# /usr/lib64/gcc/aarch64-suse-linux/4.8/../../../../aarch64-suse-linux/bin/ld: cannot find -lpthread -# /usr/lib64/gcc/aarch64-suse-linux/4.8/../../../../aarch64-suse-linux/bin/ld: cannot find -lc -# collect2: error: ld returned 1 exit status -# Adding glibc-devel-static fixes it, but it's odd that this isn't -# also a problem on x86_64. 
-BuildRequires:  glibc-devel-static
-%endif
+# Add -buildmode=pie
+Patch4:         prometheus-buildmode-pie.patch
 BuildRequires:  fdupes
+# Adding glibc-devel-static seems to be required for linking if building
+# with -buildmode=pie
+BuildRequires:  glibc-devel-static
 BuildRequires:  golang-github-prometheus-promu
 BuildRequires:  golang-packaging
 BuildRequires:  xz
@@ -55,6 +52,7 @@ BuildRequires:  golang(API) >= 1.12
 BuildRoot:      %{_tmppath}/%{name}-%{version}-build
 %{?systemd_requires}
 Requires(pre):  shadow
+Requires(post): %fillup_prereq
 %{go_provides}
 
 %description
@@ -68,35 +66,46 @@ Prometheus's main features are:
 - multiple modes of graphing and dashboarding support
 
 %prep
-%setup -q -n prometheus-%{version}
-%patch1 -p 1
-%patch2 -p 1
-%patch3 -p 1
+%autosetup -p1 -n prometheus-%{version}
 
 %build
 %goprep github.com/prometheus/prometheus
 GOPATH=%{_builddir}/go promu build
 
 %install
-%goinstall
 install -D -m0755 %{_builddir}/prometheus-%{version}/prometheus %{buildroot}/%{_bindir}/prometheus
 install -D -m0755 %{_builddir}/prometheus-%{version}/promtool %{buildroot}/%{_bindir}/promtool
-%gosrc
-install -D -m 0644 %{SOURCE1} %{buildroot}%{_unitdir}/prometheus.service
-install -Dd -m 0755 %{buildroot}%{_sbindir}
+install -m 0755 -d %{buildroot}%{_datarootdir}/prometheus
+cp -fr console_libraries/ consoles/ %{buildroot}%{_datarootdir}/prometheus
+
+install -m 0755 -d %{buildroot}%{_unitdir}
+install -m 0644 %{SOURCE1} %{buildroot}%{_unitdir}/prometheus.service
+
+install -d -m 0755 %{buildroot}%{_sbindir}
 ln -s /usr/sbin/service %{buildroot}%{_sbindir}/rcprometheus
-install -D -m 0644 %{SOURCE2} %{buildroot}%{_sysconfdir}/prometheus/prometheus.yml
-install -Dd -m 0750 %{buildroot}%{_localstatedir}/lib/prometheus
-install -Dd -m 0750 %{buildroot}%{_localstatedir}/lib/prometheus/metrics
-%gofilelist
-%fdupes %{buildroot}/%{_prefix}
+
+install -d -m 0755 %{buildroot}%{_sysconfdir}/prometheus
+install -m 0644 %{SOURCE2} %{buildroot}%{_sysconfdir}/prometheus/prometheus.yml
+
+install -m 0755 -d %{buildroot}%{_fillupdir}
+install -m 0644 %{SOURCE3} %{buildroot}%{_fillupdir}/sysconfig.prometheus
+
+install -m 0755 -d %{buildroot}%{_libdir}/firewalld/services/
+install -m 0644 %{SOURCE4} %{buildroot}%{_libdir}/firewalld/services/prometheus.xml
+
+install -d -m 0755 %{buildroot}%{_sharedstatedir}/prometheus
+install -d -m 0755 %{buildroot}%{_sharedstatedir}/prometheus/data
+install -d -m 0755 %{buildroot}%{_sharedstatedir}/prometheus/metrics
+
+%fdupes %{buildroot}/%{_datarootdir}
 
 %pre
+getent group %{prometheus_group} >/dev/null || %{_sbindir}/groupadd -r %{prometheus_group}
+getent passwd %{prometheus_user} >/dev/null || %{_sbindir}/useradd -r -g %{prometheus_group} -d %{_localstatedir}/lib/prometheus -s /sbin/nologin %{prometheus_user}
 %service_add_pre prometheus.service
-getent group prometheus >/dev/null || %{_sbindir}/groupadd -r prometheus
-getent passwd prometheus >/dev/null || %{_sbindir}/useradd -r -g prometheus -d %{_localstatedir}/lib/prometheus -s /sbin/nologin prometheus
 
 %post
+%fillup_only -n prometheus
 %service_add_post prometheus.service
 
 %preun
@@ -105,7 +114,10 @@ getent passwd prometheus >/dev/null || %{_sbindir}/useradd -r -g prometheus -d %
 %postun
 %service_del_postun prometheus.service
 
-%files -f file.lst
+%verifyscript
+%fillup_only -n prometheus
+
+%files
 %defattr(-,root,root,-)
 %doc README.md
 %license LICENSE
@@ -113,9 +125,15 @@ getent passwd prometheus >/dev/null || %{_sbindir}/useradd -r -g prometheus -d %
 %{_bindir}/promtool
 %{_unitdir}/prometheus.service
 %{_sbindir}/rcprometheus
-%dir %attr(0750, prometheus, prometheus) %{_localstatedir}/lib/prometheus
-%dir %attr(0750, prometheus, prometheus) %{_localstatedir}/lib/prometheus/metrics
+%{_datarootdir}/prometheus
+%{_fillupdir}/sysconfig.prometheus
+%dir %attr(0700,%{prometheus_user},%{prometheus_group}) %{_sharedstatedir}/prometheus
+%dir %attr(0700,%{prometheus_user},%{prometheus_group}) %{_sharedstatedir}/prometheus/data
+%dir %attr(0700,%{prometheus_user},%{prometheus_group}) %{_sharedstatedir}/prometheus/metrics
 %dir %{_sysconfdir}/prometheus
 %config(noreplace) %{_sysconfdir}/prometheus/prometheus.yml
+%dir %{_libdir}/firewalld
+%dir %{_libdir}/firewalld/services
+%{_libdir}/firewalld/services/prometheus.xml
 
 %changelog
diff --git a/prometheus-buildmode-pie.patch b/prometheus-buildmode-pie.patch
new file mode 100644
index 0000000..685b0ed
--- /dev/null
+++ b/prometheus-buildmode-pie.patch
@@ -0,0 +1,13 @@
+Index: prometheus-2.11.1/.promu.yml
+===================================================================
+--- prometheus-2.11.1.orig/.promu.yml	2019-07-31 12:44:46.190247145 +0200
++++ prometheus-2.11.1/.promu.yml	2019-07-31 12:45:18.354547518 +0200
+@@ -10,7 +10,7 @@ build:
+           path: ./cmd/prometheus
+         - name: promtool
+           path: ./cmd/promtool
+-    flags: -mod=vendor -a
++    flags: -mod=vendor -buildmode=pie -a
+     ldflags: |
+         -X github.com/prometheus/common/version.Version={{.Version}}
+         -X github.com/prometheus/common/version.Revision={{.Revision}}
diff --git a/prometheus.firewall.xml b/prometheus.firewall.xml
new file mode 100644
index 0000000..c9c2b50
--- /dev/null
+++ b/prometheus.firewall.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="utf-8"?>
+<service>
+  <short>Prometheus</short>
+  <description>Prometheus monitoring system and time series database.</description>
+  <port protocol="tcp" port="9090"/>
+</service>
diff --git a/prometheus.sysconfig b/prometheus.sysconfig
new file mode 100644
index 0000000..b26fb5b
--- /dev/null
+++ b/prometheus.sysconfig
@@ -0,0 +1,9 @@
+## Path:
+## Description:    Prometheus monitoring server settings
+## Type:           string
+## Default:        ""
+## ServiceRestart: prometheus
+#
+# Command line options for prometheus
+#
+ARGS=""

From 2f88fef4ba207f7d80678a136edf8b30c6f06e38b80f7bb022bf02ef3269a617 Mon Sep 17 00:00:00 2001
From: Tim Serong
Date: Wed, 7 Aug 2019 08:15:50 +0000
Subject: [PATCH 3/3] Accepting request 720641 from home:jfajerski:branches:server:monitoring

- Add network-online (Wants and After) dependency to systemd unit bsc#1143913
- Build with PIE
  + Added 0004-prometheus-buildmode-pie.patch
- Only package required files (reduces rpm size by 4 MB)
- Add sysconfig file
- Add firewall config file
- Use variables for defining user and group
- Add support for Uyuni/SUSE Manager service discovery
  + Added 0003-Add-Uyuni-service-discovery.patch
- re-added _service file removed in error.
- Update to 2.11.1
  + Bug Fix:
    * Fix potential panic when prometheus is watching multiple zookeeper paths.
- Update to 2.11.0
  + Bug Fix:
    * resolve race condition in maxGauge.
    * Fix ZooKeeper connection leak.
    * Improved atomicity of .tmp block replacement during compaction for usual case.
    * Fix "unknown series references" after clean shutdown.
    * Re-calculate block size when calling block.Delete.
    * Fix unsafe snapshots with head block.
    * prometheus_tsdb_compactions_failed_total is now incremented on any compaction failure.
  + Changes:
    * Remove max_retries from queue_config (it has been unused since rewriting remote-write to utilize the write-ahead-log)
    * The meta file BlockStats no longer holds size information. This is now dynamically calculated and kept in memory. It also includes the meta file size which was not included before.
    * Renamed metric from prometheus_tsdb_wal_reader_corruption_errors to prometheus_tsdb_wal_reader_corruption_errors_total.
  + Features:
    * Add option to use Alertmanager API v2.
    * Added humanizePercentage function for templates.
    * Include InitContainers in Kubernetes Service Discovery.
    * Provide option to compress WAL records using Snappy.
  + Enhancements:
    * Create new clean segment when starting the WAL.
    * Reduce allocations in PromQL aggregations.
    * Add storage warnings to LabelValues and LabelNames API results.
    * Add prometheus_http_requests_total metric.
    * Enable openbsd/arm build.
    * Remote-write allocation improvements.
    * Query performance improvement: Efficient iteration and search in HashForLabels and HashWithoutLabels.
    * Allow injection of arbitrary headers in promtool.
    * Allow passing external_labels in alert unit tests groups.
    * Allows globs for rules when unit testing.
    * Improved postings intersection matching.
    * Reduced disk usage for WAL for small setups.
    * Optimize queries using regexp for set lookups.
- rebase patch 0002-Default-settings.patch
- Update to 2.10.0:
  + Bug Fixes:
    * TSDB: Don't panic when running out of disk space and recover nicely from the condition
    * TSDB: Correctly handle empty labels.
    * TSDB: Don't crash on an unknown tombstone reference.
    * Storage/remote: Remove queue-manager specific metrics if queue no longer exists.
    * PromQL: Correctly display {__name__="a"}.
    * Discovery/kubernetes: Use service rather than ingress as the name for the service workqueue.
    * Discovery/azure: Don't panic on a VM with a public IP.
    * Web: Fixed Content-Type for js and css instead of using /etc/mime.types.
    * API: Encode alert values as string to correctly represent Inf/NaN.
  + Features:
    * Template expansion: Make external labels available as $externalLabels in alert and console template expansion.
    * TSDB: Add prometheus_tsdb_wal_segment_current metric for the WAL segment index that TSDB is currently writing to. tsdb
    * Scrape: Add scrape_series_added per-scrape metric. #5546
  + Enhancements:
    * Discovery/kubernetes: Add labels __meta_kubernetes_endpoint_node_name and __meta_kubernetes_endpoint_hostname.
    * Discovery/azure: Add label __meta_azure_machine_public_ip.
    * TSDB: Simplify mergedPostings.Seek, resulting in better performance if there are many posting lists. tsdb
    * Log filesystem type on startup.
    * Cmd/promtool: Use POST requests for Query and QueryRange. client_golang
    * Web: Sort alerts by group name.
    * Console templates: Add convenience variables $rawParams, $params, $path.
- Update to 2.9.2
  + Bug Fixes:
    * Make sure subquery range is taken into account for selection
    * Exhaust every request body before closing it
    * Cmd/promtool: return errors from rule evaluations
    * Remote Storage: string interner should not panic in release
    * Fix memory allocation regression in mergedPostings.Seek tsdb
- Update to 2.9.1
  + Bug Fixes:
    * Discovery/kubernetes: fix missing label sanitization
    * Remote_write: Prevent reshard concurrent with calling stop
- Update to 2.9.0
  + Feature:
    * Add honor_timestamps scrape option.
  + Enhancements:
    * Update Consul to support catalog.ServiceMultipleTags.
    * Discovery/kubernetes: add present labels for labels/annotations.
    * OpenStack SD: Add ProjectID and UserID meta labels.
    * Add GODEBUG and retention to the runtime page.
    * Add support for POSTing to /series endpoint.
    * Support PUT methods for Lifecycle and Admin APIs.
    * Scrape: Add global jitter for HA server.
    * Check for cancellation on every step of a range evaluation.
    * String interning for labels & values in the remote_write path.
    * Don't lose the scrape cache on a failed scrape.
    * Reload cert files from disk automatically. common
    * Use fixed length millisecond timestamp format for logs. common
    * Performance improvements for postings.
  + Bug Fixes:
    * Remote Write: fix checkpoint reading.
    * Check if label value is valid when unmarshaling external labels from YAML.
    * Promparse: sort all labels when parsing.
    * Reload rules: copy state on both name and labels.
    * Exponentiation operator to drop metric name in result of operation.
    * Config: resolve more file paths.
    * Promtool: resolve relative paths in alert test files.
    * Set TLSHandshakeTimeout in HTTP transport. common
    * Use fsync to be more resilient to machine crashes.
    * Keep series that are still in WAL in checkpoints.
- Update to 2.8.1
  + Bug Fixes:
    * Display the job labels in /targets which was removed accidentally
- Update to 2.8.0
  + Change:
    * This release uses Write-Ahead Logging (WAL) for the remote_write API. This currently causes a slight increase in memory usage, which will be addressed in future releases.
    * Default time retention is used only when no size based retention is specified. These are flags where time retention is specified by the flag --storage.tsdb.retention and size retention by --storage.tsdb.retention.size.
    * prometheus_tsdb_storage_blocks_bytes_total is now prometheus_tsdb_storage_blocks_bytes.
  + Feature:
    * (EXPERIMENTAL) Time overlapping blocks are now allowed; vertical compaction and vertical query merge. It is an optional feature which is controlled by the --storage.tsdb.allow-overlapping-blocks flag, disabled by default.
  + Enhancements:
    * Use the WAL for remote_write API.
    * Query performance improvements.
    * UI enhancements with upgrade to Bootstrap 4.
    * Reduce time that Alertmanagers are in flux when reloaded.
    * Limit number of metrics displayed on UI to 10000.
    * Remember All/Unhealthy choice on target-overview when reloading page.
    * Resize text-input area on Graph page on mouseclick.
    * In histogram_quantile merge buckets with equivalent le values.
    * Show list of offending labels in the error message in many-to-many scenarios.
    * Show Storage Retention criteria in effect on /status page.
  + Bug Fixes:
    * Fix sorting of rule groups.
    * Fix support for password_file and bearer_token_file in Kubernetes SD.
    * Scrape: catch errors when creating HTTP clients
    * Adds new metrics:
      prometheus_target_scrape_pools_total
      prometheus_target_scrape_pools_failed_total
      prometheus_target_scrape_pool_reloads_total
      prometheus_target_scrape_pool_reloads_failed_total
    * Fix panic when aggregator param is not a literal.
- fix spec file: actually ship promtool
- Update to 2.7.1:
  + Bug Fixes:
    * Fix a Stored DOM XSS vulnerability with query history (boo#1124610)
    * prometheus_rule_group_last_duration_seconds now reports seconds instead of nanoseconds
    * Make sure the targets are consistently sorted in the targets page
- Update to 2.7.0:
  + CLI flag deprecated: storage.tsdb.retention, use storage.tsdb.retention.time instead; the deprecated flag will be removed in 3.0
  + Features:
    * Add subqueries to PromQL
    * Add support for disk size based retention. Note that we don't consider the WAL size which could be significant and the time based retention policy also applies (experimental)
    * Add CORS origin flag
  + Bug Fixes:
    * Don't depend on given order when comparing samples in alert unit testing
    * Make sure the retention period doesn't overflow
    * Don't generate blocks with no samples
- Update to 2.6.0:
  + Remove default flags from the container's entrypoint, run Prometheus from /etc/prometheus and symlink the storage directory to /etc/prometheus/data
  + Promtool: Remove the update command
  + Features:
    * Add JSON log format via the --log.format flag
    * API: Add /api/v1/labels endpoint to get all label names
    * Web: Allow setting the page's title via the --web.ui-title flag
  + Enhancements:
    * Add prometheus_tsdb_lowest_timestamp_seconds, prometheus_tsdb_head_min_time_seconds and prometheus_tsdb_head_max_time_seconds metrics
    * Add rule_group_last_evaluation_timestamp_seconds metric
    * Add prometheus_template_text_expansion_failures_total and prometheus_template_text_expansions_total metrics
    * Set consistent User-Agent header in outgoing requests
    * Azure SD: Error out at load time when authentication parameters are missing
    * EC2 SD: Add the machine's private DNS name to the discovery metadata
    * EC2 SD: Add the operating system's platform to the discovery metadata
    * Kubernetes SD: Add the pod's phase to the discovery metadata
    * Kubernetes SD: Log Kubernetes messages
    * Promtool: Collect CPU and trace profiles
    * Promtool: Support writing output as JSON
    * Remote Read: Return available data if remote read fails partially
    * Remote Write: Improve queue performance
    * Remote Write: Add min_shards parameter to set the minimum number of shards
    * TSDB: Improve WAL reading
    * TSDB: Memory improvements
    * Web: Log stack traces on panic
    * Web UI: Add copy to clipboard button for configuration
    * Web UI: Support console queries at specific times
    * Web UI: group targets by job then instance
  + Bug Fixes:
    * Deduplicate handler labels for HTTP metrics
    * Fix leaked queriers causing shutdowns to hang
    * Fix configuration loading panics on nil pointer slice elements
    * API: Correctly skip mismatching targets on /api/v1/targets/metadata
    * API: Better rounding for incoming query timestamps
    * Discovery: Remove all targets when the scrape configuration gets empty
    * PromQL: Fix a goroutine leak in the lexer/parser
    * Scrape: Fix deadlock in the scrape's manager
    * Scrape: Scrape targets at fixed intervals even after Prometheus restarts
    * TSDB: Support restored snapshots including the head properly
    * TSDB: Repair WAL when the last record in a segment is torn
- Update to 2.5.0
  + Group targets by scrape config instead of job name
  + Marathon SD: Various changes to adapt to Marathon 1.5+
  + Discovery: Split prometheus_sd_discovered_targets metric by scrape and notify (Alertmanager SD) as well as by section in the respective configuration
  + Enhancements:
    * Support s390x platform for Linux
    * API: Add prometheus_api_remote_read_queries metric tracking currently executed or waiting remote read API requests
    * Remote Read: Add prometheus_remote_storage_remote_read_queries metric tracking currently in-flight remote read queries
    * Remote Read: Reduced memory usage
    * Discovery: Add prometheus_sd_discovered_targets, prometheus_sd_received_updates_total, prometheus_sd_updates_delayed_total, and prometheus_sd_updates_total metrics for discovery subsystem
    * Discovery: Improve performance of previously slow updates of changes of targets
    * Kubernetes SD: Add extended metrics
    * OpenStack SD: Support discovering instances from all projects
    * OpenStack SD: Discover all interfaces
    * OpenStack SD: Support tls_config for the used HTTP client
    * Triton SD: Add ability to filter triton_sd targets by pre-defined groups
    * Web UI: Avoid browser spell-checking in expression field
    * Web UI: Add scrape duration and last evaluation time in targets and rules pages
    * Web UI: Improve rule view by wrapping lines
    * Rules: Error out at load time for invalid templates, rather than at evaluation time
  + Bug Fixes:
    * Change max/min over_time to handle NaNs properly
    * Check label name for count_values PromQL function
    * Ensure that vectors and matrices do not contain identical label-sets
- Update to 2.4.3
  + Bug Fixes:
    * Fix panic when using custom EC2 API for SD #4672
    * Fix panic when Zookeeper SD cannot connect to servers #4669
    * Make the skip_head an optional parameter for snapshot API #4674
- Update to 2.4.2
  + Bug Fixes:
    * Handle WAL corruptions properly prometheus/tsdb#389
    * Handle WAL migrations correctly on Windows prometheus/tsdb#392
- Update to 2.4.1
  + New TSDB metrics
  + Bug Fixes:
    * Render UI correctly for Windows
- Update to 2.4.0
  + The WAL implementation has been re-written so the storage is not forward compatible. Prometheus 2.3 storage will work on 2.4 but not vice-versa
  + Reduce remote write default retries
  + Remove /heap endpoint
  + Features:
    * Persist alert 'for' state across restarts
    * Add API providing per target metric metadata
    * Add API providing recording and alerting rules
  + Enhancements:
    * Brand new WAL implementation for TSDB. Forwards incompatible with previous WAL.
    * Show rule evaluation errors in UI
    * Throttle resends of alerts to Alertmanager
    * Send EndsAt along with the alert to Alertmanager
    * Limit the samples returned by remote read endpoint
    * Limit the data read in through remote read
    * Coalesce identical SD configurations
    * promtool: Add new commands for debugging and querying
    * Update console examples for node_exporter v0.16.0
    * Optimize PromQL aggregations
    * Remote read: Add Offset to hints
    * consul_sd: Add support for ServiceMeta field
    * ec2_sd: Maintain order of subnet_id label
    * ec2_sd: Add support for custom endpoint to support EC2 compliant APIs
    * ec2_sd: Add instance_owner label
    * azure_sd: Add support for VMSS discovery and multiple environments
    * gce_sd: Add instance_id label
    * Forbid rule-abiding robots from indexing
    * Log virtual memory limits on startup
  + Bug Fixes:
    * Wait for service discovery to stop before exiting
    * Render SD configs properly
    * Only add LookbackDelta to vector selectors
    * ec2_sd: Handle panicking nil pointer
    * consul_sd: Stop leaking connections
    * Use templated labels also to identify alerts
    * Reduce floating point errors in stddev and related functions
    * Log errors while encoding responses
- Update to 2.3.2
  + Bug Fixes:
    * Fix various tsdb bugs
    * Reorder startup and shutdown to prevent panics.
    * Exit with non-zero code on error
    * discovery/kubernetes/ingress: fix scheme discovery
    * Fix race in zookeeper sd
    * Better timeout handling in promql
    * Propagate errors when selecting series from the tsdb
- Update to 2.3.1
  + Bug Fixes:
    * Avoid infinite loop on duplicate NaN values.
    * Fix nil pointer dereference when using various API endpoints
    * config: set target group source index during unmarshalling
    * discovery/file: fix logging
    * kubernetes_sd: fix namespace filtering
    * web: restore old path prefix behavior
    * web: remove security headers added in 2.3.0
- Update to 2.3.0
  + marathon_sd: use auth_token and auth_token_file for token-based authentication instead of bearer_token and bearer_token_file respectively
  + Metric names for HTTP server metrics changed
  + Features:
    * Add query commands to promtool
    * Add security headers to HTTP server responses
    * Pass query hints via remote read API
    * Basic auth passwords can now be configured via file across all configuration
  + Enhancements:
    * Optimise PromQL and API serialization for memory usage and allocations
    * Limit number of dropped targets in web UI
    * Consul and EC2 service discovery allow using server-side filtering for performance improvement
    * Add advanced filtering configuration to EC2 service discovery
    * marathon_sd: adds support for basic and bearer authentication, plus all other common HTTP client options (TLS config, proxy URL, etc.)
    * Provide machine type metadata and labels in GCE service discovery
    * Add pod controller kind and name to Kubernetes service discovery data
    * Move TSDB to flock-based log file that works with Docker containers
  + Bug Fixes:
    * Properly propagate storage errors in PromQL
    * Fix path prefix for web pages
    * Fix goroutine leak in Consul service discovery
    * Fix races in scrape manager
    * Fix OOM for very large k in PromQL topk() queries
    * Make remote write more resilient to unavailable receivers
    * Make remote write shut down cleanly
    * Don't leak files on errors in TSDB's tombstone cleanup
    * Unary minus expressions now remove the metric name from results
    * Fix bug that led to a wrong amount of samples considered for time range expressions
- Update to 2.2.1
  + Bug Fixes:
    * Fix data loss in TSDB on compaction
    * Correctly stop timer in remote-write path
    * Fix deadlock triggered by loading targets page
    * Fix incorrect buffering of samples on range selection queries
    * Handle large index files on Windows properly
- Update to 2.2.0
  + This release introduces improvements to the storage format and fixes a regression introduced in 2.1. As a result Prometheus servers upgraded to 2.2 cannot be downgraded to a lower version anymore!
  + Rename file SD mtime metric
  + Send target update on empty pod IP in Kubernetes SD
  + Features:
    * Add API endpoint for flags.
    * Add API endpoint for dropped targets.
    * Display annotations on alerts page.
    * Add option to skip head data when taking snapshots
  + Enhancements:
    * Federation performance improvement.
    * Read bearer token file on every scrape.
    * Improve typeahead on /graph page.
    * Change rule file formatting.
    * Set consul server default to localhost:8500.
    * Add dropped Alertmanagers to API info endpoint.
    * Add OS type meta label to Azure SD.
    * Validate required fields in SD configuration.
  + Bug Fixes:
    * Prevent stack overflow on deep recursion in TSDB.
    * Correctly read offsets in index files that are greater than 4GB.
    * Fix scraping behavior for empty labels.
    * Drop metric name for bool modifier.
    * Fix races in discovery.
    * Fix Kubernetes endpoints SD for empty subsets.
    * Throttle updates from SD providers, which caused increased CPU usage and allocations.
    * Fix TSDB block reload issue.
    * Fix PromQL printing of empty without().
    * Don't reset FiredAt for inactive alerts.
    * Fix erroneous file version changes and repair existing data.
- remove pr-3174.patch; it has been fixed in https://github.com/prometheus/prometheus/pull/3517
- spec: fix directory for fdupes
- Update to 2.1.0:
  + Features:
    * New Service Discovery UI showing labels before and after relabelling.
    * New Admin APIs added to v1 to delete, snapshot and remove tombstones.
  + Enhancements:
    * The graph UI autocomplete now includes your previous queries.
    * Federation is now much faster for large numbers of series.
    * Added new metrics to measure rule timings.
    * Rule evaluation times added to the rules UI.
    * Added metrics to measure modified time of file SD files.
    * Kubernetes SD now includes POD UID in discovery metadata.
    * The Query APIs now return optional stats on query execution times.
    * The index now no longer has the 4GiB size limit and is also smaller.
  + Bug Fixes:
    * Remote read read_recent option is now false by default.
    * Pass the right configuration to each Alertmanager (AM) when using multiple AM configs.
    * Fix not-matchers not selecting series with labels unset.
    * tsdb: Fix occasional panic in head block.
    * tsdb: Close files before deletion to fix retention issues on Windows and NFS.
    * tsdb: Cleanup and do not retry failing compactions.
    * tsdb: Close WAL while shutting down.
- Update to final v2.0.0 release (bsc#1067341):
  + Compared to version 1.x, this release includes a completely rewritten storage engine, huge performance improvements, but also many backwards incompatible changes. For more information, read the announcement blog post and migration guide:
    - https://prometheus.io/blog/2017/11/08/announcing-prometheus-2-0/
    - https://prometheus.io/docs/prometheus/2.0/migration/
  + Changes:
    * Completely rewritten storage layer, with WAL. This is not backwards compatible with 1.x storage, and many flags have changed/disappeared.
    * New staleness behavior. Series now marked stale after target scrapes no longer return them, and soon after targets disappear from service discovery.
    * Rules files use YAML syntax now. Conversion tool added to promtool.
    * Removed count_scalar, drop_common_labels functions and keep_common modifier from PromQL.
    * Rewritten exposition format parser with much higher performance. The Protobuf exposition format is no longer supported.
    * Example console templates updated for new storage and metrics names. Examples other than node exporter and Prometheus removed.
    * Admin and lifecycle APIs now disabled by default, can be re-enabled via flags
    * Flags switched to using Kingpin, all flags are now --flagname rather than -flagname.
  + Features:
    * Remote read can be configured to not read data which is available locally. This is enabled by default.
    * Rules can be grouped now. Rules within a rule group are executed sequentially.
    * Added experimental gRPC APIs
    * Add timestamp() function to PromQL.
  + Enhancements:
    * Remove remote read from the query path if no remote storage is configured.
    * Bump Consul HTTP client timeout to not match the Consul SD watch timeout.
    * Go-conntrack added to provide HTTP connection metrics.
  + Bug Fixes:
    * Fix connection leak in Consul SD.
- Update to v2.0.0-rc.3 (bsc#1067030):
  + Enhancements:
    * Remove remote read from the query path if no remote storage is configured.
    * Bump Consul HTTP client timeout to not match the Consul SD watch timeout.
    * Bump up a too small max block duration to the min block duration instead of returning an error.
  + Bug Fixes:
    * Avoid needless padding of 4 zero bytes in index files.
    * Delete old blocks during reload (necessary on MS Windows).
    * Fix regression of alert rules state loss on config reload.
    * Serialize background WAL operations to avoid data races.
  + This also contains bugfixes and remote-storage features from the 1.8 branch.
- Update to v2.0.0-rc.2 (bsc#1065377):
  + Enhancements:
    * Handle WAL segments with corrupted header gracefully
    * Stabilize memory usage during WAL replay
  + Changes:
    * Prefix all storage metrics with prometheus_
  + Bug Fixes:
    * Correctly handle label removal in remote read
    * Fix chunk misalignment causing out-of-order samples
    * Fix connection leak in Consul SD
    * Handle invalid chunk dereferences gracefully
    * Prevent potential deadlock during failing querier construction
- Update to v2.0.0-rc.1:
  + Features/Enhancements:
    * Added a warning for time-drift between the browser and the prometheus-server.
    * Much faster WAL read-back on restart.
  + Bug Fixes:
    * Fixed Remote-read to not drop the first series.
    * Validate recording-rule names.
    * Fix several races.
    * Only close blocks if there are no iterators accessing them.
- Refresh 0002-Default-settings.patch
- Update to v2.0.0-rc.0:
  + Numerous changes to the new storage layer, the main changes being:
    * Remove `count_scalar`, `keep_common` and `drop_common_labels` functions
    * Breaking change in the index format for better consistency
    * Fix panic due to garbage-collected mmap'ed strings
    * Fix broken snapshots and admin APIs
    * Send HTTP Accept header when scraping
    * Use the WAL flush interval passed instead of the hardcoded value
  + This release requires a clean storage directory and is not compatible with files created by previous beta releases
- Refresh 0002-Default-settings.patch
- Fix loopback address for REST API gateway (bsc#1059462)
  + Added pr-3174.patch
- Update to v2.0.0-beta.5:
  + Bug Fixes:
    * Remove deadlock on startup when restoring WAL
    * Fix semantic races resulting in invalid persisted files
    * Correctly read back WAL in certain edge cases
    * Prevent crashes caused by changing metric representations in target's /metrics
  + Enhancements:
    * Overall memory usage reduction
    * Serve debugging endpoints while TSDB is loading
    * Healthy endpoint correctly reflects liveness during startup
    * Switch to consistent usage of go-kit/log
  + This release may have issues with files written by previous beta releases
- Refresh 0002-Default-settings.patch to apply cleanly again
- Update to v2.0.0-beta.4:
  + Numerous changes to the new storage layer, the main changes being:
    * Single, compacted write ahead log
    * Single in-memory block with garbage collection
    * Improve compression of index files
    * Cache series dropped via metric_relabel_configs
    * Pool byte buffers for scraping
  + Overall the changes achieve a baseline reduction in memory consumption and reduce peak memory usage by 30-40% compared to 2.0.0-beta.2
  + This release requires a clean storage directory and is not compatible with files created by previous beta releases
- Update to v2.0.0-beta.2
  + vendor: update prometheus/tsdb (Fix panic caused by 0 division)
- Update to v2.0.0-beta.1
  + Includes a new storage layer, which reduces bottlenecks and shows considerable performance improvements, but does not work with old v1 storage data.
- Refresh 0002-Default-settings.patch to apply cleanly again
- Add systemd unit file and default config (/etc/prometheus/prometheus.yml)
- Run as unprivileged "prometheus" user
- Default to /etc/prometheus for config and /var/lib/prometheus for metrics
  + Added 0002-Default-settings.patch
- Add BuildRequires: glibc-devel-static to fix aarch64 build
- Initial version

OBS-URL: https://build.opensuse.org/request/show/720641
OBS-URL: https://build.opensuse.org/package/show/server:monitoring/golang-github-prometheus-prometheus?expand=0&rev=17
---
 ...mode-pie.patch => 0004-prometheus-buildmode-pie.patch | 0
 golang-github-prometheus-prometheus.changes              | 9 +++++++--
 golang-github-prometheus-prometheus.spec                 | 2 +-
 prometheus.service                                       | 2 ++
 4 files changed, 10 insertions(+), 3 deletions(-)
 rename prometheus-buildmode-pie.patch => 0004-prometheus-buildmode-pie.patch (100%)

diff --git a/prometheus-buildmode-pie.patch b/0004-prometheus-buildmode-pie.patch
similarity index 100%
rename from prometheus-buildmode-pie.patch
rename to 0004-prometheus-buildmode-pie.patch
diff --git a/golang-github-prometheus-prometheus.changes b/golang-github-prometheus-prometheus.changes
index 02713c0..f093df5 100644
--- a/golang-github-prometheus-prometheus.changes
+++ b/golang-github-prometheus-prometheus.changes
@@ -1,8 +1,13 @@
+-------------------------------------------------------------------
+Fri Aug  2 11:22:25 UTC 2019 - Jan Fajerski
+
+- Add network-online (Wants and After) dependency to systemd unit bsc#1143913
+
 -------------------------------------------------------------------
 Wed Jul 31 10:46:13 UTC 2019 - Andreas Schneider
 
 - Build with PIE
-  + Added prometheus-buildmode-pie.patch
+  + Added 0004-prometheus-buildmode-pie.patch
 
 -------------------------------------------------------------------
 Wed Jul 31 06:47:55 UTC 2019 - Andreas Schneider
@@ -16,7 +21,7 @@ Wed Jul 31 06:47:55 UTC 2019 - Andreas Schneider
 Thu Jul 25 16:34:29 UTC 2019 - Joao Cavalheiro
 
 - Add support for Uyuni/SUSE Manager service discovery
-  + Added 0003-Add-Uyuni-service-discovery
+  + Added 0003-Add-Uyuni-service-discovery.patch
 
 -------------------------------------------------------------------
 Thu Jul 18 01:06:13 UTC 2019 - Simon Crute
diff --git a/golang-github-prometheus-prometheus.spec b/golang-github-prometheus-prometheus.spec
index a8d4977..d746090 100644
--- a/golang-github-prometheus-prometheus.spec
+++ b/golang-github-prometheus-prometheus.spec
@@ -40,7 +40,7 @@ Patch2:         0002-Default-settings.patch
 # Uyuni service discovery support
 Patch3:         0003-Add-Uyuni-service-discovery.patch
 # Add -buildmode=pie
-Patch4:         prometheus-buildmode-pie.patch
+Patch4:         0004-prometheus-buildmode-pie.patch
 BuildRequires:  fdupes
 # Adding glibc-devel-static seems to be required for linking if building
 # with -buildmode=pie
diff --git a/prometheus.service b/prometheus.service
index 9ea54c2..68c7e8e 100644
--- a/prometheus.service
+++ b/prometheus.service
@@ -1,6 +1,8 @@
 [Unit]
 Description=Monitoring system and time series database
 Documentation=https://prometheus.io/docs/introduction/overview/
+Wants=network-online.target
+After=network-online.target
 
 [Service]
 Restart=always