diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000..841186186d
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2017 SecureKey Technologies Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000..c67d75d316
--- /dev/null
+++ b/README.md
@@ -0,0 +1,24 @@
+# Hyperledger Fabric Client SDK for Go
+
+The Hyperledger Fabric Client SDK provides easy-to-use APIs for interacting with a Hyperledger Fabric blockchain.
+
+This SDK serves two purposes: external access to a Hyperledger Fabric blockchain from a Go application, and internal use as a library within a peer to access API functions on other parts of the network.
+
+**NOTE:** In an effort to make the codebase more modular, there will be interface changes over the course of the next week.
+
+## Build and Test
+
+This project must be cloned into `$GOPATH/src/github.com/hyperledger`. Package names have been chosen to match the Hyperledger project.
+
+Execute `go test` from the project root to build the library and run the basic headless tests.
+
+Execute `go test` in the `integration_test` directory to run the end-to-end tests. This requires you to have:
+- A working fabric and fabric-ca setup. Refer to the Hyperledger Fabric [documentation](https://github.com/hyperledger/fabric) on how to do this.
+- Customized settings in `integration_test/test_resources/config/config_test.yaml` if your Hyperledger Fabric network is not running on `localhost` or uses different ports.
+
+## Work in Progress
+
+This client was last tested and found to be compatible with the following Hyperledger Fabric commit levels:
+- fabric: `22d98b9e5ea36a6b209b3ea67def50a678718679`
+- fabric-ca: `f18b6b769b80c889cb6b82ce34d755d9303ec881`
+
diff --git a/config/config.go b/config/config.go
new file mode 100644
index 0000000000..ea54b68280
--- /dev/null
+++ b/config/config.go
@@ -0,0 +1,233 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+
+ "github.com/op/go-logging"
+ "github.com/spf13/viper"
+)
+
+// PeerConfig ...
+type PeerConfig struct {
+ Host string
+ Port string
+ EventHost string
+ EventPort string
+}
+
+type fabricCAConfig struct {
+ ServerURL string `json:"serverURL"`
+ Certfiles []string `json:"certfiles"`
+ Client struct {
+ Keyfile string `json:"keyfile"`
+ Certfile string `json:"certfile"`
+ } `json:"client"`
+}
+
+var myViper = viper.New()
+var log = logging.MustGetLogger("fabric_sdk_go")
+var format = logging.MustStringFormatter(
+ `%{color}%{time:15:04:05.000} [%{module}] %{level:.4s} : %{message}`,
+)
+
+// InitConfig reads in the given config file and initializes logging
+// for the SDK
+func InitConfig(configFile string) error {
+
+ if configFile != "" {
+ // create new viper
+ myViper.SetConfigFile(configFile)
+ // If a config file is found, read it in.
+ err := myViper.ReadInConfig()
+
+ if err == nil {
+ log.Infof("Using config file: %s", myViper.ConfigFileUsed())
+ } else {
+			return fmt.Errorf("fatal error loading config file: %s", err)
+ }
+ }
+
+ backend := logging.NewLogBackend(os.Stderr, "", 0)
+ backendFormatter := logging.NewBackendFormatter(backend, format)
+
+ loggingLevelString := myViper.GetString("client.logging.level")
+ logLevel := logging.INFO
+ if loggingLevelString != "" {
+ log.Infof("fabric_sdk_go Logging level: %v", loggingLevelString)
+ var err error
+ logLevel, err = logging.LogLevel(loggingLevelString)
+ if err != nil {
+ panic(err)
+ }
+ }
+ logging.SetBackend(backendFormatter).SetLevel(logging.Level(logLevel), "fabric_sdk_go")
+
+ return nil
+}
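+
+// Usage sketch (illustrative only; the config file path is an example):
+//
+//	if err := config.InitConfig("./test_resources/config/config_test.yaml"); err != nil {
+//		log.Fatalf("Failed to initialize SDK config: %v", err)
+//	}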
+
+// GetFabricClientViper returns the internal viper instance used by the
+// SDK to read configuration options
+func GetFabricClientViper() *viper.Viper {
+ return myViper
+}
+
+// GetPeersConfig ...
+func GetPeersConfig() []PeerConfig {
+ peersConfig := []PeerConfig{}
+ peers := myViper.GetStringMap("client.peers")
+ for key, value := range peers {
+ mm, ok := value.(map[string]interface{})
+ var host string
+ var port int
+ var eventHost string
+ var eventPort int
+
+ if ok {
+ host, _ = mm["host"].(string)
+ port, _ = mm["port"].(int)
+ eventHost, _ = mm["event_host"].(string)
+ eventPort, _ = mm["event_port"].(int)
+ } else {
+ mm1 := value.(map[interface{}]interface{})
+ host, _ = mm1["host"].(string)
+ port, _ = mm1["port"].(int)
+ eventHost, _ = mm1["event_host"].(string)
+ eventPort, _ = mm1["event_port"].(int)
+ }
+ p := PeerConfig{Host: host, Port: strconv.Itoa(port), EventHost: eventHost, EventPort: strconv.Itoa(eventPort)}
+		if p.Host == "" {
+			panic(fmt.Sprintf("host key does not exist or is empty for %s", key))
+		}
+		if p.Port == "" {
+			panic(fmt.Sprintf("port key does not exist or is empty for %s", key))
+		}
+		if p.EventHost == "" {
+			panic(fmt.Sprintf("event_host does not exist or is empty for %s", key))
+		}
+		if p.EventPort == "" {
+			panic(fmt.Sprintf("event_port does not exist or is empty for %s", key))
+		}
+ peersConfig = append(peersConfig, p)
+ }
+ return peersConfig
+
+}
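+
+// Usage sketch (illustrative only): iterate the configured peers, e.g. to build
+// connection endpoints. Note that GetPeersConfig panics if a peer entry is
+// missing its host or port values.
+//
+//	for _, p := range config.GetPeersConfig() {
+//		endpoint := fmt.Sprintf("%s:%s", p.Host, p.Port)
+//		eventEndpoint := fmt.Sprintf("%s:%s", p.EventHost, p.EventPort)
+//		// dial endpoint, subscribe to events at eventEndpoint, etc.
+//	}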
+
+// IsTLSEnabled ...
+func IsTLSEnabled() bool {
+ return myViper.GetBool("client.tls.enabled")
+}
+
+// GetTLSCACertPool ...
+func GetTLSCACertPool() *x509.CertPool {
+	certPool := x509.NewCertPool()
+	if myViper.GetString("client.tls.certificate") != "" {
+		// read the certificate from the same key that was checked above
+		rawData, err := ioutil.ReadFile(myViper.GetString("client.tls.certificate"))
+		if err != nil {
+			panic(err)
+		}
+		certPool.AddCert(loadCAKey(rawData))
+	}
+	return certPool
+}
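+
+// Usage sketch (illustrative only; assumes the caller uses
+// google.golang.org/grpc/credentials, which this package does not import):
+//
+//	creds := credentials.NewClientTLSFromCert(
+//		config.GetTLSCACertPool(), config.GetTLSServerHostOverride())
+//	// pass creds to grpc.Dial via grpc.WithTransportCredentials(creds)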
+
+// GetTLSServerHostOverride ...
+func GetTLSServerHostOverride() string {
+ return myViper.GetString("client.tls.serverhostoverride")
+}
+
+// IsSecurityEnabled ...
+func IsSecurityEnabled() bool {
+ return myViper.GetBool("client.security.enabled")
+}
+
+// TcertBatchSize ...
+func TcertBatchSize() int {
+ return myViper.GetInt("client.tcert.batch.size")
+}
+
+// GetSecurityAlgorithm ...
+func GetSecurityAlgorithm() string {
+ return myViper.GetString("client.security.hashAlgorithm")
+}
+
+// GetSecurityLevel ...
+func GetSecurityLevel() int {
+ return myViper.GetInt("client.security.level")
+
+}
+
+// GetOrdererHost ...
+func GetOrdererHost() string {
+ return myViper.GetString("client.orderer.host")
+}
+
+// GetFabricCAID ...
+func GetFabricCAID() string {
+ return myViper.GetString("client.fabricCA.id")
+}
+
+// GetFabricCAClientPath reads the fabric-ca configuration from the config
+// yaml file and returns the path to a JSON client config file in the
+// format expected by the fabric-ca client
+func GetFabricCAClientPath() (string, error) {
+ filePath := "/tmp/client-config.json"
+ fabricCAConf := fabricCAConfig{}
+ err := myViper.UnmarshalKey("client.fabricCA", &fabricCAConf)
+ if err != nil {
+ return "", err
+ }
+ jsonConfig, err := json.Marshal(fabricCAConf)
+ if err != nil {
+ return "", err
+ }
+
+ err = ioutil.WriteFile(filePath, jsonConfig, 0644)
+ return filePath, err
+}
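+
+// Usage sketch (illustrative only; mirrors how the fabric-ca client wrapper
+// consumes this helper; the temporary file should be removed by the caller):
+//
+//	configPath, err := config.GetFabricCAClientPath()
+//	if err != nil {
+//		return err
+//	}
+//	defer os.Remove(configPath)
+//	// hand configPath to the fabric-ca client library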
+
+// GetKeyStorePath ...
+func GetKeyStorePath() string {
+ return myViper.GetString("client.keystore.path")
+}
+
+// GetOrdererPort ...
+func GetOrdererPort() string {
+ return strconv.Itoa(myViper.GetInt("client.orderer.port"))
+}
+
+// loadCAKey
+func loadCAKey(rawData []byte) *x509.Certificate {
+ block, _ := pem.Decode(rawData)
+
+ pub, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ panic(err)
+ }
+ return pub
+}
diff --git a/config/config_test.go b/config/config_test.go
new file mode 100644
index 0000000000..a5e6f7e50f
--- /dev/null
+++ b/config/config_test.go
@@ -0,0 +1,87 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/spf13/viper"
+)
+
+func TestGetPeersConfig(t *testing.T) {
+ pc := GetPeersConfig()
+
+ for _, value := range pc {
+ if value.Host == "" {
+ t.Fatalf("Host value is empty")
+ }
+ if value.Port == "" {
+ t.Fatalf("Port value is empty")
+ }
+		if value.EventHost == "" {
+			t.Fatalf("EventHost value is empty")
+		}
+		if value.EventPort == "" {
+			t.Fatalf("EventPort value is empty")
+		}
+
+ }
+
+}
+
+// Test case to create a new viper instance to prevent conflict with existing
+// viper instances in applications that use the SDK
+func TestMultipleVipers(t *testing.T) {
+ viper.SetConfigFile("./test.yaml")
+ err := viper.ReadInConfig()
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+ testValue1 := viper.GetString("test.testKey")
+ // Read initial value from test.yaml
+	if testValue1 != "testvalue" {
+		t.Fatalf("Expected testvalue before config initialization, got: %s", testValue1)
+ }
+ // initialize go sdk
+ err = InitConfig("../integration_test/test_resources/config/config_test.yaml")
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+ // Make sure initial value is unaffected
+ testValue2 := viper.GetString("test.testKey")
+ if testValue2 != "testvalue" {
+ t.Fatalf("Expected testvalue after config initialization")
+ }
+ // Make sure Go SDK config is unaffected
+ testValue3 := myViper.GetString("client.peers.peer1.host")
+ if testValue3 != "localhost" {
+ t.Fatalf("Expected existing config value to remain unchanged")
+ }
+}
+
+func TestMain(m *testing.M) {
+ err := InitConfig("../integration_test/test_resources/config/config_test.yaml")
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+ os.Exit(m.Run())
+}
diff --git a/config/test.yaml b/config/test.yaml
new file mode 100644
index 0000000000..ec88a475da
--- /dev/null
+++ b/config/test.yaml
@@ -0,0 +1,2 @@
+test:
+ testkey: testvalue
diff --git a/fabric-ca-client/fabricca.go b/fabric-ca-client/fabricca.go
new file mode 100644
index 0000000000..4caa355fd9
--- /dev/null
+++ b/fabric-ca-client/fabricca.go
@@ -0,0 +1,91 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricca
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/hyperledger/fabric-ca/api"
+ fabric_ca "github.com/hyperledger/fabric-ca/lib"
+ "github.com/hyperledger/fabric-sdk-go/config"
+
+ "github.com/op/go-logging"
+)
+
+var logger = logging.MustGetLogger("fabric_sdk_go")
+
+// Services ...
+type Services interface {
+ Enroll(enrollmentID string, enrollmentSecret string) ([]byte, []byte, error)
+}
+
+type services struct {
+ fabricCAClient *fabric_ca.Client
+}
+
+// NewFabricCAClient ...
+/**
+ * Creates a fabric-ca client using the client config file generated from the
+ * SDK configuration (see config.GetFabricCAClientPath)
+ */
+func NewFabricCAClient() (Services, error) {
+ configPath, err := config.GetFabricCAClientPath()
+ if err != nil {
+ return nil, fmt.Errorf("error setting up fabric-ca configurations: %s", err.Error())
+ }
+ //Remove temporary config file after setup
+ defer os.Remove(configPath)
+ // Create new Fabric-ca client with configs
+ c, err := fabric_ca.NewClient(configPath)
+ if err != nil {
+ return nil, fmt.Errorf("New fabricCAClient failed: %s", err)
+ }
+
+ fabricCAClient := &services{fabricCAClient: c}
+ logger.Infof("Constructed fabricCAClient instance: %v", fabricCAClient)
+
+ return fabricCAClient, nil
+}
+
+// Enroll ...
+/**
+ * Enroll a registered user in order to receive a signed X509 certificate
+ * @param {string} enrollmentID The registered ID to use for enrollment
+ * @param {string} enrollmentSecret The secret associated with the enrollment ID
+ * @returns {[]byte} private key
+ * @returns {[]byte} X509 certificate
+ */
+func (fabricCAServices *services) Enroll(enrollmentID string, enrollmentSecret string) ([]byte, []byte, error) {
+ if enrollmentID == "" {
+ return nil, nil, fmt.Errorf("enrollmentID is empty")
+ }
+ if enrollmentSecret == "" {
+ return nil, nil, fmt.Errorf("enrollmentSecret is empty")
+ }
+ req := &api.EnrollmentRequest{
+ Name: enrollmentID,
+ Secret: enrollmentSecret,
+ }
+ id, err := fabricCAServices.fabricCAClient.Enroll(req)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Enroll failed: %s", err)
+ }
+ return id.GetECert().Key(), id.GetECert().Cert(), nil
+}
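+
+// Usage sketch (illustrative only; the enrollment ID and secret are examples):
+//
+//	caClient, err := fabricca.NewFabricCAClient()
+//	if err != nil {
+//		return err
+//	}
+//	key, cert, err := caClient.Enroll("admin", "adminpw")
+//	if err != nil {
+//		return err
+//	}
+//	// persist key and cert, e.g. as part of the SDK user context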
diff --git a/fabric-ca-client/fabricca_test.go b/fabric-ca-client/fabricca_test.go
new file mode 100644
index 0000000000..86c1b79c97
--- /dev/null
+++ b/fabric-ca-client/fabricca_test.go
@@ -0,0 +1,45 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricca
+
+import (
+ "testing"
+)
+
+func TestEnrollWithMissingParameters(t *testing.T) {
+ fabricCAClient, err := NewFabricCAClient()
+ if err != nil {
+ t.Fatalf("NewFabricCAClient return error: %v", err)
+ }
+ _, _, err = fabricCAClient.Enroll("", "user1")
+ if err == nil {
+ t.Fatalf("Enroll didn't return error")
+ }
+ if err.Error() != "enrollmentID is empty" {
+ t.Fatalf("Enroll didn't return right error")
+ }
+ _, _, err = fabricCAClient.Enroll("test", "")
+ if err == nil {
+ t.Fatalf("Enroll didn't return error")
+ }
+ if err.Error() != "enrollmentSecret is empty" {
+ t.Fatalf("Enroll didn't return right error")
+ }
+}
diff --git a/fabric-client/chain.go b/fabric-client/chain.go
new file mode 100644
index 0000000000..0f004ddace
--- /dev/null
+++ b/fabric-client/chain.go
@@ -0,0 +1,817 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	google_protobuf "github.com/golang/protobuf/ptypes/timestamp"
+	"github.com/hyperledger/fabric/bccsp"
+	msp "github.com/hyperledger/fabric/msp"
+	"github.com/hyperledger/fabric/protos/common"
+	pb "github.com/hyperledger/fabric/protos/peer"
+	protos_utils "github.com/hyperledger/fabric/protos/utils"
+	"github.com/op/go-logging"
+
+	config "github.com/hyperledger/fabric-sdk-go/config"
+)
+
+var logger = logging.MustGetLogger("fabric_sdk_go")
+
+// Chain ...
+/**
+ * The “Chain” object captures settings for a channel, which is created by
+ * the orderers to isolate transaction delivery to the peers participating on the channel.
+ * A chain must be initialized after it has been configured with the list of peers
+ * and orderers. The initialization sends a CONFIGURATION transaction to the orderers
+ * to create the specified channel and asks the peers to join that channel.
+ *
+ */
+type Chain interface {
+ GetName() string
+ IsSecurityEnabled() bool
+ GetTCertBatchSize() int
+ SetTCertBatchSize(batchSize int)
+ AddPeer(peer Peer)
+ RemovePeer(peer Peer)
+ GetPeers() []Peer
+ AddOrderer(orderer Orderer)
+ RemoveOrderer(orderer Orderer)
+ GetOrderers() []Orderer
+ InitializeChain() bool
+ UpdateChain() bool
+ IsReadonly() bool
+ QueryInfo()
+ QueryBlock(blockNumber int)
+ QueryTransaction(transactionID int)
+ CreateTransactionProposal(chaincodeName string, chainID string, args []string, sign bool, transientData map[string][]byte) (*TransactionProposal, error)
+ SendTransactionProposal(proposal *TransactionProposal, retry int) ([]*TransactionProposalResponse, error)
+ CreateTransaction(resps []*TransactionProposalResponse) (*Transaction, error)
+ SendTransaction(tx *Transaction) ([]*TransactionResponse, error)
+ SendInstallProposal(chaincodeName string, chaincodePath string, chaincodeVersion string, chaincodePackage []byte) ([]*TransactionProposalResponse, string, error)
+ SendInstantiateProposal(chaincodeName string, chainID string, args []string, chaincodePath string, chaincodeVersion string) ([]*TransactionProposalResponse, string, error)
+}
+
+type chain struct {
+ name string // Name of the chain is only meaningful to the client
+ securityEnabled bool // Security enabled flag
+ peers map[string]Peer
+ tcertBatchSize int // The number of tcerts to get in each batch
+ orderers map[string]Orderer
+ clientContext Client
+}
+
+// The TransactionProposal object to be sent to the endorsers
+type TransactionProposal struct {
+ TransactionID string
+
+ signedProposal *pb.SignedProposal
+ proposal *pb.Proposal
+}
+
+// TransactionProposalResponse ...
+/**
+ * The TransactionProposalResponse result object returned from endorsers.
+ */
+type TransactionProposalResponse struct {
+ Endorser string
+ Err error
+
+ proposal *TransactionProposal
+ proposalResponse *pb.ProposalResponse
+}
+
+// GetResponsePayload returns the response payload
+func (tpr *TransactionProposalResponse) GetResponsePayload() []byte {
+ if tpr == nil || tpr.proposalResponse == nil {
+ return nil
+ }
+ return tpr.proposalResponse.GetResponse().Payload
+}
+
+// The Transaction object created from an endorsed proposal
+type Transaction struct {
+ proposal *TransactionProposal
+ transaction *pb.Transaction
+}
+
+// TransactionResponse ...
+/**
+ * The TransactionResponse result object returned from orderers.
+ */
+type TransactionResponse struct {
+ Orderer string
+ Err error
+}
+
+// A SignedEnvelope can be sent to an orderer for broadcasting
+type SignedEnvelope struct {
+ Payload []byte
+ signature []byte
+}
+
+// NewChain ...
+/**
+ * @param {string} name to identify different chain instances. The naming of chain instances
+ * is enforced by the ordering service and must be unique within the blockchain network
+ * @param {Client} clientContext An instance of {@link Client} that provides operational context
+ * such as submitting User etc.
+ */
+func NewChain(name string, client Client) (Chain, error) {
+ if name == "" {
+ return nil, fmt.Errorf("Failed to create Chain. Missing requirement 'name' parameter.")
+ }
+ if client == nil {
+ return nil, fmt.Errorf("Failed to create Chain. Missing requirement 'clientContext' parameter.")
+ }
+ p := make(map[string]Peer)
+ o := make(map[string]Orderer)
+ c := &chain{name: name, securityEnabled: config.IsSecurityEnabled(), peers: p,
+ tcertBatchSize: config.TcertBatchSize(), orderers: o, clientContext: client}
+ logger.Infof("Constructed Chain instance: %v", c)
+
+ return c, nil
+}
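+
+// Usage sketch (illustrative only; peer and orderer construction lives outside
+// this file):
+//
+//	client := NewClient()
+//	chain, err := NewChain("mychannel", client)
+//	if err != nil {
+//		return err
+//	}
+//	chain.AddPeer(peer)       // peer implements the Peer interface
+//	chain.AddOrderer(orderer) // orderer implements the Orderer interface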
+
+// GetName ...
+/**
+ * Get the chain name.
+ * @returns {string} The name of the chain.
+ */
+func (c *chain) GetName() string {
+ return c.name
+}
+
+// IsSecurityEnabled ...
+/**
+ * Determine if security is enabled.
+ */
+func (c *chain) IsSecurityEnabled() bool {
+ return c.securityEnabled
+}
+
+// GetTCertBatchSize ...
+/**
+ * Get the tcert batch size.
+ */
+func (c *chain) GetTCertBatchSize() int {
+ return c.tcertBatchSize
+}
+
+// SetTCertBatchSize ...
+/**
+ * Set the tcert batch size.
+ */
+func (c *chain) SetTCertBatchSize(batchSize int) {
+ c.tcertBatchSize = batchSize
+}
+
+// AddPeer ...
+/**
+ * Add peer endpoint to chain.
+ * @param {Peer} peer An instance of the Peer that has been initialized with URL,
+ * TLS certificate, and enrollment certificate.
+ */
+func (c *chain) AddPeer(peer Peer) {
+ c.peers[peer.GetURL()] = peer
+}
+
+// RemovePeer ...
+/**
+ * Remove peer endpoint from chain.
+ * @param {Peer} peer An instance of the Peer.
+ */
+func (c *chain) RemovePeer(peer Peer) {
+ delete(c.peers, peer.GetURL())
+}
+
+// GetPeers ...
+/**
+ * Get peers of a chain from local information.
+ * @returns {[]Peer} The peer list on the chain.
+ */
+func (c *chain) GetPeers() []Peer {
+ var peersArray []Peer
+ for _, v := range c.peers {
+ peersArray = append(peersArray, v)
+ }
+ return peersArray
+}
+
+// AddOrderer ...
+/**
+ * Add orderer endpoint to a chain object, this is a local-only operation.
+ * A chain instance may choose to use a single orderer node, which will broadcast
+ * requests to the rest of the orderer network. Or if the application does not trust
+ * the orderer nodes, it can choose to use more than one by adding them to the chain instance.
+ * All APIs concerning the orderer will broadcast to all orderers simultaneously.
+ * @param {Orderer} orderer An instance of the Orderer class.
+ */
+func (c *chain) AddOrderer(orderer Orderer) {
+ c.orderers[orderer.GetURL()] = orderer
+}
+
+// RemoveOrderer ...
+/**
+ * Remove orderer endpoint from a chain object, this is a local-only operation.
+ * @param {Orderer} orderer An instance of the Orderer class.
+ */
+func (c *chain) RemoveOrderer(orderer Orderer) {
+ delete(c.orderers, orderer.GetURL())
+
+}
+
+// GetOrderers ...
+/**
+ * Get orderers of a chain.
+ */
+func (c *chain) GetOrderers() []Orderer {
+ var orderersArray []Orderer
+ for _, v := range c.orderers {
+ orderersArray = append(orderersArray, v)
+ }
+ return orderersArray
+}
+
+// InitializeChain ...
+/**
+ * Calls the orderer(s) to start building the new chain, which is a combination
+ * of opening a new message stream and connecting the list of participating peers.
+ * This is a long-running process. Only one of the application instances needs
+ * to call this method. Once the chain is successfully created, other application
+ * instances only need to call getChain() to obtain the information about this chain.
+ * @returns {bool} Whether the chain initialization process was successful.
+ */
+func (c *chain) InitializeChain() bool {
+ return false
+}
+
+// UpdateChain ...
+/**
+ * Calls the orderer(s) to update an existing chain. This allows the addition and
+ * deletion of Peer nodes to an existing chain, as well as the update of Peer
+ * certificate information upon certificate renewals.
+ * @returns {bool} Whether the chain update process was successful.
+ */
+func (c *chain) UpdateChain() bool {
+ return false
+}
+
+// IsReadonly ...
+/**
+ * Get chain status to see if the underlying channel has been terminated,
+ * making it a read-only chain, where information (transactions and states)
+ * can be queried but no new transactions can be submitted.
+ * @returns {bool} Is read-only, true or not.
+ */
+func (c *chain) IsReadonly() bool {
+ return false //to do
+}
+
+// QueryInfo ...
+/**
+ * Queries for various useful information on the state of the Chain
+ * (height, known peers).
+ * @returns {object} With height, currently the only useful info.
+ */
+func (c *chain) QueryInfo() {
+ //to do
+}
+
+// QueryBlock ...
+/**
+ * Queries the ledger for Block by block number.
+ * @param {int} blockNumber The number which is the ID of the Block.
+ * @returns {object} Object containing the block.
+ */
+func (c *chain) QueryBlock(blockNumber int) {
+ //to do
+}
+
+// QueryTransaction ...
+/**
+ * Queries the ledger for Transaction by number.
+ * @param {int} transactionID
+ * @returns {object} Transaction information containing the transaction.
+ */
+func (c *chain) QueryTransaction(transactionID int) {
+ //to do
+}
+
+// CreateTransactionProposal ...
+/**
+ * Create a proposal for a transaction. This involves assembling the proposal
+ * with the data (chaincode name, function to call, arguments, transient data, etc.)
+ * and signing it with the private key corresponding to the user's ECert.
+ */
+func (c *chain) CreateTransactionProposal(chaincodeName string, chainID string,
+ args []string, sign bool, transientData map[string][]byte) (*TransactionProposal, error) {
+
+ argsArray := make([][]byte, len(args))
+ for i, arg := range args {
+ argsArray[i] = []byte(arg)
+ }
+ ccis := &pb.ChaincodeInvocationSpec{ChaincodeSpec: &pb.ChaincodeSpec{
+ Type: pb.ChaincodeSpec_GOLANG, ChaincodeId: &pb.ChaincodeID{Name: chaincodeName},
+ Input: &pb.ChaincodeInput{Args: argsArray}}}
+
+ user, err := c.clientContext.GetUserContext("")
+ if err != nil {
+ return nil, fmt.Errorf("GetUserContext return error: %s", err)
+ }
+
+ creatorID, err := getSerializedIdentity(user.GetEnrollmentCertificate())
+ if err != nil {
+ return nil, err
+ }
+ // create a proposal from a ChaincodeInvocationSpec
+ proposal, txID, err := protos_utils.CreateChaincodeProposalWithTransient(common.HeaderType_ENDORSER_TRANSACTION, chainID, ccis, creatorID, transientData)
+ if err != nil {
+ return nil, fmt.Errorf("Could not create chaincode proposal, err %s", err)
+ }
+
+ proposalBytes, err := protos_utils.GetBytesProposal(proposal)
+ if err != nil {
+ return nil, err
+ }
+
+ signature, err := c.signObjectWithKey(proposalBytes, user.GetPrivateKey(),
+ &bccsp.SHAOpts{}, nil)
+ if err != nil {
+ return nil, err
+ }
+ signedProposal := &pb.SignedProposal{ProposalBytes: proposalBytes, Signature: signature}
+ return &TransactionProposal{
+ TransactionID: txID,
+ signedProposal: signedProposal,
+ proposal: proposal,
+ }, nil
+}
+
+// SendTransactionProposal ...
+// Send the created proposal to peer for endorsement.
+func (c *chain) SendTransactionProposal(proposal *TransactionProposal, retry int) ([]*TransactionProposalResponse, error) {
+ if c.peers == nil || len(c.peers) == 0 {
+ return nil, fmt.Errorf("peers is nil")
+ }
+ if proposal == nil || proposal.signedProposal == nil {
+ return nil, fmt.Errorf("signedProposal is nil")
+ }
+
+ var responseMtx sync.Mutex
+ var transactionProposalResponses []*TransactionProposalResponse
+ var wg sync.WaitGroup
+
+ for _, p := range c.peers {
+ wg.Add(1)
+ go func(peer Peer) {
+ defer wg.Done()
+ var err error
+ var proposalResponse *TransactionProposalResponse
+ logger.Debugf("Send ProposalRequest to peer :%s\n", peer.GetURL())
+ if proposalResponse, err = peer.SendProposal(proposal); err != nil {
+ logger.Debugf("Receive Error Response :%v\n", proposalResponse)
+ proposalResponse = &TransactionProposalResponse{
+ Endorser: peer.GetURL(),
+ Err: fmt.Errorf("Error calling endorser '%s': %s", peer.GetURL(), err),
+ proposal: proposal,
+ }
+ } else {
+ prp1, _ := protos_utils.GetProposalResponsePayload(proposalResponse.proposalResponse.Payload)
+ act1, _ := protos_utils.GetChaincodeAction(prp1.Extension)
+ logger.Debugf("%s ProposalResponsePayload Extension ChaincodeAction Results\n%s\n", peer.GetURL(), string(act1.Results))
+
+ logger.Debugf("Receive Proposal ChaincodeActionResponse :%v\n", proposalResponse)
+ }
+
+ responseMtx.Lock()
+ transactionProposalResponses = append(transactionProposalResponses, proposalResponse)
+ responseMtx.Unlock()
+ }(p)
+ }
+ wg.Wait()
+ return transactionProposalResponses, nil
+}
+
+// CreateTransaction ...
+/**
+ * Create a transaction with proposal response, following the endorsement policy.
+ */
+func (c *chain) CreateTransaction(resps []*TransactionProposalResponse) (*Transaction, error) {
+ if len(resps) == 0 {
+ return nil, fmt.Errorf("At least one proposal response is necessary")
+ }
+
+ proposal := resps[0].proposal
+
+ // the original header
+ hdr, err := protos_utils.GetHeader(proposal.proposal.Header)
+ if err != nil {
+ return nil, fmt.Errorf("Could not unmarshal the proposal header")
+ }
+
+ // the original payload
+ pPayl, err := protos_utils.GetChaincodeProposalPayload(proposal.proposal.Payload)
+ if err != nil {
+ return nil, fmt.Errorf("Could not unmarshal the proposal payload")
+ }
+
+ // get header extensions so we have the visibility field
+ hdrExt, err := protos_utils.GetChaincodeHeaderExtension(hdr)
+ if err != nil {
+ return nil, err
+ }
+
+ // This code is commented out because the ProposalResponsePayload Extension ChaincodeAction Results
+ // return from endorsements is different so the compare will fail
+
+ // var a1 []byte
+ // for n, r := range resps {
+ // if n == 0 {
+ // a1 = r.Payload
+ // if r.Response.Status != 200 {
+ // return nil, fmt.Errorf("Proposal response was not successful, error code %d, msg %s", r.Response.Status, r.Response.Message)
+ // }
+ // continue
+ // }
+
+ // if bytes.Compare(a1, r.Payload) != 0 {
+ // return nil, fmt.Errorf("ProposalResponsePayloads do not match")
+ // }
+ // }
+
+ for _, r := range resps {
+ if r.proposalResponse.Response.Status != 200 {
+ return nil, fmt.Errorf("Proposal response was not successful, error code %d, msg %s", r.proposalResponse.Response.Status, r.proposalResponse.Response.Message)
+ }
+ }
+
+ // fill endorsements
+ endorsements := make([]*pb.Endorsement, len(resps))
+ for n, r := range resps {
+ endorsements[n] = r.proposalResponse.Endorsement
+ }
+ // create ChaincodeEndorsedAction
+ cea := &pb.ChaincodeEndorsedAction{ProposalResponsePayload: resps[0].proposalResponse.Payload, Endorsements: endorsements}
+
+ // obtain the bytes of the proposal payload that will go to the transaction
+ propPayloadBytes, err := protos_utils.GetBytesProposalPayloadForTx(pPayl, hdrExt.PayloadVisibility)
+ if err != nil {
+ return nil, err
+ }
+
+ // serialize the chaincode action payload
+ cap := &pb.ChaincodeActionPayload{ChaincodeProposalPayload: propPayloadBytes, Action: cea}
+ capBytes, err := protos_utils.GetBytesChaincodeActionPayload(cap)
+ if err != nil {
+ return nil, err
+ }
+
+ // create a transaction
+ taa := &pb.TransactionAction{Header: hdr.SignatureHeader, Payload: capBytes}
+ taas := make([]*pb.TransactionAction, 1)
+ taas[0] = taa
+
+ return &Transaction{
+ transaction: &pb.Transaction{Actions: taas},
+ proposal: proposal,
+ }, nil
+}
+
+// SendTransaction ...
+/**
+ * Send a transaction to the chain’s orderer service (one or more orderer endpoints) for consensus and committing to the ledger.
+ * This call is asynchronous and the successful transaction commit is notified via a BLOCK or CHAINCODE event. This method must provide a mechanism for applications to attach event listeners to handle “transaction submitted”, “transaction complete” and “error” events.
+ * Note that under the cover there are two different kinds of communications with the fabric backend that trigger different events to
+ * be emitted back to the application’s handlers:
+ * 1) The grpc client with the orderer service uses a “regular” stateless HTTP connection in a request/response fashion with the “broadcast” call.
+ * The method implementation should emit “transaction submitted” when a successful acknowledgement is received in the response,
+ * or “error” when an error is received
+ * 2) The method implementation should also maintain a persistent connection with the Chain’s event source Peer as part of the
+ * internal event hub mechanism in order to support the fabric events “BLOCK”, “CHAINCODE” and “TRANSACTION”.
+ * These events should cause the method to emit “complete” or “error” events to the application.
+ */
+func (c *chain) SendTransaction(tx *Transaction) ([]*TransactionResponse, error) {
+ if c.orderers == nil || len(c.orderers) == 0 {
+ return nil, fmt.Errorf("orderers is nil")
+ }
+	if tx == nil || tx.proposal == nil || tx.proposal.proposal == nil {
+		return nil, fmt.Errorf("proposal is nil")
+	}
+ // the original header
+ hdr, err := protos_utils.GetHeader(tx.proposal.proposal.Header)
+ if err != nil {
+ return nil, fmt.Errorf("Could not unmarshal the proposal header")
+ }
+ // serialize the tx
+ txBytes, err := protos_utils.GetBytesTransaction(tx.transaction)
+ if err != nil {
+ return nil, err
+ }
+
+ // create the payload
+ payl := &common.Payload{Header: hdr, Data: txBytes}
+ paylBytes, err := protos_utils.GetBytesPayload(payl)
+ if err != nil {
+ return nil, err
+ }
+
+ // here's the envelope
+ envelope, err := c.signPayload(paylBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ transactionResponses, err := c.broadcastEnvelope(envelope)
+ if err != nil {
+ return nil, err
+ }
+
+ return transactionResponses, nil
+}
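+
+// Usage sketch (illustrative only; the chaincode name, channel ID and arguments
+// are examples) of the full propose/endorse/submit flow implemented in this file:
+//
+//	proposal, err := chain.CreateTransactionProposal("mycc", "mychannel",
+//		[]string{"invoke", "a", "b", "10"}, true, nil)
+//	if err != nil {
+//		return err
+//	}
+//	responses, err := chain.SendTransactionProposal(proposal, 0)
+//	if err != nil {
+//		return err
+//	}
+//	tx, err := chain.CreateTransaction(responses)
+//	if err != nil {
+//		return err
+//	}
+//	_, err = chain.SendTransaction(tx)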
+
+// SendInstallProposal ...
+/**
+* Sends an install proposal to one or more endorsing peers.
+* @param {string} chaincodeName: required - The name of the chaincode.
+* @param {string} chaincodePath: required - The path to the location of the chaincode source code.
+* @param {string} chaincodeVersion: required - The version of the chaincode.
+* @param {[]byte} chaincodePackage: optional - The chaincode package bytes; generated from chaincodePath when nil.
+ */
+func (c *chain) SendInstallProposal(chaincodeName string, chaincodePath string, chaincodeVersion string, chaincodePackage []byte) ([]*TransactionProposalResponse, string, error) {
+
+ if chaincodeName == "" {
+ return nil, "", fmt.Errorf("Missing 'chaincodeName' parameter")
+ }
+ if chaincodePath == "" {
+ return nil, "", fmt.Errorf("Missing 'chaincodePath' parameter")
+ }
+ if chaincodeVersion == "" {
+ return nil, "", fmt.Errorf("Missing 'chaincodeVersion' parameter")
+ }
+
+ if chaincodePackage == nil {
+ var err error
+ chaincodePackage, err = PackageCC(chaincodePath, "")
+ if err != nil {
+ return nil, "", fmt.Errorf("PackageCC return error: %s", err)
+ }
+ }
+
+	now := time.Now()
+	cds := &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{
+		Type: pb.ChaincodeSpec_GOLANG, ChaincodeId: &pb.ChaincodeID{Name: chaincodeName, Path: chaincodePath, Version: chaincodeVersion}},
+		// EffectiveDate uses Unix-epoch seconds; now.Second() would only yield the seconds within the current minute
+		CodePackage: chaincodePackage, EffectiveDate: &google_protobuf.Timestamp{Seconds: now.Unix(), Nanos: int32(now.Nanosecond())}}
+
+ user, err := c.clientContext.GetUserContext("")
+ if err != nil {
+ return nil, "", fmt.Errorf("GetUserContext return error: %s", err)
+ }
+
+ creatorID, err := getSerializedIdentity(user.GetEnrollmentCertificate())
+ if err != nil {
+ return nil, "", err
+ }
+
+ // create an install from a chaincodeDeploymentSpec
+ proposal, txID, err := protos_utils.CreateInstallProposalFromCDS(cds, creatorID)
+ if err != nil {
+ return nil, "", fmt.Errorf("Could not create chaincode Deploy proposal, err %s", err)
+ }
+
+ signedProposal, err := c.signProposal(proposal)
+ if err != nil {
+ return nil, "", err
+ }
+
+ transactionProposalResponse, err := c.SendTransactionProposal(&TransactionProposal{
+ signedProposal: signedProposal,
+ proposal: proposal,
+ TransactionID: txID,
+ }, 0)
+ return transactionProposalResponse, txID, err
+}
+
+// SendInstantiateProposal ...
+/**
+* Sends an instantiate proposal to one or more endorsing peers.
+* @param {string} chaincodeName: required - The name of the chaincode.
+* @param {string} chainID: required - The name of the chain (channel) on which to instantiate.
+* @param {[]string} args: optional - Arguments specific to the chaincode being instantiated.
+* @param {string} chaincodePath: required - The path to the location of the chaincode source code.
+* @param {string} chaincodeVersion: required - The version of the chaincode.
+ */
+func (c *chain) SendInstantiateProposal(chaincodeName string, chainID string,
+ args []string, chaincodePath string, chaincodeVersion string) ([]*TransactionProposalResponse, string, error) {
+
+ if chaincodeName == "" {
+ return nil, "", fmt.Errorf("Missing 'chaincodeName' parameter")
+ }
+ if chainID == "" {
+ return nil, "", fmt.Errorf("Missing 'chainID' parameter")
+ }
+ if chaincodePath == "" {
+ return nil, "", fmt.Errorf("Missing 'chaincodePath' parameter")
+ }
+ if chaincodeVersion == "" {
+ return nil, "", fmt.Errorf("Missing 'chaincodeVersion' parameter")
+ }
+
+ argsArray := make([][]byte, len(args))
+ for i, arg := range args {
+ argsArray[i] = []byte(arg)
+ }
+
+ ccds := &pb.ChaincodeDeploymentSpec{ChaincodeSpec: &pb.ChaincodeSpec{
+ Type: pb.ChaincodeSpec_GOLANG, ChaincodeId: &pb.ChaincodeID{Name: chaincodeName, Path: chaincodePath, Version: chaincodeVersion},
+ Input: &pb.ChaincodeInput{Args: argsArray}}}
+
+ user, err := c.clientContext.GetUserContext("")
+ if err != nil {
+ return nil, "", fmt.Errorf("GetUserContext return error: %s", err)
+ }
+
+ creatorID, err := getSerializedIdentity(user.GetEnrollmentCertificate())
+ if err != nil {
+ return nil, "", err
+ }
+ chaincodePolicy, err := buildChaincodePolicy(config.GetFabricCAID())
+ if err != nil {
+ return nil, "", err
+ }
+ chaincodePolicyBytes, err := protos_utils.Marshal(chaincodePolicy)
+ if err != nil {
+ return nil, "", err
+ }
+ // create a proposal from a chaincodeDeploymentSpec
+ proposal, txID, err := protos_utils.CreateDeployProposalFromCDS(chainID, ccds, creatorID, chaincodePolicyBytes, []byte("escc"), []byte("vscc"))
+ if err != nil {
+ return nil, "", fmt.Errorf("Could not create chaincode Deploy proposal, err %s", err)
+ }
+
+ signedProposal, err := c.signProposal(proposal)
+ if err != nil {
+ return nil, "", err
+ }
+
+ transactionProposalResponse, err := c.SendTransactionProposal(&TransactionProposal{
+ signedProposal: signedProposal,
+ proposal: proposal,
+ TransactionID: txID,
+ }, 0)
+
+ return transactionProposalResponse, txID, err
+}
+
+func (c *chain) signPayload(payload []byte) (*SignedEnvelope, error) {
+ //Get user info
+ user, err := c.clientContext.GetUserContext("")
+ if err != nil {
+ return nil, fmt.Errorf("GetUserContext returned error: %s", err)
+ }
+
+ signature, err := c.signObjectWithKey(payload, user.GetPrivateKey(),
+ &bccsp.SHAOpts{}, nil)
+ if err != nil {
+ return nil, err
+ }
+ // here's the envelope
+ return &SignedEnvelope{Payload: payload, signature: signature}, nil
+}
+
+//broadcastEnvelope will send the given envelope to each orderer
+func (c *chain) broadcastEnvelope(envelope *SignedEnvelope) ([]*TransactionResponse, error) {
+ // Check if orderers are defined
+ if c.orderers == nil || len(c.orderers) == 0 {
+ return nil, fmt.Errorf("orderers not set")
+ }
+
+ var responseMtx sync.Mutex
+ var transactionResponses []*TransactionResponse
+ var wg sync.WaitGroup
+
+ for _, o := range c.orderers {
+ wg.Add(1)
+ go func(orderer Orderer) {
+ defer wg.Done()
+ var transactionResponse *TransactionResponse
+
+ logger.Debugf("Broadcasting envelope to orderer :%s\n", orderer.GetURL())
+ if err := orderer.SendBroadcast(envelope); err != nil {
+ logger.Debugf("Receive Error Response from orderer :%v\n", err)
+ transactionResponse = &TransactionResponse{orderer.GetURL(),
+ fmt.Errorf("Error calling orderer '%s': %s", orderer.GetURL(), err)}
+ } else {
+ logger.Debugf("Receive Success Response from orderer\n")
+ transactionResponse = &TransactionResponse{orderer.GetURL(), nil}
+ }
+
+ responseMtx.Lock()
+ transactionResponses = append(transactionResponses, transactionResponse)
+ responseMtx.Unlock()
+ }(o)
+ }
+ wg.Wait()
+
+ return transactionResponses, nil
+}
+
+// signObjectWithKey will sign the given object with the given key,
+// hashOpts and signerOpts
+func (c *chain) signObjectWithKey(object []byte, key bccsp.Key,
+ hashOpts bccsp.HashOpts, signerOpts bccsp.SignerOpts) ([]byte, error) {
+ cryptoSuite := c.clientContext.GetCryptoSuite()
+ digest, err := cryptoSuite.Hash(object, hashOpts)
+ if err != nil {
+ return nil, err
+ }
+ signature, err := cryptoSuite.Sign(key, digest, signerOpts)
+ if err != nil {
+ return nil, err
+ }
+
+ return signature, nil
+}
+
+func (c *chain) signProposal(proposal *pb.Proposal) (*pb.SignedProposal, error) {
+ user, err := c.clientContext.GetUserContext("")
+ if err != nil {
+ return nil, fmt.Errorf("GetUserContext return error: %s", err)
+ }
+
+ proposalBytes, err := protos_utils.GetBytesProposal(proposal)
+ if err != nil {
+ return nil, err
+ }
+
+ signature, err := c.signObjectWithKey(proposalBytes, user.GetPrivateKey(), &bccsp.SHAOpts{}, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return &pb.SignedProposal{ProposalBytes: proposalBytes, Signature: signature}, nil
+}
+
+func getSerializedIdentity(userCertificate []byte) ([]byte, error) {
+ serializedIdentity := &msp.SerializedIdentity{Mspid: config.GetFabricCAID(),
+ IdBytes: userCertificate}
+ creatorID, err := proto.Marshal(serializedIdentity)
+ if err != nil {
+ return nil, fmt.Errorf("Could not Marshal serializedIdentity, err %s", err)
+ }
+ return creatorID, nil
+}
+
+// internal utility method to build chaincode policy
+// FIXME: for now always construct a 'Signed By any member of an organization by mspid' policy
+func buildChaincodePolicy(mspid string) (*common.SignaturePolicyEnvelope, error) {
+ // Define MSPRole
+ memberRole, err := proto.Marshal(&common.MSPRole{Role: common.MSPRole_MEMBER, MspIdentifier: mspid})
+ if err != nil {
+ return nil, fmt.Errorf("Error marshal MSPRole: %s", err)
+ }
+
+ // construct a list of msp principals to select from using the 'n out of' operator
+ onePrn := &common.MSPPrincipal{
+ PrincipalClassification: common.MSPPrincipal_ROLE,
+ Principal: memberRole}
+
+ // construct 'signed by msp principal at index 0'
+ signedBy := &common.SignaturePolicy{Type: &common.SignaturePolicy_SignedBy{SignedBy: 0}}
+
+ // construct 'one of one' policy
+ oneOfone := &common.SignaturePolicy{Type: &common.SignaturePolicy_NOutOf_{NOutOf: &common.SignaturePolicy_NOutOf{
+ N: 1, Policies: []*common.SignaturePolicy{signedBy}}}}
+
+ p := &common.SignaturePolicyEnvelope{
+ Version: 0,
+ Policy: oneOfone,
+ Identities: []*common.MSPPrincipal{onePrn},
+ }
+ return p, nil
+}
diff --git a/fabric-client/chain_test.go b/fabric-client/chain_test.go
new file mode 100644
index 0000000000..0e385427e0
--- /dev/null
+++ b/fabric-client/chain_test.go
@@ -0,0 +1,122 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+import (
+ "fmt"
+ "testing"
+
+ mocks "github.com/hyperledger/fabric-sdk-go/fabric-client/mocks"
+ pb "github.com/hyperledger/fabric/protos/peer"
+)
+
+func TestChainMethods(t *testing.T) {
+ client := NewClient()
+ chain, err := NewChain("testChain", client)
+ if err != nil {
+ t.Fatalf("NewChain return error[%s]", err)
+ }
+ if chain.GetName() != "testChain" {
+ t.Fatalf("NewChain create wrong chain")
+ }
+
+ _, err = NewChain("", client)
+ if err == nil {
+ t.Fatalf("NewChain didn't return error")
+ }
+ if err.Error() != "Failed to create Chain. Missing requirement 'name' parameter." {
+ t.Fatalf("NewChain didn't return right error")
+ }
+
+ _, err = NewChain("testChain", nil)
+ if err == nil {
+ t.Fatalf("NewChain didn't return error")
+ }
+ if err.Error() != "Failed to create Chain. Missing requirement 'clientContext' parameter." {
+ t.Fatalf("NewChain didn't return right error")
+ }
+
+}
+
+func TestConcurrentPeers(t *testing.T) {
+ const numPeers = 10000
+ chain, err := setupMassiveTestChain(numPeers, 0)
+ if err != nil {
+ t.Fatalf("Failed to create massive chain: %s", err)
+ }
+
+ result, err := chain.SendTransactionProposal(&TransactionProposal{
+ signedProposal: &pb.SignedProposal{},
+ }, 1)
+ if err != nil {
+ t.Fatalf("SendTransactionProposal return error: %s", err)
+ }
+
+ if len(result) != numPeers {
+ t.Error("SendTransactionProposal returned an unexpected amount of responses")
+ }
+}
+
+func TestConcurrentOrderers(t *testing.T) {
+ const numOrderers = 10000
+ chain, err := setupMassiveTestChain(0, numOrderers)
+ if err != nil {
+ t.Fatalf("Failed to create massive chain: %s", err)
+ }
+
+ txn := Transaction{
+ proposal: &TransactionProposal{
+ proposal: &pb.Proposal{},
+ },
+ transaction: &pb.Transaction{},
+ }
+ _, err = chain.SendTransaction(&txn)
+ if err != nil {
+ t.Fatalf("SendTransaction returned error: %s", err)
+ }
+}
+
+func setupTestChain() (Chain, error) {
+ client := NewClient()
+ user := NewUser("test")
+ cryptoSuite := &mocks.MockCryptoSuite{}
+ client.SetUserContext(user, true)
+ client.SetCryptoSuite(cryptoSuite)
+ return NewChain("testChain", client)
+}
+
+func setupMassiveTestChain(numberOfPeers int, numberOfOrderers int) (Chain, error) {
+	chain, err := setupTestChain()
+	if err != nil {
+		return chain, err
+	}
+
+ for i := 0; i < numberOfPeers; i++ {
+ peer := mockPeer{fmt.Sprintf("MockPeer%d", i), fmt.Sprintf("http://mock%d.peers.r.us", i), []string{}, nil}
+ chain.AddPeer(&peer)
+ }
+
+ for i := 0; i < numberOfOrderers; i++ {
+ orderer := mockOrderer{fmt.Sprintf("http://mock%d.orderers.r.us", i), nil}
+ chain.AddOrderer(&orderer)
+ }
+
+	return chain, err
+}
diff --git a/fabric-client/client.go b/fabric-client/client.go
new file mode 100644
index 0000000000..044ad81664
--- /dev/null
+++ b/fabric-client/client.go
@@ -0,0 +1,234 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+import (
+ "encoding/json"
+ "fmt"
+
+ kvs "github.com/hyperledger/fabric-sdk-go/fabric-client/keyvaluestore"
+ "github.com/hyperledger/fabric/bccsp"
+)
+
+// Client ...
+/*
+ * Main interaction handler with end user. A client instance provides a handler to interact
+ * with a network of peers, orderers and optionally member services. An application using the
+ * SDK may need to interact with multiple networks, each through a separate instance of the Client.
+ *
+ * Each client when initially created should be initialized with configuration data from the
+ * consensus service, which includes a list of trusted roots, orderer certificates and IP addresses,
+ * and a list of peer certificates and IP addresses that it can access. This must be done out of band
+ * as part of bootstrapping the application environment. It is also the responsibility of the application
+ * to maintain the configuration of a client as the SDK does not persist this object.
+ *
+ * Each Client instance can maintain several {@link Chain} instances representing channels and the associated
+ * private ledgers.
+ *
+ *
+ */
+type Client interface {
+ NewChain(name string) (Chain, error)
+ GetChain(name string) Chain
+ QueryChainInfo(name string, peers []Peer) (Chain, error)
+ SetStateStore(stateStore kvs.KeyValueStore)
+ GetStateStore() kvs.KeyValueStore
+ SetCryptoSuite(cryptoSuite bccsp.BCCSP)
+ GetCryptoSuite() bccsp.BCCSP
+ SetUserContext(user User, skipPersistence bool) error
+ GetUserContext(name string) (User, error)
+}
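+
+// A minimal usage sketch of this interface (illustrative only; the chain
+// name and store path below are hypothetical):
+//
+//	client := NewClient()
+//	store, err := kvs.CreateNewFileKeyValueStore("/tmp/keyvaluestore")
+//	if err != nil {
+//		// handle error
+//	}
+//	client.SetStateStore(store)
+//	chain, err := client.NewChain("myChain")
+//	if err != nil {
+//		// handle error
+//	}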
+
+type client struct {
+ chains map[string]Chain
+ cryptoSuite bccsp.BCCSP
+ stateStore kvs.KeyValueStore
+ userContext User
+}
+
+// NewClient ...
+/*
+ * Returns a Client instance
+ */
+func NewClient() Client {
+ chains := make(map[string]Chain)
+ c := &client{chains: chains, cryptoSuite: nil, stateStore: nil, userContext: nil}
+ return c
+}
+
+// NewChain ...
+/*
+ * Returns a chain instance with the given name. This represents a channel and its associated ledger
+ * (as explained above), and this call returns an empty object. To initialize the chain in the blockchain network,
+ * a list of participating endorsers and orderer peers must be configured first on the returned object.
+ * @param {string} name The name of the chain. Recommend using namespaces to avoid collision.
+ * @returns {Chain} The uninitialized chain instance.
+ * @returns {Error} if the chain by that name already exists in the application's state store
+ */
+func (c *client) NewChain(name string) (Chain, error) {
+ if _, ok := c.chains[name]; ok {
+ return nil, fmt.Errorf("Chain %s already exists", name)
+ }
+ var err error
+ c.chains[name], err = NewChain(name, c)
+ if err != nil {
+ return nil, err
+ }
+ return c.chains[name], nil
+}
+
+// GetChain ...
+/*
+ * Get a {@link Chain} instance from the state storage. This allows existing chain instances to be saved
+ * for retrieval later and to be shared among instances of the application. Note that it’s the
+ * application/SDK’s responsibility to record the chain information. If an application is not able
+ * to look up the chain information from storage, it may call another API that queries one or more
+ * Peers for that information.
+ * @param {string} name The name of the chain.
+ * @returns {Chain} The chain instance
+ */
+func (c *client) GetChain(name string) Chain {
+ return c.chains[name]
+}
+
+// QueryChainInfo ...
+/*
+ * This is a network call to the designated Peer(s) to discover the chain information.
+ * The target Peer(s) must be part of the chain to be able to return the requested information.
+ * @param {string} name The name of the chain.
+ * @param {[]Peer} peers Array of target Peers to query.
+ * @returns {Chain} The chain instance for the name or error if the target Peer(s) does not know
+ * anything about the chain.
+ */
+func (c *client) QueryChainInfo(name string, peers []Peer) (Chain, error) {
+ return nil, fmt.Errorf("Not implemented yet")
+}
+
+// SetStateStore ...
+/*
+ * The SDK should have a built-in key value store implementation (suggest a file-based implementation to allow easy setup during
+ * development). But production systems would want a store backed by database for more robust storage and clustering,
+ * so that multiple app instances can share app state via the database (note that this doesn’t necessarily make the app stateful).
+ * This API makes this pluggable so that different store implementations can be selected by the application.
+ */
+func (c *client) SetStateStore(stateStore kvs.KeyValueStore) {
+ c.stateStore = stateStore
+}
+
+// GetStateStore ...
+/*
+ * A convenience method for obtaining the state store object in use for this client.
+ */
+func (c *client) GetStateStore() kvs.KeyValueStore {
+ return c.stateStore
+}
+
+// SetCryptoSuite ...
+/*
+ * A convenience method for setting the CryptoSuite object to be used by this client.
+ */
+func (c *client) SetCryptoSuite(cryptoSuite bccsp.BCCSP) {
+ c.cryptoSuite = cryptoSuite
+}
+
+// GetCryptoSuite ...
+/*
+ * A convenience method for obtaining the CryptoSuite object in use for this client.
+ */
+func (c *client) GetCryptoSuite() bccsp.BCCSP {
+ return c.cryptoSuite
+}
+
+// SetUserContext ...
+/*
+ * Sets an instance of the User class as the security context of this client instance. This user’s credentials (ECert) will be
+ * used to conduct transactions and queries with the blockchain network. Upon setting the user context, the SDK saves the object
+ * in a persistence cache if the “state store” has been set on the Client instance. If no state store has been set,
+ * this cache will not be established and the application is responsible for setting the user context again if the
+ * application crashes and is restarted.
+ */
+func (c *client) SetUserContext(user User, skipPersistence bool) error {
+ if user == nil {
+ return fmt.Errorf("user is nil")
+ }
+
+ if user.GetName() == "" {
+ return fmt.Errorf("user name is empty")
+ }
+ c.userContext = user
+ if !skipPersistence {
+ if c.stateStore == nil {
+ return fmt.Errorf("stateStore is nil")
+ }
+ userJSON := &UserJSON{PrivateKeySKI: user.GetPrivateKey().SKI(), EnrollmentCertificate: user.GetEnrollmentCertificate()}
+ data, err := json.Marshal(userJSON)
+ if err != nil {
+			return fmt.Errorf("json.Marshal returned error: %v", err)
+ }
+ err = c.stateStore.SetValue(user.GetName(), data)
+ if err != nil {
+			return fmt.Errorf("stateStore SetValue returned error: %v", err)
+ }
+ }
+ return nil
+
+}
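+
+// For example (a sketch; persisting with skipPersistence=false additionally
+// requires a state store and a user carrying a private key and enrollment
+// certificate):
+//
+//	user := NewUser("admin")
+//	// cache in memory only; nothing is written to the state store:
+//	if err := client.SetUserContext(user, true); err != nil {
+//		// handle error
+//	}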
+
+// GetUserContext ...
+/*
+ * The client instance can have an optional state store. The SDK saves enrolled users in the storage which can be accessed by
+ * authorized users of the application (authentication is done by the application outside of the SDK).
+ * This function attempts to load the user by name from the local storage (via the KeyValueStore interface).
+ * The loaded user object must represent an enrolled user with a valid enrollment certificate signed by a trusted CA
+ * (such as the COP server).
+ */
+func (c *client) GetUserContext(name string) (User, error) {
+ if c.userContext != nil {
+ return c.userContext, nil
+ }
+ if name == "" {
+ return nil, nil
+ }
+ if c.stateStore == nil {
+ return nil, nil
+ }
+ if c.cryptoSuite == nil {
+ return nil, fmt.Errorf("cryptoSuite is nil")
+ }
+	value, err := c.stateStore.GetValue(name)
+	if err != nil {
+		// Treat a missing entry in the state store as "user not found"
+		return nil, nil
+	}
+ var userJSON UserJSON
+ err = json.Unmarshal(value, &userJSON)
+ if err != nil {
+		return nil, fmt.Errorf("json.Unmarshal returned error: %v", err)
+ }
+ user := NewUser(name)
+ user.SetEnrollmentCertificate(userJSON.EnrollmentCertificate)
+ key, err := c.cryptoSuite.GetKey(userJSON.PrivateKeySKI)
+ if err != nil {
+		return nil, fmt.Errorf("cryptoSuite GetKey returned error: %v", err)
+ }
+ user.SetPrivateKey(key)
+ c.userContext = user
+ return c.userContext, nil
+
+}
diff --git a/fabric-client/client_test.go b/fabric-client/client_test.go
new file mode 100644
index 0000000000..0ae68dbdd4
--- /dev/null
+++ b/fabric-client/client_test.go
@@ -0,0 +1,126 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+import (
+ "testing"
+
+ kvs "github.com/hyperledger/fabric-sdk-go/fabric-client/keyvaluestore"
+
+ bccspFactory "github.com/hyperledger/fabric/bccsp/factory"
+)
+
+func TestClientMethods(t *testing.T) {
+ client := NewClient()
+ if client.GetCryptoSuite() != nil {
+ t.Fatalf("Client getCryptoSuite should initially be nil")
+ }
+ err := bccspFactory.InitFactories(nil)
+ if err != nil {
+ t.Fatalf("Failed getting ephemeral software-based BCCSP [%s]", err)
+ }
+ cryptoSuite := bccspFactory.GetDefault()
+
+ client.SetCryptoSuite(cryptoSuite)
+ if client.GetCryptoSuite() == nil {
+ t.Fatalf("Client getCryptoSuite should not be nil after setCryptoSuite")
+ }
+
+	// Client tests: GetUserContext should successfully return a nil user
+	user, err := client.GetUserContext("")
+	if err != nil {
+		t.Fatalf("client.GetUserContext returned error[%s]", err)
+	}
+	if user != nil {
+		t.Fatalf("client.GetUserContext should return nil user")
+	}
+
+	// Client tests: should return error "user is nil"
+	err = client.SetUserContext(nil, false)
+	if err == nil {
+		t.Fatalf("client.SetUserContext didn't return an error")
+	}
+	if err.Error() != "user is nil" {
+		t.Fatalf("client.SetUserContext didn't return the right error")
+	}
+
+	// Client tests: GetUserContext with no context in memory or persisted returns nil
+	user, err = client.GetUserContext("someUser")
+	if err != nil {
+		t.Fatalf("client.GetUserContext returned error[%s]", err)
+	}
+	if user != nil {
+		t.Fatalf("client.GetUserContext should return nil user")
+	}
+
+	// Client tests: successfully SetUserContext with skipPersistence true
+	user = NewUser("someUser")
+	err = client.SetUserContext(user, true)
+	if err != nil {
+		t.Fatalf("client.SetUserContext returned error[%s]", err)
+	}
+	user, err = client.GetUserContext("someUser")
+	if err != nil {
+		t.Fatalf("client.GetUserContext returned error[%s]", err)
+	}
+	if user == nil {
+		t.Fatalf("client.GetUserContext returned nil user")
+	}
+	if user.GetName() != "someUser" {
+		t.Fatalf("client.GetUserContext didn't return the right user")
+	}
+
+	// Client tests: should return error "stateStore is nil"
+	err = client.SetUserContext(user, false)
+	if err == nil {
+		t.Fatalf("client.SetUserContext didn't return an error")
+	}
+	if err.Error() != "stateStore is nil" {
+		t.Fatalf("client.SetUserContext didn't return the right error")
+	}
+
+	// Client tests: create a new chain
+	chain, err := client.NewChain("someChain")
+	if err != nil {
+		t.Fatalf("client.NewChain returned error[%s]", err)
+	}
+	if chain.GetName() != "someChain" {
+		t.Fatalf("client.NewChain created the wrong chain")
+	}
+	chain1 := client.GetChain("someChain")
+	if chain1.GetName() != "someChain" {
+		t.Fatalf("client.GetChain returned the wrong chain")
+	}
+
+	stateStore, err := kvs.CreateNewFileKeyValueStore("/tmp/keyvaluestore")
+	if err != nil {
+		t.Fatalf("CreateNewFileKeyValueStore returned error[%s]", err)
+	}
+	client.SetStateStore(stateStore)
+	if err := client.GetStateStore().SetValue("testvalue", []byte("data")); err != nil {
+		t.Fatalf("client.GetStateStore().SetValue() returned error[%s]", err)
+	}
+	value, err := client.GetStateStore().GetValue("testvalue")
+	if err != nil {
+		t.Fatalf("client.GetStateStore().GetValue() returned error[%s]", err)
+	}
+	if string(value) != "data" {
+		t.Fatalf("client.GetStateStore().GetValue() didn't return the right value")
+	}
+
+}
diff --git a/fabric-client/events/consumer/consumer.go b/fabric-client/events/consumer/consumer.go
new file mode 100644
index 0000000000..6eab7fbcfe
--- /dev/null
+++ b/fabric-client/events/consumer/consumer.go
@@ -0,0 +1,256 @@
+/*
+Copyright IBM Corp, SecureKey Technologies Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package consumer
+
+import (
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ config "github.com/hyperledger/fabric-sdk-go/config"
+ consumer "github.com/hyperledger/fabric/events/consumer"
+ ehpb "github.com/hyperledger/fabric/protos/peer"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+)
+
+const defaultTimeout = time.Second * 3
+
+//EventsClient holds the stream and adapter for consumer to work with
+type EventsClient interface {
+ RegisterAsync(ies []*ehpb.Interest) error
+ UnregisterAsync(ies []*ehpb.Interest) error
+ Recv() (*ehpb.Event, error)
+ Start() error
+ Stop() error
+}
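+
+// A minimal wiring sketch (illustrative only; "localhost:7053" is a
+// hypothetical event-hub address, and "adapter" must implement
+// consumer.EventAdapter, i.e. GetInterestedEvents, Recv and Disconnected —
+// see the EventHub implementation in this SDK):
+//
+//	ec, err := NewEventsClient("localhost:7053", 5*time.Second, adapter)
+//	if err != nil {
+//		// a non-nil error here only reports a clamped regTimeout
+//	}
+//	if err := ec.Start(); err != nil {
+//		// handle error
+//	}
+//	defer ec.Stop()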
+
+type eventsClient struct {
+ sync.RWMutex
+ peerAddress string
+ regTimeout time.Duration
+ stream ehpb.Events_ChatClient
+ adapter consumer.EventAdapter
+}
+
+//NewEventsClient returns a new EventsClient for the peer at the given address.
+func NewEventsClient(peerAddress string, regTimeout time.Duration, adapter consumer.EventAdapter) (EventsClient, error) {
+ var err error
+	if regTimeout < 100*time.Millisecond {
+		regTimeout = 100 * time.Millisecond
+		err = fmt.Errorf("regTimeout must be at least 100 milliseconds; setting to 100 ms")
+	} else if regTimeout > 60*time.Second {
+		regTimeout = 60 * time.Second
+		err = fmt.Errorf("regTimeout must be at most 60 seconds; setting to 60 sec")
+	}
+ return &eventsClient{sync.RWMutex{}, peerAddress, regTimeout, nil, adapter}, err
+}
+
+//newEventsClientConnectionWithAddress returns a new grpc.ClientConn to the peer at the given address.
+func newEventsClientConnectionWithAddress(peerAddress string) (*grpc.ClientConn, error) {
+ var opts []grpc.DialOption
+	opts = append(opts, grpc.WithTimeout(defaultTimeout))
+ if config.IsTLSEnabled() {
+ creds := credentials.NewClientTLSFromCert(config.GetTLSCACertPool(), config.GetTLSServerHostOverride())
+ opts = append(opts, grpc.WithTransportCredentials(creds))
+ } else {
+ opts = append(opts, grpc.WithInsecure())
+ }
+ conn, err := grpc.Dial(peerAddress, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return conn, err
+}
+
+func (ec *eventsClient) send(emsg *ehpb.Event) error {
+ ec.Lock()
+ defer ec.Unlock()
+ return ec.stream.Send(emsg)
+}
+
+// RegisterAsync - registers interest in an event and doesn't wait for a response
+func (ec *eventsClient) RegisterAsync(ies []*ehpb.Interest) error {
+ emsg := &ehpb.Event{Event: &ehpb.Event_Register{Register: &ehpb.Register{Events: ies}}}
+ var err error
+	if err = ec.send(emsg); err != nil {
+		err = fmt.Errorf("error on Register send: %s", err)
+	}
+ return err
+}
+
+// register - registers interest in an event
+func (ec *eventsClient) register(ies []*ehpb.Interest) error {
+ var err error
+ if err = ec.RegisterAsync(ies); err != nil {
+ return err
+ }
+
+ regChan := make(chan struct{})
+ go func() {
+ defer close(regChan)
+ in, inerr := ec.stream.Recv()
+ if inerr != nil {
+ err = inerr
+ return
+ }
+ switch in.Event.(type) {
+ case *ehpb.Event_Register:
+ case nil:
+ err = fmt.Errorf("invalid nil object for register")
+ default:
+ err = fmt.Errorf("invalid registration object")
+ }
+ }()
+ select {
+ case <-regChan:
+ case <-time.After(ec.regTimeout):
+ err = fmt.Errorf("timeout waiting for registration")
+ }
+ return err
+}
+
+// UnregisterAsync - unregisters interest in an event and doesn't wait for a response
+func (ec *eventsClient) UnregisterAsync(ies []*ehpb.Interest) error {
+ emsg := &ehpb.Event{Event: &ehpb.Event_Unregister{Unregister: &ehpb.Unregister{Events: ies}}}
+ var err error
+ if err = ec.send(emsg); err != nil {
+		err = fmt.Errorf("error on Unregister send: %s", err)
+ }
+
+ return err
+}
+
+// unregister - unregisters interest in an event
+func (ec *eventsClient) unregister(ies []*ehpb.Interest) error {
+ var err error
+ if err = ec.UnregisterAsync(ies); err != nil {
+ return err
+ }
+
+ regChan := make(chan struct{})
+ go func() {
+ defer close(regChan)
+ in, inerr := ec.stream.Recv()
+ if inerr != nil {
+ err = inerr
+ return
+ }
+ switch in.Event.(type) {
+ case *ehpb.Event_Unregister:
+ case nil:
+ err = fmt.Errorf("invalid nil object for unregister")
+ default:
+ err = fmt.Errorf("invalid unregistration object")
+ }
+ }()
+ select {
+ case <-regChan:
+ case <-time.After(ec.regTimeout):
+ err = fmt.Errorf("timeout waiting for unregistration")
+ }
+ return err
+}
+
+// Recv receives the next event - use when the client has not called Start
+func (ec *eventsClient) Recv() (*ehpb.Event, error) {
+ in, err := ec.stream.Recv()
+ if err == io.EOF {
+ // read done.
+ if ec.adapter != nil {
+ ec.adapter.Disconnected(nil)
+ }
+ return nil, err
+ }
+ if err != nil {
+ if ec.adapter != nil {
+ ec.adapter.Disconnected(err)
+ }
+ return nil, err
+ }
+ return in, nil
+}
+
+func (ec *eventsClient) processEvents() error {
+ defer ec.stream.CloseSend()
+ for {
+ in, err := ec.stream.Recv()
+ if err == io.EOF {
+ // read done.
+ if ec.adapter != nil {
+ ec.adapter.Disconnected(nil)
+ }
+ return nil
+ }
+ if err != nil {
+ if ec.adapter != nil {
+ ec.adapter.Disconnected(err)
+ }
+ return err
+ }
+ if ec.adapter != nil {
+ cont, err := ec.adapter.Recv(in)
+ if !cont {
+ return err
+ }
+ }
+ }
+}
+
+//Start establishes connection with Event hub and registers interested events with it
+func (ec *eventsClient) Start() error {
+ conn, err := newEventsClientConnectionWithAddress(ec.peerAddress)
+ if err != nil {
+ return fmt.Errorf("Could not create client conn to %s", ec.peerAddress)
+ }
+
+ ies, err := ec.adapter.GetInterestedEvents()
+ if err != nil {
+ return fmt.Errorf("error getting interested events:%s", err)
+ }
+
+ if len(ies) == 0 {
+ return fmt.Errorf("must supply interested events")
+ }
+
+ serverClient := ehpb.NewEventsClient(conn)
+ ec.stream, err = serverClient.Chat(context.Background())
+ if err != nil {
+		return fmt.Errorf("Could not establish chat stream with %s", ec.peerAddress)
+ }
+
+ if err = ec.register(ies); err != nil {
+ return err
+ }
+
+ go ec.processEvents()
+
+ return nil
+}
+
+//Stop terminates connection with event hub
+func (ec *eventsClient) Stop() error {
+ if ec.stream == nil {
+ // in case the stream/chat server has not been established earlier, we assume that it's closed, successfully
+ return nil
+ }
+ return ec.stream.CloseSend()
+}
diff --git a/fabric-client/events/eventhub.go b/fabric-client/events/eventhub.go
new file mode 100644
index 0000000000..53501c4d6c
--- /dev/null
+++ b/fabric-client/events/eventhub.go
@@ -0,0 +1,335 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package events
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	consumer "github.com/hyperledger/fabric-sdk-go/fabric-client/events/consumer"
+	common "github.com/hyperledger/fabric/protos/common"
+	pb "github.com/hyperledger/fabric/protos/peer"
+	"github.com/hyperledger/fabric/protos/utils"
+	"github.com/op/go-logging"
+)
+
+var logger = logging.MustGetLogger("fabric_sdk_go")
+
+// EventHub ...
+type EventHub interface {
+ SetPeerAddr(peerURL string)
+ IsConnected() bool
+ Connect() error
+ RegisterChaincodeEvent(ccid string, eventname string, callback func(*pb.ChaincodeEvent)) *ChainCodeCBE
+ UnregisterChaincodeEvent(cbe *ChainCodeCBE)
+ RegisterTxEvent(txID string, callback func(string, error))
+ UnregisterTxEvent(txID string)
+}
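+
+// A minimal usage sketch (illustrative only; the peer address and
+// transaction ID below are hypothetical):
+//
+//	eventHub := NewEventHub()
+//	eventHub.SetPeerAddr("localhost:7053")
+//	if err := eventHub.Connect(); err != nil {
+//		// handle error
+//	}
+//	eventHub.RegisterTxEvent("myTxID", func(txID string, err error) {
+//		// called when the transaction is committed
+//	})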
+
+type eventHub struct {
+ // Protects chaincodeRegistrants, blockRegistrants and txRegistrants
+ mtx sync.RWMutex
+ // Map of clients registered for chaincode events
+ chaincodeRegistrants map[string][]*ChainCodeCBE
+	// Callbacks registered for block events
+ blockRegistrants []func(*common.Block, string, string)
+ // Map of clients registered for transactional events
+ txRegistrants map[string]func(string, error)
+ // peer addr to connect to
+ peerAddr string
+ // grpc event client interface
+ client consumer.EventsClient
+ // fabric connection state of this eventhub
+ connected bool
+ // List of events client is interested in
+ interestedEvents []*pb.Interest
+}
+
+// ChainCodeCBE ...
+/**
+ * The ChainCodeCBE is used internal to the EventHub to hold chaincode
+ * event registration callbacks.
+ */
+type ChainCodeCBE struct {
+ // chaincode id
+ CCID string
+	// event name filter (matched as an exact string against the event name)
+ EventNameFilter string
+ // callback function to invoke on successful filter match
+ CallbackFunc func(*pb.ChaincodeEvent)
+}
+
+// NewEventHub ...
+func NewEventHub() EventHub {
+ chaincodeRegistrants := make(map[string][]*ChainCodeCBE)
+ blockRegistrants := make([]func(*common.Block, string, string), 0)
+ txRegistrants := make(map[string]func(string, error))
+
+ // default interested events
+ // TODO: set interestedEvents based on handler registration
+ interestedEvents := []*pb.Interest{{EventType: pb.EventType_BLOCK}, {EventType: pb.EventType_REJECTION}}
+
+ eventHub := &eventHub{chaincodeRegistrants: chaincodeRegistrants, blockRegistrants: blockRegistrants, txRegistrants: txRegistrants, interestedEvents: interestedEvents}
+
+ return eventHub
+}
+
+// SetPeerAddr ...
+/**
+ * Set peer url for event source
+ * Note: Only use this if creating your own EventHub. The chain
+ * creates a default eventHub that most clients can
+ * use (see eventHubConnect, eventHubDisconnect and getEventHub).
+ * @param {string} peerURL peer url
+ */
+func (eventHub *eventHub) SetPeerAddr(peerURL string) {
+ eventHub.peerAddr = peerURL
+}
+
+// IsConnected ...
+/**
+ * Get connected state of eventhub
+ * @returns true if connected to event source, false otherwise
+ */
+func (eventHub *eventHub) IsConnected() bool {
+ return eventHub.connected
+}
+
+// Connect ...
+/**
+ * Establishes connection with peer event source
+ */
+func (eventHub *eventHub) Connect() error {
+ if eventHub.peerAddr == "" {
+ return fmt.Errorf("eventHub.peerAddr is empty")
+ }
+
+ eventHub.mtx.Lock()
+ defer eventHub.mtx.Unlock()
+
+ eventHub.blockRegistrants = make([]func(*common.Block, string, string), 0)
+ eventHub.blockRegistrants = append(eventHub.blockRegistrants, eventHub.txCallback)
+
+	eventsClient, _ := consumer.NewEventsClient(eventHub.peerAddr, 5*time.Second, eventHub)
+ if err := eventsClient.Start(); err != nil {
+ eventsClient.Stop()
+ return fmt.Errorf("Error from eventsClient.Start (%s)", err.Error())
+
+ }
+ eventHub.connected = true
+ eventHub.client = eventsClient
+ return nil
+}
+
+//GetInterestedEvents implements consumer.EventAdapter interface for registering interested events
+func (eventHub *eventHub) GetInterestedEvents() ([]*pb.Interest, error) {
+ return eventHub.interestedEvents, nil
+}
+
+//Recv implements consumer.EventAdapter interface for receiving events
+func (eventHub *eventHub) Recv(msg *pb.Event) (bool, error) {
+ eventHub.mtx.RLock()
+ defer eventHub.mtx.RUnlock()
+
+	switch event := msg.Event.(type) {
+	case *pb.Event_Block:
+		logger.Debugf("Recv blockEvent:%v\n", event)
+		for _, v := range eventHub.blockRegistrants {
+			v(event.Block, "", "")
+		}
+		return true, nil
+	case *pb.Event_ChaincodeEvent:
+		logger.Debugf("Recv ccEvent:%v\n", event)
+
+		cbeArray := eventHub.chaincodeRegistrants[event.ChaincodeEvent.ChaincodeId]
+		if len(cbeArray) == 0 {
+			logger.Debugf("No event registration for ccid %s \n", event.ChaincodeEvent.ChaincodeId)
+		}
+
+		for _, v := range cbeArray {
+			if v.EventNameFilter == event.ChaincodeEvent.EventName {
+				callback := v.CallbackFunc
+				if callback != nil {
+					callback(event.ChaincodeEvent)
+				}
+			}
+		}
+		return true, nil
+	case *pb.Event_Rejection:
+		logger.Debugf("Recv rejectionEvent:%v\n", event)
+		for _, v := range eventHub.blockRegistrants {
+			v(nil, "", event.Rejection.ErrorMsg)
+		}
+		return true, nil
+	default:
+		return true, nil
+	}
+}
+
+// Disconnected implements consumer.EventAdapter interface for receiving events
+/**
+ * Disconnects peer event source
+ * Note: Only use this if creating your own EventHub. The chain
+ * class creates a default eventHub that most clients can
+ * use (see eventHubConnect, eventHubDisconnect and getEventHub).
+ */
+func (eventHub *eventHub) Disconnected(err error) {
+ if !eventHub.connected {
+ return
+ }
+ eventHub.client.Stop()
+ eventHub.connected = false
+
+}
+
+// RegisterChaincodeEvent ...
+/**
+ * Register a callback function to receive chaincode events.
+ * @param {string} ccid string chaincode id
+ * @param {string} eventname string The name used to filter events (matched
+ * as an exact string against the chaincode event name)
+ * @param {function} callback Function Callback function for filter matches
+ * that takes a single parameter: the matching *pb.ChaincodeEvent
+ * @returns {object} ChainCodeCBE object that should be treated as an opaque
+ * handle used to unregister (see unregisterChaincodeEvent)
+ */
+func (eventHub *eventHub) RegisterChaincodeEvent(ccid string, eventname string, callback func(*pb.ChaincodeEvent)) *ChainCodeCBE {
+ if !eventHub.connected {
+ return nil
+ }
+
+ eventHub.mtx.Lock()
+ defer eventHub.mtx.Unlock()
+
+	cbe := ChainCodeCBE{CCID: ccid, EventNameFilter: eventname, CallbackFunc: callback}
+	// append handles both a nil and an existing registration slice
+	eventHub.chaincodeRegistrants[ccid] = append(eventHub.chaincodeRegistrants[ccid], &cbe)
+	return &cbe
+}
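+
+// For example (a sketch; the chaincode ID and event name are hypothetical):
+//
+//	cbe := eventHub.RegisterChaincodeEvent("mycc", "myevent",
+//		func(ce *pb.ChaincodeEvent) {
+//			logger.Debugf("chaincode event received: %v", ce)
+//		})
+//	// ... later, unregister using the returned handle:
+//	eventHub.UnregisterChaincodeEvent(cbe)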
+
+// UnregisterChaincodeEvent ...
+/**
+ * Unregister chaincode event registration
+ * @param {object} ChainCodeCBE handle returned from call to
+ * registerChaincodeEvent.
+ */
+func (eventHub *eventHub) UnregisterChaincodeEvent(cbe *ChainCodeCBE) {
+ if !eventHub.connected {
+ return
+ }
+
+ eventHub.mtx.Lock()
+ defer eventHub.mtx.Unlock()
+
+	cbeArray := eventHub.chaincodeRegistrants[cbe.CCID]
+	if len(cbeArray) == 0 {
+		logger.Debugf("No event registration for ccid %s \n", cbe.CCID)
+		return
+	}
+	// Keep every registration whose filter does not match, then write the
+	// result back to the map (or drop the map entry when none remain)
+	filtered := cbeArray[:0]
+	for _, v := range cbeArray {
+		if v.EventNameFilter != cbe.EventNameFilter {
+			filtered = append(filtered, v)
+		}
+	}
+	if len(filtered) == 0 {
+		delete(eventHub.chaincodeRegistrants, cbe.CCID)
+	} else {
+		eventHub.chaincodeRegistrants[cbe.CCID] = filtered
+	}
+
+}
+
+// RegisterTxEvent ...
+/**
+ * Register a callback function to receive transactional events.
+ * Note: transactional event registration is primarily used by
+ * the SDK to track deploy and invoke completion events. Clients
+ * generally should not need to call this directly.
+ * @param {string} txid string transaction id
+ * @param {function} callback Function that receives the transaction ID and
+ * an error value (nil when the transaction was committed successfully)
+ */
+func (eventHub *eventHub) RegisterTxEvent(txID string, callback func(string, error)) {
+ logger.Debugf("reg txid %s\n", txID)
+
+ eventHub.mtx.Lock()
+ eventHub.txRegistrants[txID] = callback
+ eventHub.mtx.Unlock()
+}
+
+// UnregisterTxEvent ...
+/**
+ * Unregister transactional event registration.
+ * @param txid string transaction id
+ */
+func (eventHub *eventHub) UnregisterTxEvent(txID string) {
+ eventHub.mtx.Lock()
+ delete(eventHub.txRegistrants, txID)
+ eventHub.mtx.Unlock()
+}
+
+/**
+ * private internal callback for processing tx events
+ * @param {object} block json object representing block of tx
+ * from the fabric
+ */
+func (eventHub *eventHub) txCallback(block *common.Block, txID string, errMsg string) {
+	logger.Debugf("txCallback block=%v\n", block)
+
+	// Rejection events are delivered with a nil block; guard against
+	// dereferencing it below
+	if block == nil {
+		return
+	}
+
+	eventHub.mtx.RLock()
+	defer eventHub.mtx.RUnlock()
+
+ for _, v := range block.Data.Data {
+ if env, err := utils.GetEnvelopeFromBlock(v); err != nil {
+ return
+ } else if env != nil {
+ // get the payload from the envelope
+ payload, err := utils.GetPayload(env)
+ if err != nil {
+ return
+ }
+
+ channelHeaderBytes := payload.Header.ChannelHeader
+ channelHeader := &common.ChannelHeader{}
+ err = proto.Unmarshal(channelHeaderBytes, channelHeader)
+ if err != nil {
+ return
+ }
+
+ callback := eventHub.txRegistrants[channelHeader.TxId]
+ if callback != nil {
+ callback(channelHeader.TxId, nil)
+ }
+ }
+
+ }
+
+}
diff --git a/fabric-client/keyvaluestore/filekeyvaluestore.go b/fabric-client/keyvaluestore/filekeyvaluestore.go
new file mode 100644
index 0000000000..42a0f35999
--- /dev/null
+++ b/fabric-client/keyvaluestore/filekeyvaluestore.go
@@ -0,0 +1,89 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package keyvaluestore
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/op/go-logging"
+
+ utils "github.com/hyperledger/fabric/bccsp/utils"
+)
+
+var logger = logging.MustGetLogger("fabric_sdk_go")
+
+// FileKeyValueStore ...
+type FileKeyValueStore struct {
+ path string
+}
+
+// CreateNewFileKeyValueStore ...
+func CreateNewFileKeyValueStore(path string) (*FileKeyValueStore, error) {
+	if len(path) == 0 {
+		return nil, fmt.Errorf("FileKeyValueStore path is empty")
+	}
+	if err := createDirIfNotExists(path); err != nil {
+		return nil, fmt.Errorf("could not create directory [%s]: %v", path, err)
+	}
+	return &FileKeyValueStore{path: path}, nil
+}
+
+// GetValue ...
+/**
+ * Get the value associated with name.
+ * @param {string} name
+ * @returns []byte for the value
+ */
+func (fkvs *FileKeyValueStore) GetValue(key string) ([]byte, error) {
+ file := path.Join(fkvs.path, key+".json")
+ value, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+ return value, nil
+}
+
+// SetValue ...
+/**
+ * Set the value associated with name.
+ * @param {string} name of the key to save
+ * @param {[]byte} value to save
+ */
+func (fkvs *FileKeyValueStore) SetValue(key string, value []byte) error {
+ file := path.Join(fkvs.path, key+".json")
+ err := ioutil.WriteFile(file, value, 0600)
+ if err != nil {
+ return err
+ }
+ return nil
+}
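+
+// For example (a sketch; the path and key are hypothetical):
+//
+//	fkvs, _ := CreateNewFileKeyValueStore("/tmp/keyvaluestore")
+//	_ = fkvs.SetValue("user1", []byte("data")) // writes /tmp/keyvaluestore/user1.json
+//	v, _ := fkvs.GetValue("user1")             // reads the same file back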
+
+// createDirIfNotExists creates the directory (and any missing parents) when
+// it does not already exist or is empty
+func createDirIfNotExists(path string) error {
+	missing, err := utils.DirMissingOrEmpty(path)
+	logger.Infof("KeyStore path [%s] missing [%t]: [%v]", path, missing, err)
+
+	if missing {
+		return os.MkdirAll(path, 0755)
+	}
+	return nil
+}
diff --git a/fabric-client/keyvaluestore/filekeyvaluestore_test.go b/fabric-client/keyvaluestore/filekeyvaluestore_test.go
new file mode 100644
index 0000000000..eae33848e0
--- /dev/null
+++ b/fabric-client/keyvaluestore/filekeyvaluestore_test.go
@@ -0,0 +1,40 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package keyvaluestore
+
+import (
+ "testing"
+)
+
+func TestFKVSMethods(t *testing.T) {
+ stateStore, err := CreateNewFileKeyValueStore("/tmp/keyvaluestore")
+ if err != nil {
+ t.Fatalf("CreateNewFileKeyValueStore return error[%s]", err)
+ }
+	if err := stateStore.SetValue("testvalue", []byte("data")); err != nil {
+		t.Fatalf("stateStore.SetValue returned error[%s]", err)
+	}
+	value, err := stateStore.GetValue("testvalue")
+	if err != nil {
+		t.Fatalf("stateStore.GetValue returned error[%s]", err)
+	}
+	if string(value) != "data" {
+		t.Fatalf("stateStore.GetValue didn't return the right value")
+	}
+
+}
diff --git a/fabric-client/keyvaluestore/keyvaluestore.go b/fabric-client/keyvaluestore/keyvaluestore.go
new file mode 100644
index 0000000000..0cc0112327
--- /dev/null
+++ b/fabric-client/keyvaluestore/keyvaluestore.go
@@ -0,0 +1,44 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package keyvaluestore
+
+// KeyValueStore ...
+/**
+ * Interface for a key-value store. The Chain class uses this store
+ * to save sensitive information such as an authenticated user's private keys,
+ * certificates, etc.
+ *
+ */
+type KeyValueStore interface {
+ /**
+ * Get the value associated with name.
+ *
+ * @param {string} name of the key
+ * @returns {[]byte}
+ */
+ GetValue(key string) ([]byte, error)
+
+ /**
+ * Set the value associated with name.
+ * @param {string} name of the key to save
+ * @param {[]byte} value to save
+ */
+ SetValue(key string, value []byte) error
+}
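+
+// A minimal in-memory implementation sketch, illustrating how a different
+// backend can be plugged in (not part of the SDK):
+//
+//	type memStore struct {
+//		m map[string][]byte
+//	}
+//
+//	func (s *memStore) GetValue(key string) ([]byte, error) {
+//		return s.m[key], nil
+//	}
+//
+//	func (s *memStore) SetValue(key string, value []byte) error {
+//		s.m[key] = value
+//		return nil
+//	}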
diff --git a/fabric-client/mockorderer.go b/fabric-client/mockorderer.go
new file mode 100644
index 0000000000..76410e7fcb
--- /dev/null
+++ b/fabric-client/mockorderer.go
@@ -0,0 +1,36 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+// mockOrderer is a mock fabricclient.Orderer
+type mockOrderer struct {
+ MockURL string
+ MockError error
+}
+
+// GetURL returns the mock URL of the mock Orderer
+func (o *mockOrderer) GetURL() string {
+ return o.MockURL
+}
+
+// SendBroadcast mocks sending a broadcast by sending nothing nowhere
+func (o *mockOrderer) SendBroadcast(envelope *SignedEnvelope) error {
+ return o.MockError
+}
diff --git a/fabric-client/mockpeer.go b/fabric-client/mockpeer.go
new file mode 100644
index 0000000000..cfabea9628
--- /dev/null
+++ b/fabric-client/mockpeer.go
@@ -0,0 +1,100 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+// TODO: Move protos to this library
+import (
+ "encoding/pem"
+ "errors"
+
+ pb "github.com/hyperledger/fabric/protos/peer"
+)
+
+// mockPeer is a mock fabricsdk.Peer.
+type mockPeer struct {
+ MockName string
+ MockURL string
+ MockRoles []string
+ MockCert *pem.Block
+}
+
+// ConnectEventSource does not connect anywhere
+func (p *mockPeer) ConnectEventSource() {
+ // done.
+}
+
+// IsEventListened always returns true
+func (p *mockPeer) IsEventListened(event string, chain Chain) (bool, error) {
+ return true, nil
+}
+
+// AddListener is not implemented
+func (p *mockPeer) AddListener(eventType string, eventTypeData interface{}, eventCallback interface{}) (string, error) {
+ return "", errors.New("Not implemented")
+}
+
+// RemoveListener is not implemented
+func (p *mockPeer) RemoveListener(eventListenerRef string) (bool, error) {
+ return false, errors.New("Not implemented")
+}
+
+// GetName returns the mock peer's mock name
+func (p *mockPeer) GetName() string {
+ return p.MockName
+}
+
+// SetName sets the mock peer's mock name
+func (p *mockPeer) SetName(name string) {
+ p.MockName = name
+}
+
+// GetRoles returns the mock peer's mock roles
+func (p *mockPeer) GetRoles() []string {
+ return p.MockRoles
+}
+
+// SetRoles sets the mock peer's mock roles
+func (p *mockPeer) SetRoles(roles []string) {
+ p.MockRoles = roles
+}
+
+// GetEnrollmentCertificate returns the mock peer's mock enrollment certificate
+func (p *mockPeer) GetEnrollmentCertificate() *pem.Block {
+ return p.MockCert
+}
+
+// SetEnrollmentCertificate sets the mock peer's mock enrollment certificate
+func (p *mockPeer) SetEnrollmentCertificate(pem *pem.Block) {
+ p.MockCert = pem
+}
+
+// GetURL returns the mock peer's mock URL
+func (p *mockPeer) GetURL() string {
+ return p.MockURL
+}
+
+// SendProposal does not send anything anywhere but returns an empty mock ProposalResponse
+func (p *mockPeer) SendProposal(tp *TransactionProposal) (*TransactionProposalResponse, error) {
+ return &TransactionProposalResponse{
+ Endorser: p.MockURL,
+ proposal: tp,
+ proposalResponse: &pb.ProposalResponse{},
+ }, nil
+}
diff --git a/fabric-client/mocks/mockcryptosuite.go b/fabric-client/mocks/mockcryptosuite.go
new file mode 100644
index 0000000000..e0b8f06203
--- /dev/null
+++ b/fabric-client/mocks/mockcryptosuite.go
@@ -0,0 +1,67 @@
+package mocks
+
+import (
+ "hash"
+
+ "github.com/hyperledger/fabric/bccsp"
+)
+
+// MockCryptoSuite implementation
+type MockCryptoSuite struct {
+}
+
+// KeyGen mock key gen
+func (m *MockCryptoSuite) KeyGen(opts bccsp.KeyGenOpts) (k bccsp.Key, err error) {
+ return nil, nil
+}
+
+// KeyDeriv mock key derivation
+func (m *MockCryptoSuite) KeyDeriv(k bccsp.Key,
+ opts bccsp.KeyDerivOpts) (dk bccsp.Key, err error) {
+ return nil, nil
+}
+
+// KeyImport mock key import
+func (m *MockCryptoSuite) KeyImport(raw interface{},
+ opts bccsp.KeyImportOpts) (k bccsp.Key, err error) {
+ return nil, nil
+}
+
+// GetKey mock get key
+func (m *MockCryptoSuite) GetKey(ski []byte) (k bccsp.Key, err error) {
+ return nil, nil
+}
+
+// Hash mock hash
+func (m *MockCryptoSuite) Hash(msg []byte, opts bccsp.HashOpts) (hash []byte, err error) {
+ return nil, nil
+}
+
+// GetHash mock get hash
+func (m *MockCryptoSuite) GetHash(opts bccsp.HashOpts) (h hash.Hash, err error) {
+ return nil, nil
+}
+
+// Sign mock signing
+func (m *MockCryptoSuite) Sign(k bccsp.Key, digest []byte,
+ opts bccsp.SignerOpts) (signature []byte, err error) {
+ return []byte("testSignature"), nil
+}
+
+// Verify mock verify
+func (m *MockCryptoSuite) Verify(k bccsp.Key, signature, digest []byte,
+ opts bccsp.SignerOpts) (valid bool, err error) {
+ return false, nil
+}
+
+// Encrypt mock encrypt
+func (m *MockCryptoSuite) Encrypt(k bccsp.Key, plaintext []byte,
+ opts bccsp.EncrypterOpts) (ciphertext []byte, err error) {
+ return nil, nil
+}
+
+// Decrypt mock decrypt
+func (m *MockCryptoSuite) Decrypt(k bccsp.Key, ciphertext []byte,
+ opts bccsp.DecrypterOpts) (plaintext []byte, err error) {
+ return nil, nil
+}
diff --git a/fabric-client/orderer.go b/fabric-client/orderer.go
new file mode 100644
index 0000000000..4e241dfa76
--- /dev/null
+++ b/fabric-client/orderer.go
@@ -0,0 +1,120 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ config "github.com/hyperledger/fabric-sdk-go/config"
+ "github.com/hyperledger/fabric/protos/common"
+ ab "github.com/hyperledger/fabric/protos/orderer"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+)
+
+// Orderer ...
+/**
+ * The Orderer class represents an orderer node in the target blockchain network
+ * to which HFC sends blocks of transactions of endorsed proposals for ordering.
+ *
+ */
+type Orderer interface {
+ GetURL() string
+ SendBroadcast(envelope *SignedEnvelope) error
+}
+
+type orderer struct {
+ url string
+ grpcDialOption []grpc.DialOption
+}
+
+// CreateNewOrderer ...
+/**
+ * Returns an Orderer instance
+ */
+func CreateNewOrderer(url string) Orderer {
+ var opts []grpc.DialOption
+ opts = append(opts, grpc.WithTimeout(time.Second*3))
+ if config.IsTLSEnabled() {
+ creds := credentials.NewClientTLSFromCert(config.GetTLSCACertPool(), config.GetTLSServerHostOverride())
+ opts = append(opts, grpc.WithTransportCredentials(creds))
+ } else {
+ opts = append(opts, grpc.WithInsecure())
+ }
+ return &orderer{url: url, grpcDialOption: opts}
+}
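+
+// For example (a sketch; the address is hypothetical and "chain" is an
+// already-created Chain):
+//
+//	orderer := CreateNewOrderer("localhost:7050")
+//	chain.AddOrderer(orderer)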
+
+// GetURL ...
+/**
+ * Get the Orderer url. Required property for the instance objects.
+ * @returns {string} The address of the Orderer
+ */
+func (o *orderer) GetURL() string {
+ return o.url
+}
+
+// SendBroadcast ...
+/**
+ * Send the created transaction to Orderer.
+ */
+func (o *orderer) SendBroadcast(envelope *SignedEnvelope) error {
+ conn, err := grpc.Dial(o.url, o.grpcDialOption...)
+ if err != nil {
+ return err
+ }
+ defer conn.Close()
+
+ broadcastStream, err := ab.NewAtomicBroadcastClient(conn).Broadcast(context.Background())
+ if err != nil {
+		return fmt.Errorf("Error creating broadcast stream via NewAtomicBroadcastClient: %v", err)
+ }
+ done := make(chan bool)
+ var broadcastErr error
+ go func() {
+ for {
+ broadcastResponse, err := broadcastStream.Recv()
+ logger.Debugf("Orderer.broadcastStream - response:%v, error:%v\n", broadcastResponse, err)
+ if err != nil {
+ if strings.Contains(err.Error(), io.EOF.Error()) {
+ done <- true
+ return
+ }
+ broadcastErr = fmt.Errorf("error broadcast response : %v", err)
+ continue
+ }
+ if broadcastResponse.Status != common.Status_SUCCESS {
+				broadcastErr = fmt.Errorf("broadcast response is not success: %v", broadcastResponse.Status)
+ }
+ }
+ }()
+ if err := broadcastStream.Send(&common.Envelope{
+ Payload: envelope.Payload,
+ Signature: envelope.signature,
+ }); err != nil {
+		return fmt.Errorf("Failed to send an envelope to orderer: %v", err)
+ }
+ broadcastStream.CloseSend()
+ <-done
+ return broadcastErr
+}
diff --git a/fabric-client/orderer_test.go b/fabric-client/orderer_test.go
new file mode 100644
index 0000000000..1f83b40836
--- /dev/null
+++ b/fabric-client/orderer_test.go
@@ -0,0 +1,102 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+import (
+ "testing"
+)
+
+//
+// Orderer via chain setOrderer/getOrderer
+//
+// Set the orderer URL through the chain setOrderer method. Verify that the
+// orderer URL was set correctly through the getOrderer method. Repeat the
+// process by updating the orderer URL to a different address.
+//
+func TestOrdererViaChain(t *testing.T) {
+ client := NewClient()
+ chain, err := client.NewChain("testChain-orderer-member")
+ if err != nil {
+ t.Fatalf("error from NewChain %v", err)
+ }
+ orderer := CreateNewOrderer("localhost:7050")
+ chain.AddOrderer(orderer)
+
+ orderers := chain.GetOrderers()
+ if orderers == nil || len(orderers) != 1 || orderers[0].GetURL() != "localhost:7050" {
+		t.Fatalf("Failed to retrieve the new orderer URL from the chain")
+ }
+ chain.RemoveOrderer(orderer)
+ orderer2 := CreateNewOrderer("localhost:7054")
+ chain.AddOrderer(orderer2)
+ orderers = chain.GetOrderers()
+
+ if orderers == nil || len(orderers) != 1 || orderers[0].GetURL() != "localhost:7054" {
+		t.Fatalf("Failed to retrieve the new orderer URL from the chain")
+ }
+
+}
+
+//
+// Orderer via chain missing orderer
+//
+// Attempt to send a request to the orderer with the sendTransaction method
+// before the orderer URL was set. Verify that an error is reported when trying
+// to send the request.
+//
+func TestOrdererViaChainMissingOrderer(t *testing.T) {
+ client := NewClient()
+ chain, err := client.NewChain("testChain-orderer-member2")
+ if err != nil {
+ t.Fatalf("error from NewChain %v", err)
+ }
+	_, err = chain.SendTransaction(nil)
+	if err == nil {
+		t.Fatalf("SendTransaction didn't return an error")
+	}
+	if err.Error() != "orderers is nil" {
+		t.Fatalf("SendTransaction didn't return the right error")
+	}
+
+}
+
+//
+// Orderer via chain nil data
+//
+// Attempt to send a request to the orderer with the sendTransaction method
+// with the data set to nil. Verify that an error is reported when trying
+// to send nil data.
+//
+func TestOrdererViaChainNilData(t *testing.T) {
+ client := NewClient()
+ chain, err := client.NewChain("testChain-orderer-member2")
+ if err != nil {
+ t.Fatalf("error from NewChain %v", err)
+ }
+ orderer := CreateNewOrderer("localhost:7050")
+ chain.AddOrderer(orderer)
+	_, err = chain.SendTransaction(nil)
+	if err == nil {
+		t.Fatalf("SendTransaction didn't return an error")
+	}
+	if err.Error() != "proposal is nil" {
+		t.Fatalf("SendTransaction didn't return the right error")
+	}
+}
diff --git a/fabric-client/packager.go b/fabric-client/packager.go
new file mode 100644
index 0000000000..56ae71d828
--- /dev/null
+++ b/fabric-client/packager.go
@@ -0,0 +1,52 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+import (
+ "fmt"
+
+ packager "github.com/hyperledger/fabric-sdk-go/fabric-client/packager"
+)
+
+// PackageCC ...
+/**
+ * Utility function to package a chaincode. The contents will be returned as a byte array.
+ *
+ * @param {string} chaincodePath required - String of the path to location of
+ * the source code of the chaincode
+ * @param {string} chaincodeType optional - String of the type of chaincode
+ * ['golang', 'car', 'java'] (default 'golang')
+ * @returns {[]byte} byte array
+ */
+func PackageCC(chaincodePath string, chaincodeType string) ([]byte, error) {
+ logger.Debugf("packager: chaincodePath: %s, chaincodeType: %s", chaincodePath, chaincodeType)
+ if chaincodePath == "" {
+ return nil, fmt.Errorf("Missing 'chaincodePath' parameter")
+ }
+ if chaincodeType == "" {
+ chaincodeType = "golang"
+ }
+ logger.Debugf("packager: type %s ", chaincodeType)
+ switch chaincodeType {
+ case "golang":
+ return packager.PackageGoLangCC(chaincodePath)
+ }
+ return nil, fmt.Errorf("Undefined 'chaincodeType' value")
+}
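+
+// For example (a sketch; the chaincode path is hypothetical and must exist
+// under $GOPATH/src):
+//
+//	ccBytes, err := PackageCC("github.com/example_cc", "golang")
+//	if err != nil {
+//		// handle error
+//	}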
diff --git a/fabric-client/packager/golang.go b/fabric-client/packager/golang.go
new file mode 100644
index 0000000000..6ac2d41e96
--- /dev/null
+++ b/fabric-client/packager/golang.go
@@ -0,0 +1,187 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package packager
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "time"
+
+ "github.com/op/go-logging"
+)
+
+// Descriptor ...
+type Descriptor struct {
+ name string
+ fqp string
+}
+
+// A list of file extensions that should be packaged into the .tar.gz.
+// Files with all other file extensions will be excluded to minimize the size
+// of the install payload.
+var keep = []string{".go", ".c", ".h"}
+
+var logger = logging.MustGetLogger("fabric_sdk_go")
+
+// PackageGoLangCC ...
+func PackageGoLangCC(chaincodePath string) ([]byte, error) {
+
+ // Determine the user's $GOPATH
+ goPath := os.Getenv("GOPATH")
+ if goPath == "" {
+ return nil, fmt.Errorf("GOPATH environment variable not defined")
+ }
+ logger.Debugf("GOPATH environment variable=%s", goPath)
+
+ // Compose the path to the chaincode project directory
+ projDir := path.Join(goPath, "src", chaincodePath)
+
+ logger.Debugf("projDir variable=%s", projDir)
+
+ // We generate the tar in two phases: First grab a list of descriptors,
+ // and then pack them into an archive. While the two phases aren't
+ // strictly necessary yet, they pave the way for the future where we
+ // will need to assemble sources from multiple packages
+ descriptors, err := findSource(goPath, projDir)
+ if err != nil {
+ return nil, err
+ }
+ tarBytes, err := generateTarGz(descriptors)
+ if err != nil {
+ return nil, err
+ }
+ return tarBytes, nil
+}
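+
+// For example (a sketch): with GOPATH=/opt/gopath, PackageGoLangCC("github.com/example_cc")
+// archives the .go/.c/.h files under /opt/gopath/src/github.com/example_cc, naming each
+// tar entry relative to $GOPATH (e.g. "src/github.com/example_cc/example_cc.go").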
+
+// -------------------------------------------------------------------------
+// findSource(goPath, filePath)
+// -------------------------------------------------------------------------
+// Given an input 'filePath', recursively parse the filesystem for any files
+// that fit the criteria for being valid golang source (ISREG + (*.(go|c|h)))
+// As a convenience, we also formulate a tar-friendly "name" for each file
+// based on relative position to 'goPath'.
+// -------------------------------------------------------------------------
+func findSource(goPath string, filePath string) ([]*Descriptor, error) {
+ var descriptors []*Descriptor
+ err := filepath.Walk(filePath,
+ func(path string, fileInfo os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if fileInfo.Mode().IsRegular() && isSource(path) {
+ relPath, err := filepath.Rel(goPath, path)
+ if err != nil {
+ return err
+ }
+ descriptors = append(descriptors, &Descriptor{name: relPath, fqp: path})
+ }
+ return nil
+
+ })
+ if err != nil {
+ return descriptors, err
+ }
+ return descriptors, nil
+}
+
+// -------------------------------------------------------------------------
+// isSource(path)
+// -------------------------------------------------------------------------
+// predicate function for determining whether a given path should be
+// considered valid source code, based entirely on the extension. It is
+// assumed that other checks for file type have already been performed.
+// -------------------------------------------------------------------------
+func isSource(filePath string) bool {
+ var extension = filepath.Ext(filePath)
+ for _, v := range keep {
+ if v == extension {
+ return true
+ }
+ }
+ return false
+}
+
+// -------------------------------------------------------------------------
+// generateTarGz(descriptors)
+// -------------------------------------------------------------------------
+// creates an .tar.gz stream from the provided descriptor entries
+// -------------------------------------------------------------------------
+func generateTarGz(descriptors []*Descriptor) ([]byte, error) {
+ // set up the gzip writer
+ var codePackage bytes.Buffer
+ gw := gzip.NewWriter(&codePackage)
+ tw := tar.NewWriter(gw)
+ for _, v := range descriptors {
+ logger.Debugf("generateTarGz for %s", v.fqp)
+ err := packEntry(tw, gw, v)
+ if err != nil {
+ closeStream(tw, gw)
+			return nil, fmt.Errorf("error from packEntry for %s: %s", v.fqp, err.Error())
+ }
+ }
+ closeStream(tw, gw)
+ return codePackage.Bytes(), nil
+
+}
+
+func closeStream(tw *tar.Writer, gw *gzip.Writer) {
+ tw.Close()
+ gw.Close()
+}
+
+func packEntry(tw *tar.Writer, gw *gzip.Writer, descriptor *Descriptor) error {
+ file, err := os.Open(descriptor.fqp)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ if stat, err := file.Stat(); err == nil {
+
+ // now lets create the header as needed for this file within the tarball
+ header := new(tar.Header)
+ header.Name = descriptor.name
+ header.Size = stat.Size()
+ header.Mode = int64(stat.Mode())
+ // Use a deterministic "zero-time" for all date fields
+ header.ModTime = time.Time{}
+ header.AccessTime = time.Time{}
+ header.ChangeTime = time.Time{}
+ // write the header to the tarball archive
+ if err := tw.WriteHeader(header); err != nil {
+ return err
+ }
+
+ // copy the file data to the tarball
+
+ if _, err := io.Copy(tw, file); err != nil {
+ return err
+ }
+ tw.Flush()
+ gw.Flush()
+
+ }
+ return nil
+}
diff --git a/fabric-client/packager/golang_test.go b/fabric-client/packager/golang_test.go
new file mode 100644
index 0000000000..dfd6aa552f
--- /dev/null
+++ b/fabric-client/packager/golang_test.go
@@ -0,0 +1,78 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package packager
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "testing"
+)
+
+// Test Package Go ChainCode
+func TestPackageGoLangCC(t *testing.T) {
+ pwd, err := os.Getwd()
+ if err != nil {
+ t.Fatalf("error from os.Getwd %v", err)
+ }
+ os.Setenv("GOPATH", path.Join(pwd, "../../test/fixtures"))
+
+ ccPackage, err := PackageGoLangCC("github.com")
+ if err != nil {
+ t.Fatalf("error from PackageGoLangCC %v", err)
+ }
+
+ r := bytes.NewReader(ccPackage)
+ gzf, err := gzip.NewReader(r)
+ if err != nil {
+ t.Fatalf("error from gzip.NewReader %v", err)
+ }
+ tarReader := tar.NewReader(gzf)
+ i := 0
+ exampleccExist := false
+ for {
+ header, err := tarReader.Next()
+
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ t.Fatalf("error from tarReader.Next() %v", err)
+ }
+ fmt.Println(header.Name)
+ if header.Name == "src/github.com/example_cc/example_cc.go" {
+ exampleccExist = true
+ }
+ i++
+ }
+
+ if !exampleccExist {
+ t.Fatalf("src/github.com/example_cc/example_cc.go not exist in tar file")
+ }
+ if i != 1 {
+ t.Fatalf("number of header in tar file should be 1")
+ }
+
+}
diff --git a/fabric-client/peer.go b/fabric-client/peer.go
new file mode 100644
index 0000000000..442cd623aa
--- /dev/null
+++ b/fabric-client/peer.go
@@ -0,0 +1,237 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+import (
+ "encoding/pem"
+ "time"
+
+ pb "github.com/hyperledger/fabric/protos/peer"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ config "github.com/hyperledger/fabric-sdk-go/config"
+)
+
+// Peer ...
+/**
+ * The Peer class represents a peer in the target blockchain network to which
+ * HFC sends endorsement proposals, transaction ordering or query requests.
+ *
+ * The Peer class represents the remote Peer node and its network membership materials,
+ * aka the ECert used to verify signatures. Peer membership represents organizations,
+ * unlike User membership which represents individuals.
+ *
+ * When constructed, a Peer instance can be designated as an event source, in which case
+ * an “eventSourceUrl” attribute should be configured. This allows the SDK to automatically
+ * attach transaction event listeners to the event stream.
+ *
+ * It should be noted that Peer event streams function at the Peer level and not at the
+ * chain and chaincode levels.
+ */
+type Peer interface {
+ ConnectEventSource()
+ IsEventListened(event string, chain Chain) (bool, error)
+ AddListener(eventType string, eventTypeData interface{}, eventCallback interface{}) (string, error)
+ RemoveListener(eventListenerRef string) (bool, error)
+ GetName() string
+ SetName(name string)
+ GetRoles() []string
+ SetRoles(roles []string)
+ GetEnrollmentCertificate() *pem.Block
+ SetEnrollmentCertificate(pem *pem.Block)
+ GetURL() string
+ SendProposal(proposal *TransactionProposal) (*TransactionProposalResponse, error)
+}
+
+type peer struct {
+ url string
+ grpcDialOption []grpc.DialOption
+ name string
+ roles []string
+ enrollmentCertificate *pem.Block
+}
+
+// CreateNewPeer ...
+/**
+ * Constructs a Peer given its endpoint configuration settings.
+ *
+ * @param {string} url The URL with format of "host:port".
+ */
+func CreateNewPeer(url string) Peer {
+ var opts []grpc.DialOption
+ opts = append(opts, grpc.WithTimeout(time.Second*3))
+ if config.IsTLSEnabled() {
+ creds := credentials.NewClientTLSFromCert(config.GetTLSCACertPool(), config.GetTLSServerHostOverride())
+ opts = append(opts, grpc.WithTransportCredentials(creds))
+ } else {
+ opts = append(opts, grpc.WithInsecure())
+ }
+ return &peer{url: url, grpcDialOption: opts, name: "", roles: nil}
+}
+
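+// Minimal usage sketch (the endpoint and proposal value are illustrative; TLS
+// and timeout options are read from the SDK config as shown above, and the
+// proposal is assumed to come from Chain.CreateTransactionProposal):
+//
+//   peer := CreateNewPeer("localhost:7051")
+//   peer.SetName("peer0")
+//   response, err := peer.SendProposal(signedProposal)
+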
+// ConnectEventSource ...
+/**
+ * Since practically all Peers are event producers, when constructing a Peer instance,
+ * an application can designate it as the event source for the application. Typically
+ * only one of the Peers on a Chain needs to be the event source, because all Peers on
+ * the Chain produce the same events. This method tells the SDK which Peer(s) to use as
+ * the event source for the client application. It is the responsibility of the SDK to
+ * manage the connection lifecycle to the Peer’s EventHub. It is the responsibility of
+ * the Client Application to understand and inform the selected Peer as to which event
+ * types it wants to receive and the callback functions to use.
+ * @returns {Future} This gives the app a handle to attach “success” and “error” listeners
+ */
+func (p *peer) ConnectEventSource() {
+ //to do
+}
+
+// IsEventListened ...
+/**
+ * A network call that discovers if at least one listener has been connected to the target
+ * Peer for a given event. This helps an application instance decide whether it needs to
+ * connect to the event source in a crash recovery or multiple instance deployment.
+ * @param {string} eventName required
+ * @param {Chain} chain optional
+ * @result {bool} Whether the said event has been listened on by some application instance on that chain.
+ */
+func (p *peer) IsEventListened(event string, chain Chain) (bool, error) {
+ //to do
+ return false, nil
+}
+
+// AddListener ...
+/**
+ * For a Peer that is connected to eventSource, the addListener registers an EventCallBack for a
+ * set of event types. addListener can be invoked multiple times to support differing EventCallBack
+ * functions receiving different types of events.
+ *
+ * Note that the parameters below are optional in certain languages, like Java, which construct an
+ * instance of a listener interface and pass that instance in as the parameter.
+ * @param {string} eventType : ie. Block, Chaincode, Transaction
+ * @param {object} eventTypeData : Object Specific for event type as necessary, currently needed
+ * for “Chaincode” event type, specifying a matching pattern to the event name set in the chaincode(s)
+ * being executed on the target Peer, and for “Transaction” event type, specifying the transaction ID
+ * @param {struct} eventCallback Client Application class registering for the callback.
+ * @returns {string} An ID reference to the event listener.
+ */
+func (p *peer) AddListener(eventType string, eventTypeData interface{}, eventCallback interface{}) (string, error) {
+ //to do
+ return "", nil
+}
+
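+// Illustrative sketch of the intended flow once the listener methods are
+// implemented (both are currently stubs; the callback name is hypothetical):
+//
+//   ref, err := peer.AddListener("Transaction", txID, onTxEvent)
+//   ...
+//   ok, err := peer.RemoveListener(ref)
+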
+// RemoveListener ...
+/**
+ * Unregisters a listener.
+ * @param {string} eventListenerRef Reference returned by SDK for event listener.
+ * @return {bool} Success / Failure status
+ */
+func (p *peer) RemoveListener(eventListenerRef string) (bool, error) {
+ //to do
+ return false, nil
+}
+
+// GetName ...
+/**
+ * Get the Peer name. Required property for the instance objects.
+ * @returns {string} The name of the Peer
+ */
+func (p *peer) GetName() string {
+ return p.name
+}
+
+// SetName ...
+/**
+ * Set the Peer name / id.
+ * @param {string} name
+ */
+func (p *peer) SetName(name string) {
+ p.name = name
+}
+
+// GetRoles ...
+/**
+ * Get the user’s roles the Peer participates in. It’s an array with possible values
+ * “client” and “auditor”. The member service defines two more roles reserved
+ * for peer membership: “peer” and “validator”, which are not exposed to the applications.
+ * @returns {[]string} The roles for this user.
+ */
+func (p *peer) GetRoles() []string {
+ return p.roles
+}
+
+// SetRoles ...
+/**
+ * Set the user’s roles the Peer participates in. See getRoles() for legitimate values.
+ * @param {[]string} roles The list of roles for the user.
+ */
+func (p *peer) SetRoles(roles []string) {
+ p.roles = roles
+}
+
+// GetEnrollmentCertificate ...
+/**
+ * Returns the Peer's enrollment certificate.
+ * @returns {pem.Block} Certificate in PEM format signed by the trusted CA
+ */
+func (p *peer) GetEnrollmentCertificate() *pem.Block {
+ return p.enrollmentCertificate
+}
+
+// SetEnrollmentCertificate ...
+/**
+ * Set the Peer’s enrollment certificate.
+ * @param {pem.Block} enrollment Certificate in PEM format signed by the trusted CA
+ */
+func (p *peer) SetEnrollmentCertificate(pem *pem.Block) {
+ p.enrollmentCertificate = pem
+}
+
+// GetURL ...
+/**
+ * Get the Peer url. Required property for the instance objects.
+ * @returns {string} The address of the Peer
+ */
+func (p *peer) GetURL() string {
+ return p.url
+}
+
+// SendProposal ...
+/**
+ * Send the created proposal to peer for endorsement.
+ */
+func (p *peer) SendProposal(proposal *TransactionProposal) (*TransactionProposalResponse, error) {
+ conn, err := grpc.Dial(p.url, p.grpcDialOption...)
+ if err != nil {
+ return nil, err
+ }
+ defer conn.Close()
+ endorserClient := pb.NewEndorserClient(conn)
+ proposalResponse, err := endorserClient.ProcessProposal(context.Background(), proposal.signedProposal)
+ if err != nil {
+ return nil, err
+ }
+ return &TransactionProposalResponse{
+ proposal: proposal,
+ proposalResponse: proposalResponse,
+ Endorser: p.url,
+ }, nil
+}
diff --git a/fabric-client/peer_test.go b/fabric-client/peer_test.go
new file mode 100644
index 0000000000..abc49f4491
--- /dev/null
+++ b/fabric-client/peer_test.go
@@ -0,0 +1,100 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+import (
+ "testing"
+)
+
+//
+// Peer via chain AddPeer/GetPeers
+//
+// Set the Peer URL through the chain AddPeer method. Verify that the
+// Peer URL was set correctly through the GetPeers method. Repeat the
+// process by updating the Peer URL to a different address.
+//
+func TestPeerViaChain(t *testing.T) {
+ client := NewClient()
+ chain, err := client.NewChain("testChain-peer")
+ if err != nil {
+ t.Fatalf("error from NewChain %v", err)
+ }
+ peer := CreateNewPeer("localhost:7050")
+ chain.AddPeer(peer)
+
+ peers := chain.GetPeers()
+ if peers == nil || len(peers) != 1 || peers[0].GetURL() != "localhost:7050" {
+ t.Fatalf("Failed to retieve the new peers URL from the chain")
+ }
+ chain.RemovePeer(peer)
+ peer2 := CreateNewPeer("localhost:7054")
+ chain.AddPeer(peer2)
+ peers = chain.GetPeers()
+
+ if peers == nil || len(peers) != 1 || peers[0].GetURL() != "localhost:7054" {
+ t.Fatalf("Failed to retieve the new peers URL from the chain")
+ }
+}
+
+//
+// Peer via chain missing peer
+//
+// Attempt to send a request to the peer with the SendTransactionProposal method
+// before any peer was added. Verify that an error is reported when trying
+// to send the request.
+//
+func TestPeerViaChainMissingPeer(t *testing.T) {
+ client := NewClient()
+ chain, err := client.NewChain("testChain-peer")
+ if err != nil {
+ t.Fatalf("error from NewChain %v", err)
+ }
+ _, err = chain.SendTransactionProposal(nil, 0)
+ if err == nil {
+ t.Fatalf("SendTransactionProposal didn't return error")
+ }
+ if err.Error() != "peers is nil" {
+ t.Fatalf("SendTransactionProposal didn't return right error")
+ }
+}
+
+//
+// Peer via chain nil data
+//
+// Attempt to send a request to the peers with the SendTransactionProposal method
+// with the data set to nil. Verify that an error is reported when trying
+// to send nil data.
+//
+func TestPeerViaChainNilData(t *testing.T) {
+ client := NewClient()
+ chain, err := client.NewChain("testChain-peer")
+ if err != nil {
+ t.Fatalf("error from NewChain %v", err)
+ }
+ peer := CreateNewPeer("localhost:7050")
+ chain.AddPeer(peer)
+ _, err = chain.SendTransactionProposal(nil, 0)
+ if err == nil {
+ t.Fatalf("SendTransaction didn't return error")
+ }
+ if err.Error() != "signedProposal is nil" {
+ t.Fatalf("SendTransactionProposal didn't return right error")
+ }
+}
diff --git a/fabric-client/user.go b/fabric-client/user.go
new file mode 100644
index 0000000000..e869b1b391
--- /dev/null
+++ b/fabric-client/user.go
@@ -0,0 +1,147 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+import (
+ "github.com/hyperledger/fabric/bccsp"
+)
+
+// User ...
+/**
+ * The User struct represents users that have been enrolled and represented by
+ * an enrollment certificate (ECert) and a signing key. The ECert must have
+ * been signed by one of the CAs the blockchain network has been configured to trust.
+ * An enrolled user (having a signing key and ECert) can conduct chaincode deployments,
+ * transactions and queries with the Chain.
+ *
+ * User ECerts can be obtained from a CA beforehand as part of deploying the application,
+ * or it can be obtained from the optional Fabric COP service via its enrollment process.
+ *
+ * Sometimes User identities are confused with Peer identities. User identities represent
+ * signing capability because they have access to the private key, while Peer identities in
+ * the context of the application/SDK only have the certificate for verifying signatures.
+ * An application cannot use the Peer identity to sign things because the application doesn’t
+ * have access to the Peer identity’s private key.
+ *
+ */
+type User interface {
+ GetName() string
+ GetRoles() []string
+ SetRoles([]string)
+ GetEnrollmentCertificate() []byte
+ SetEnrollmentCertificate(cert []byte)
+ SetPrivateKey(privateKey bccsp.Key)
+ GetPrivateKey() bccsp.Key
+ GenerateTcerts(count int, attributes []string)
+}
+
+type user struct {
+ name string
+ roles []string
+ PrivateKey bccsp.Key // This key is temporary; we use it to sign transactions until we have TCerts
+ enrollmentCertificate []byte
+}
+
+// UserJSON ...
+type UserJSON struct {
+ PrivateKeySKI []byte
+ EnrollmentCertificate []byte
+}
+
+// NewUser ...
+/**
+ * Constructor for a user.
+ *
+ * @param {string} name - The user name
+ */
+func NewUser(name string) User {
+ return &user{name: name}
+}
+
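+// Minimal usage sketch (the key and certificate are assumed to come from an
+// enrollment step, e.g. the fabric-ca client's Enroll call):
+//
+//   user := NewUser("testUser")
+//   user.SetRoles([]string{"admin", "user"})
+//   user.SetEnrollmentCertificate(certBytes)
+//   user.SetPrivateKey(importedKey)
+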
+// GetName ...
+/**
+ * Get the user name.
+ * @returns {string} The user name.
+ */
+func (u *user) GetName() string {
+ return u.name
+}
+
+// GetRoles ...
+/**
+ * Get the roles.
+ * @returns {[]string} The roles.
+ */
+func (u *user) GetRoles() []string {
+ return u.roles
+}
+
+// SetRoles ...
+/**
+ * Set the roles.
+ * @param roles {[]string} The roles.
+ */
+func (u *user) SetRoles(roles []string) {
+ u.roles = roles
+}
+
+// GetEnrollmentCertificate ...
+/**
+ * Returns the underlying ECert representing this user’s identity.
+ */
+func (u *user) GetEnrollmentCertificate() []byte {
+ return u.enrollmentCertificate
+}
+
+// SetEnrollmentCertificate ...
+/**
+ * Set the user’s Enrollment Certificate.
+ */
+func (u *user) SetEnrollmentCertificate(cert []byte) {
+ u.enrollmentCertificate = cert
+}
+
+// SetPrivateKey ...
+/**
+ * deprecated.
+ */
+func (u *user) SetPrivateKey(privateKey bccsp.Key) {
+ u.PrivateKey = privateKey
+}
+
+// GetPrivateKey ...
+/**
+ * deprecated.
+ */
+func (u *user) GetPrivateKey() bccsp.Key {
+ return u.PrivateKey
+}
+
+// GenerateTcerts ...
+/**
+ * Gets a batch of TCerts to use for transactions. There is a 1-to-1 relationship between
+ * TCert and Transaction. The TCert can be generated locally by the SDK using the user’s crypto materials.
+ * @param {int} count how many in the batch to obtain
+ * @param {[]string} attributes list of attributes to include in the TCert
+ * @return {[]tcert} An array of TCerts
+ */
+func (u *user) GenerateTcerts(count int, attributes []string) {
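+ //to do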
+}
diff --git a/fabric-client/user_test.go b/fabric-client/user_test.go
new file mode 100644
index 0000000000..7eaf02ef88
--- /dev/null
+++ b/fabric-client/user_test.go
@@ -0,0 +1,43 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fabricclient
+
+import (
+ "testing"
+)
+
+func TestUserMethods(t *testing.T) {
+ user := NewUser("testUser")
+ if user.GetName() != "testUser" {
+ t.Fatalf("NewUser create wrong user")
+ }
+ var roles []string
+ roles = append(roles, "admin")
+ roles = append(roles, "user")
+ user.SetRoles(roles)
+
+ if user.GetRoles()[0] != "admin" {
+ t.Fatalf("user.GetRoles() return wrong user")
+ }
+ if user.GetRoles()[1] != "user" {
+ t.Fatalf("user.GetRoles() return wrong user")
+ }
+
+}
diff --git a/test/fixtures/config/config_test.yaml b/test/fixtures/config/config_test.yaml
new file mode 100644
index 0000000000..0fba2a2c83
--- /dev/null
+++ b/test/fixtures/config/config_test.yaml
@@ -0,0 +1,46 @@
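+# Test configuration for the SDK integration tests; loaded via config.InitConfig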
+client:
+ peers:
+ peer1:
+ host: "localhost"
+ port: 7051
+ event_host: "localhost"
+ event_port: 7053
+
+ peer2:
+ host: "localhost"
+ port: 7056
+ event_host: "localhost"
+ event_port: 7053
+
+ tls:
+ enabled: false
+ certificate:
+ serverhostoverride:
+
+ security:
+ enabled: true
+ hashAlgorithm: "SHA2"
+ level: 256
+
+ tcert:
+ batch:
+ size: 200
+
+ orderer:
+ host: "localhost"
+ port: 7050
+
+ logging:
+ level: info
+
+ fabricCA:
+ id: "DEFAULT"
+ serverURL: "http://localhost:7054"
+ certfiles :
+ - "../test/fixtures/root.pem"
+ client:
+ keyfile: "../test/fixtures/tls_client-key.pem"
+ certfile: "../test/fixtures/tls_client-cert.pem"
+
+ keystore:
+ path: "/tmp/keystore"
diff --git a/test/fixtures/root.pem b/test/fixtures/root.pem
new file mode 100644
index 0000000000..8d98dfa59e
--- /dev/null
+++ b/test/fixtures/root.pem
@@ -0,0 +1,15 @@
+-----BEGIN CERTIFICATE-----
+MIICYjCCAgmgAwIBAgIUB3CTDOU47sUC5K4kn/Caqnh114YwCgYIKoZIzj0EAwIw
+fzELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNh
+biBGcmFuY2lzY28xHzAdBgNVBAoTFkludGVybmV0IFdpZGdldHMsIEluYy4xDDAK
+BgNVBAsTA1dXVzEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYxMDEyMTkzMTAw
+WhcNMjExMDExMTkzMTAwWjB/MQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZv
+cm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEfMB0GA1UEChMWSW50ZXJuZXQg
+V2lkZ2V0cywgSW5jLjEMMAoGA1UECxMDV1dXMRQwEgYDVQQDEwtleGFtcGxlLmNv
+bTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABKIH5b2JaSmqiQXHyqC+cmknICcF
+i5AddVjsQizDV6uZ4v6s+PWiJyzfA/rTtMvYAPq/yeEHpBUB1j053mxnpMujYzBh
+MA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQXZ0I9
+qp6CP8TFHZ9bw5nRtZxIEDAfBgNVHSMEGDAWgBQXZ0I9qp6CP8TFHZ9bw5nRtZxI
+EDAKBggqhkjOPQQDAgNHADBEAiAHp5Rbp9Em1G/UmKn8WsCbqDfWecVbZPQj3RK4
+oG5kQQIgQAe4OOKYhJdh3f7URaKfGTf492/nmRmtK+ySKjpHSrU=
+-----END CERTIFICATE-----
diff --git a/test/fixtures/src/github.com/example_cc/example_cc.go b/test/fixtures/src/github.com/example_cc/example_cc.go
new file mode 100644
index 0000000000..6da5213878
--- /dev/null
+++ b/test/fixtures/src/github.com/example_cc/example_cc.go
@@ -0,0 +1,211 @@
+/*
+Copyright IBM Corp. 2016 All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/hyperledger/fabric/core/chaincode/shim"
+ pb "github.com/hyperledger/fabric/protos/peer"
+)
+
+// SimpleChaincode example simple Chaincode implementation
+type SimpleChaincode struct {
+}
+
+func (t *SimpleChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {
+ fmt.Println("########### example_cc Init ###########")
+ _, args := stub.GetFunctionAndParameters()
+ var A, B string // Entities
+ var Aval, Bval int // Asset holdings
+ var err error
+
+ if len(args) != 4 {
+ return shim.Error("Incorrect number of arguments. Expecting 4")
+ }
+
+ // Initialize the chaincode
+ A = args[0]
+ Aval, err = strconv.Atoi(args[1])
+ if err != nil {
+ return shim.Error("Expecting integer value for asset holding")
+ }
+ B = args[2]
+ Bval, err = strconv.Atoi(args[3])
+ if err != nil {
+ return shim.Error("Expecting integer value for asset holding")
+ }
+ fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval)
+
+ // Write the state to the ledger
+ err = stub.PutState(A, []byte(strconv.Itoa(Aval)))
+ if err != nil {
+ return shim.Error(err.Error())
+ }
+
+ err = stub.PutState(B, []byte(strconv.Itoa(Bval)))
+ if err != nil {
+ return shim.Error(err.Error())
+ }
+
+ return shim.Success(nil)
+
+}
+
+func (t *SimpleChaincode) Query(stub shim.ChaincodeStubInterface) pb.Response {
+ return shim.Error("Unknown supported call")
+}
+
+// Transaction makes payment of X units from A to B
+func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response {
+ fmt.Println("########### example_cc Invoke ###########")
+ function, args := stub.GetFunctionAndParameters()
+
+ if function != "invoke" {
+ return shim.Error("Unknown function call")
+ }
+
+ if len(args) < 2 {
+ return shim.Error("Incorrect number of arguments. Expecting at least 2")
+ }
+
+ if args[0] == "delete" {
+ // Deletes an entity from its state
+ return t.delete(stub, args)
+ }
+
+ if args[0] == "query" {
+ // queries an entity state
+ return t.query(stub, args)
+ }
+ if args[0] == "move" {
+ // Moves X units from entity A to entity B
+ return t.move(stub, args)
+ }
+ return shim.Error("Unknown action, check the first argument, must be one of 'delete', 'query', or 'move'")
+}
+
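+// Supported argument shapes, as dispatched by Invoke above (args are the
+// values returned by GetFunctionAndParameters):
+//   ["move", A, B, X]  - move X units from entity A to entity B
+//   ["delete", A]      - delete entity A from state
+//   ["query", A]       - query the current value of A
+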
+func (t *SimpleChaincode) move(stub shim.ChaincodeStubInterface, args []string) pb.Response {
+ // must be an invoke
+ var A, B string // Entities
+ var Aval, Bval int // Asset holdings
+ var X int // Transaction value
+ var err error
+
+ if len(args) != 4 {
+ return shim.Error("Incorrect number of arguments. Expecting 4, function followed by 2 names and 1 value")
+ }
+
+ A = args[1]
+ B = args[2]
+
+ // Get the state from the ledger
+ // TODO: will be nice to have a GetAllState call to ledger
+ Avalbytes, err := stub.GetState(A)
+ if err != nil {
+ return shim.Error("Failed to get state")
+ }
+ if Avalbytes == nil {
+ return shim.Error("Entity not found")
+ }
+ Aval, _ = strconv.Atoi(string(Avalbytes))
+
+ Bvalbytes, err := stub.GetState(B)
+ if err != nil {
+ return shim.Error("Failed to get state")
+ }
+ if Bvalbytes == nil {
+ return shim.Error("Entity not found")
+ }
+ Bval, _ = strconv.Atoi(string(Bvalbytes))
+
+ // Perform the execution
+ X, err = strconv.Atoi(args[3])
+ if err != nil {
+ return shim.Error("Invalid transaction amount, expecting a integer value")
+ }
+ Aval = Aval - X
+ Bval = Bval + X
+ fmt.Printf("Aval = %d, Bval = %d\n", Aval, Bval)
+
+ // Write the state back to the ledger
+ err = stub.PutState(A, []byte(strconv.Itoa(Aval)))
+ if err != nil {
+ return shim.Error(err.Error())
+ }
+
+ err = stub.PutState(B, []byte(strconv.Itoa(Bval)))
+ if err != nil {
+ return shim.Error(err.Error())
+ }
+
+ return shim.Success(nil)
+}
+
+// Deletes an entity from state
+func (t *SimpleChaincode) delete(stub shim.ChaincodeStubInterface, args []string) pb.Response {
+ if len(args) != 2 {
+ return shim.Error("Incorrect number of arguments. Expecting 2")
+ }
+
+ A := args[1]
+
+ // Delete the key from the state in ledger
+ err := stub.DelState(A)
+ if err != nil {
+ return shim.Error("Failed to delete state")
+ }
+
+ return shim.Success(nil)
+}
+
+// Query callback representing the query of a chaincode
+func (t *SimpleChaincode) query(stub shim.ChaincodeStubInterface, args []string) pb.Response {
+
+ var A string // Entities
+ var err error
+
+ if len(args) != 2 {
+ return shim.Error("Incorrect number of arguments. Expecting name of the person to query")
+ }
+
+ A = args[1]
+
+ // Get the state from the ledger
+ Avalbytes, err := stub.GetState(A)
+ if err != nil {
+ jsonResp := "{\"Error\":\"Failed to get state for " + A + "\"}"
+ return shim.Error(jsonResp)
+ }
+
+ if Avalbytes == nil {
+ jsonResp := "{\"Error\":\"Nil amount for " + A + "\"}"
+ return shim.Error(jsonResp)
+ }
+
+ jsonResp := "{\"Name\":\"" + A + "\",\"Amount\":\"" + string(Avalbytes) + "\"}"
+ fmt.Printf("Query Response:%s\n", jsonResp)
+ return shim.Success(Avalbytes)
+}
+
+func main() {
+ err := shim.Start(new(SimpleChaincode))
+ if err != nil {
+ fmt.Printf("Error starting Simple chaincode: %s", err)
+ }
+}
diff --git a/test/fixtures/tls_client-cert.pem b/test/fixtures/tls_client-cert.pem
new file mode 100644
index 0000000000..831e8d801c
--- /dev/null
+++ b/test/fixtures/tls_client-cert.pem
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE-----
+MIICkDCCAjWgAwIBAgIUG8ebdjiChYrkdDXXAfd0piCE0EUwCgYIKoZIzj0EAwIw
+fzELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNh
+biBGcmFuY2lzY28xHzAdBgNVBAoTFkludGVybmV0IFdpZGdldHMsIEluYy4xDDAK
+BgNVBAsTA1dXVzEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYxMjE1MTUwMTAw
+WhcNMTcxMjE1MTUwMTAwWjB3MQswCQYDVQQGEwJVUzEXMBUGA1UECBMOTm9ydGgg
+Q2Fyb2xpbmExEDAOBgNVBAcTB1JhbGVpZ2gxGzAZBgNVBAoTEkh5cGVybGVkZ2Vy
+IEZhYnJpYzEMMAoGA1UECxMDQ09QMRIwEAYDVQQDEwlsb2NhbGhvc3QwWTATBgcq
+hkjOPQIBBggqhkjOPQMBBwNCAARTkTXPc8PXZD+0Hr68B8RycPw0WEho97cMw/oq
+iw1C6sRHEKZ73a6N4nK7vCHgI0CNZpdcIS/c0p7PfZLUNJnPo4GWMIGTMA4GA1Ud
+DwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0T
+AQH/BAIwADAdBgNVHQ4EFgQUw8umSj56g4BFpMfP2UOZMI39NEowHwYDVR0jBBgw
+FoAUF2dCPaqegj/ExR2fW8OZ0bWcSBAwFAYDVR0RBA0wC4IJbG9jYWxob3N0MAoG
+CCqGSM49BAMCA0kAMEYCIQCF/eJ6/e05PxqeDxAag50HWF/G8EklrTdLWWMYJuHY
+HgIhAPlZk/aSYO5Eat0Ywg9FgmyFey2yPiWbtxs+ameOrdAC
+-----END CERTIFICATE-----
diff --git a/test/fixtures/tls_client-key.pem b/test/fixtures/tls_client-key.pem
new file mode 100644
index 0000000000..618d82f0ef
--- /dev/null
+++ b/test/fixtures/tls_client-key.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIDeqhgW+fezuKdH/rZyvfHcDLP16olZ6Ny+eotH30UODoAoGCCqGSM49
+AwEHoUQDQgAEU5E1z3PD12Q/tB6+vAfEcnD8NFhIaPe3DMP6KosNQurERxCme92u
+jeJyu7wh4CNAjWaXXCEv3NKez32S1DSZzw==
+-----END EC PRIVATE KEY-----
diff --git a/test/integration/base_test_setup.go b/test/integration/base_test_setup.go
new file mode 100644
index 0000000000..b062264182
--- /dev/null
+++ b/test/integration/base_test_setup.go
@@ -0,0 +1,168 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+ "encoding/pem"
+ "fmt"
+ "os"
+ "path"
+ "testing"
+
+ "github.com/hyperledger/fabric-sdk-go/config"
+ fabric_ca_client "github.com/hyperledger/fabric-sdk-go/fabric-ca-client"
+ "github.com/hyperledger/fabric-sdk-go/fabric-client/events"
+ "github.com/hyperledger/fabric/bccsp"
+
+ fabric_client "github.com/hyperledger/fabric-sdk-go/fabric-client"
+ kvs "github.com/hyperledger/fabric-sdk-go/fabric-client/keyvaluestore"
+ bccspFactory "github.com/hyperledger/fabric/bccsp/factory"
+ pb "github.com/hyperledger/fabric/protos/peer"
+)
+
+// BaseTestSetup is an interface used by the integration tests
+// it performs setup activities like user enrollment, chain creation,
+// crypto suite selection, and event hub initialization
+type BaseTestSetup interface {
+ GetChains(t *testing.T) (fabric_client.Chain, fabric_client.Chain)
+ GetEventHub(t *testing.T, interestedEvents []*pb.Interest) events.EventHub
+}
+
+// BaseSetupImpl implementation of BaseTestSetup
+type BaseSetupImpl struct {
+}
+
+// GetChains initializes and returns a query chain and invoke chain
+func (setup *BaseSetupImpl) GetChains(t *testing.T) (fabric_client.Chain, fabric_client.Chain) {
+ client := fabric_client.NewClient()
+
+ err := bccspFactory.InitFactories(&bccspFactory.FactoryOpts{
+ ProviderName: "SW",
+ SwOpts: &bccspFactory.SwOpts{
+ HashFamily: config.GetSecurityAlgorithm(),
+ SecLevel: config.GetSecurityLevel(),
+ FileKeystore: &bccspFactory.FileKeystoreOpts{
+ KeyStorePath: config.GetKeyStorePath(),
+ },
+ Ephemeral: false,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Failed getting ephemeral software-based BCCSP [%s]", err)
+ }
+ cryptoSuite := bccspFactory.GetDefault()
+
+ client.SetCryptoSuite(cryptoSuite)
+ stateStore, err := kvs.CreateNewFileKeyValueStore("/tmp/enroll_user")
+ if err != nil {
+ t.Fatalf("CreateNewFileKeyValueStore return error[%s]", err)
+ }
+ client.SetStateStore(stateStore)
+ user, err := client.GetUserContext("testUser")
+ if err != nil {
+ t.Fatalf("client.GetUserContext return error: %v", err)
+ }
+ if user == nil {
+ fabricCAClient, err1 := fabric_ca_client.NewFabricCAClient()
+ if err1 != nil {
+ t.Fatalf("NewFabricCAClient return error: %v", err1)
+ }
+ key, cert, err1 := fabricCAClient.Enroll("testUser", "user1")
+ if err1 != nil {
+ t.Fatalf("Enroll return error: %v", err1)
+ }
+ keyPem, _ := pem.Decode(key)
+ if keyPem == nil {
+ t.Fatalf("pem.Decode returned nil block for enrollment key")
+ }
+ user := fabric_client.NewUser("testUser")
+ k, err1 := client.GetCryptoSuite().KeyImport(keyPem.Bytes, &bccsp.ECDSAPrivateKeyImportOpts{Temporary: false})
+ if err1 != nil {
+ t.Fatalf("KeyImport return error: %v", err1)
+ }
+ user.SetPrivateKey(k)
+ user.SetEnrollmentCertificate(cert)
+ err = client.SetUserContext(user, false)
+ if err != nil {
+ t.Fatalf("client.SetUserContext return error: %v", err)
+ }
+ }
+
+ querychain, err := client.NewChain("querychain")
+ if err != nil {
+ t.Fatalf("NewChain return error: %v", err)
+ }
+
+ for _, p := range config.GetPeersConfig() {
+ endorser := fabric_client.CreateNewPeer(fmt.Sprintf("%s:%s", p.Host, p.Port))
+ querychain.AddPeer(endorser)
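+ // only the first configured peer is added to the query chain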
+ break
+ }
+
+ invokechain, err := client.NewChain("invokechain")
+ if err != nil {
+ t.Fatalf("NewChain return error: %v", err)
+ }
+ orderer := fabric_client.CreateNewOrderer(fmt.Sprintf("%s:%s", config.GetOrdererHost(), config.GetOrdererPort()))
+ invokechain.AddOrderer(orderer)
+
+ for _, p := range config.GetPeersConfig() {
+ endorser := fabric_client.CreateNewPeer(fmt.Sprintf("%s:%s", p.Host, p.Port))
+ invokechain.AddPeer(endorser)
+ }
+
+ return querychain, invokechain
+
+}
+
+// GetEventHub initializes the event hub
+func (setup *BaseSetupImpl) GetEventHub(t *testing.T,
+ interestedEvents []*pb.Interest) events.EventHub {
+ eventHub := events.NewEventHub()
+ foundEventHub := false
+ for _, p := range config.GetPeersConfig() {
+ if p.EventHost != "" && p.EventPort != "" {
+ eventHub.SetPeerAddr(fmt.Sprintf("%s:%s", p.EventHost, p.EventPort))
+ foundEventHub = true
+ break
+ }
+ }
+
+ if !foundEventHub {
+ t.Fatalf("No EventHub configuration found")
+ }
+
+ // TODO: this is coming back in some other form
+ /*if interestedEvents != nil {
+ eventHub.SetInterestedEvents(interestedEvents)
+ }*/
+
+ if err := eventHub.Connect(); err != nil {
+ t.Fatalf("Failed eventHub.Connect() [%s]", err)
+ }
+
+ return eventHub
+}
+
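+// SetupChaincodeDeploy points GOPATH at the test fixtures directory so that the example chaincode can be packaged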
+func (setup *BaseSetupImpl) SetupChaincodeDeploy() {
+ pwd, err := os.Getwd()
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ os.Setenv("GOPATH", path.Join(pwd, "../fixtures"))
+}
diff --git a/test/integration/end_to_end_test.go b/test/integration/end_to_end_test.go
new file mode 100644
index 0000000000..9f662387c4
--- /dev/null
+++ b/test/integration/end_to_end_test.go
@@ -0,0 +1,252 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+ "fmt"
+ "math/rand"
+ "strconv"
+ "testing"
+ "time"
+
+ fabric_client "github.com/hyperledger/fabric-sdk-go/fabric-client"
+ events "github.com/hyperledger/fabric-sdk-go/fabric-client/events"
+
+ config "github.com/hyperledger/fabric-sdk-go/config"
+)
+
+var chainCodeId = ""
+var chainId = "testchainid"
+var chainCodePath = "github.com/example_cc"
+var chainCodeVersion = "v0"
+
+func TestChainCodeInvoke(t *testing.T) {
+ InitConfigForEndToEnd()
+ testSetup := BaseSetupImpl{}
+
+ eventHub := testSetup.GetEventHub(t, nil)
+ querychain, invokechain := testSetup.GetChains(t)
+ testSetup.SetupChaincodeDeploy()
+ err := installCC(invokechain)
+ if err != nil {
+ t.Fatalf("installCC return error: %v", err)
+ }
+ err = instantiateCC(invokechain, eventHub)
+ if err != nil {
+ t.Fatalf("installCC return error: %v", err)
+ }
+ // Get Query value before invoke
+ value, err := getQueryValue(t, querychain)
+ if err != nil {
+ t.Fatalf("getQueryValue return error: %v", err)
+ }
+ fmt.Printf("*** QueryValue before invoke %s\n", value)
+
+ err = invoke(t, invokechain, eventHub)
+ if err != nil {
+ t.Fatalf("invoke return error: %v", err)
+ }
+
+ valueAfterInvoke, err := getQueryValue(t, querychain)
+ if err != nil {
+ t.Errorf("getQueryValue return error: %v", err)
+ return
+ }
+ fmt.Printf("*** QueryValue after invoke %s\n", valueAfterInvoke)
+
+ valueInt, _ := strconv.Atoi(value)
+ valueInt = valueInt + 1
+ valueAfterInvokeInt, _ := strconv.Atoi(valueAfterInvoke)
+ if valueInt != valueAfterInvokeInt {
+ t.Fatalf("SendTransaction didn't change the QueryValue")
+
+ }
+
+}
+
+func getQueryValue(t *testing.T, chain fabric_client.Chain) (string, error) {
+
+ var args []string
+ args = append(args, "invoke")
+ args = append(args, "query")
+ args = append(args, "b")
+
+ signedProposal, err := chain.CreateTransactionProposal(chainCodeId, chainId, args, true, nil)
+ if err != nil {
+ return "", fmt.Errorf("SendTransactionProposal return error: %v", err)
+ }
+ transactionProposalResponses, err := chain.SendTransactionProposal(signedProposal, 0)
+ if err != nil {
+ return "", fmt.Errorf("SendTransactionProposal return error: %v", err)
+ }
+
+ for _, v := range transactionProposalResponses {
+ if v.Err != nil {
+ return "", fmt.Errorf("query Endorser %s return error: %v", v.Endorser, v.Err)
+ }
+ return string(v.GetResponsePayload()), nil
+ }
+ return "", nil
+}
+
+func invoke(t *testing.T, chain fabric_client.Chain, eventHub events.EventHub) error {
+
+ var args []string
+ args = append(args, "invoke")
+ args = append(args, "move")
+ args = append(args, "a")
+ args = append(args, "b")
+ args = append(args, "1")
+
+ signedProposal, err := chain.CreateTransactionProposal(chainCodeId, chainId, args, true, nil)
+ if err != nil {
+ return fmt.Errorf("SendTransactionProposal return error: %v", err)
+ }
+ transactionProposalResponse, err := chain.SendTransactionProposal(signedProposal, 0)
+ if err != nil {
+ return fmt.Errorf("SendTransactionProposal return error: %v", err)
+ }
+
+ for _, v := range transactionProposalResponse {
+ if v.Err != nil {
+ return fmt.Errorf("invoke Endorser %s return error: %v", v.Endorser, v.Err)
+ }
+ fmt.Printf("invoke Endorser '%s' return ProposalResponse:%v\n", v.Endorser, v.GetResponsePayload())
+ }
+
+ tx, err := chain.CreateTransaction(transactionProposalResponse)
+ if err != nil {
+ return fmt.Errorf("CreateTransaction return error: %v", err)
+
+ }
+ transactionResponse, err := chain.SendTransaction(tx)
+ if err != nil {
+ return fmt.Errorf("SendTransaction return error: %v", err)
+
+ }
+ for _, v := range transactionResponse {
+ if v.Err != nil {
+ return fmt.Errorf("Orderer %s return error: %v", v.Orderer, v.Err)
+ }
+ }
+ done := make(chan bool)
+ eventHub.RegisterTxEvent(signedProposal.TransactionID, func(txId string, err error) {
+ fmt.Printf("receive success event for txid(%s)\n", txId)
+ done <- true
+ })
+
+ select {
+ case <-done:
+ case <-time.After(time.Second * 20):
+ return fmt.Errorf("Didn't receive block event for txid(%s)\n", signedProposal.TransactionID)
+ }
+ return nil
+
+}
+
+func installCC(chain fabric_client.Chain) error {
+
+ transactionProposalResponse, _, err := chain.SendInstallProposal(chainCodeId, chainCodePath, chainCodeVersion, nil)
+ if err != nil {
+ return fmt.Errorf("SendInstallProposal return error: %v", err)
+ }
+
+ for _, v := range transactionProposalResponse {
+ if v.Err != nil {
+ return fmt.Errorf("SendInstallProposal Endorser %s return error: %v", v.Endorser, v.Err)
+ }
+ fmt.Printf("SendInstallProposal Endorser '%s' return ProposalResponse:%v\n", v.Endorser, v.GetResponsePayload())
+ }
+
+ return nil
+
+}
+
+func instantiateCC(chain fabric_client.Chain, eventHub events.EventHub) error {
+
+ var args []string
+ args = append(args, "init")
+ args = append(args, "a")
+ args = append(args, "100")
+ args = append(args, "b")
+ args = append(args, "200")
+
+ transactionProposalResponse, txID, err := chain.SendInstantiateProposal(chainCodeId, chainId, args, chainCodePath, chainCodeVersion)
+ if err != nil {
+ return fmt.Errorf("SendInstantiateProposal return error: %v", err)
+ }
+
+ for _, v := range transactionProposalResponse {
+ if v.Err != nil {
+ return fmt.Errorf("SendInstantiateProposal Endorser %s return error: %v", v.Endorser, v.Err)
+ }
+ fmt.Printf("SendInstantiateProposal Endorser '%s' return ProposalResponse:%v\n", v.Endorser, v.GetResponsePayload())
+ }
+
+ tx, err := chain.CreateTransaction(transactionProposalResponse)
+ if err != nil {
+ return fmt.Errorf("CreateTransaction return error: %v", err)
+
+ }
+ transactionResponse, err := chain.SendTransaction(tx)
+ if err != nil {
+ return fmt.Errorf("SendTransaction return error: %v", err)
+
+ }
+ for _, v := range transactionResponse {
+ if v.Err != nil {
+ return fmt.Errorf("Orderer %s return error: %v", v.Orderer, v.Err)
+ }
+ }
+ done := make(chan bool)
+ eventHub.RegisterTxEvent(txID, func(txId string, err error) {
+ fmt.Printf("receive success event for txid(%s)\n", txId)
+ done <- true
+ })
+
+ select {
+ case <-done:
+ case <-time.After(time.Second * 20):
+ return fmt.Errorf("Didn't receive block event for txid(%s)\n", txID)
+ }
+ return nil
+
+}
+
+func InitConfigForEndToEnd() {
+ err := config.InitConfig("../fixtures/config/config_test.yaml")
+ if err != nil {
+ fmt.Println(err.Error())
+ }
+}
+
+func randomString(strlen int) string {
+ const chars = "abcdefghijklmnopqrstuvwxyz0123456789"
+ result := make([]byte, strlen)
+ for i := 0; i < strlen; i++ {
+ result[i] = chars[rand.Intn(len(chars))]
+ }
+ return string(result)
+}
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+ chainCodeId = randomString(10)
+}
diff --git a/test/integration/fabric_ca_test.go b/test/integration/fabric_ca_test.go
new file mode 100644
index 0000000000..a603fba501
--- /dev/null
+++ b/test/integration/fabric_ca_test.go
@@ -0,0 +1,123 @@
+/*
+Copyright SecureKey Technologies Inc. All Rights Reserved.
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package integration
+
+import (
+ "crypto/x509"
+ "encoding/pem"
+ "testing"
+
+ config "github.com/hyperledger/fabric-sdk-go/config"
+ fabric_client "github.com/hyperledger/fabric-sdk-go/fabric-client"
+ kvs "github.com/hyperledger/fabric-sdk-go/fabric-client/keyvaluestore"
+ "github.com/hyperledger/fabric/bccsp"
+ bccspFactory "github.com/hyperledger/fabric/bccsp/factory"
+
+ fabric_ca_client "github.com/hyperledger/fabric-sdk-go/fabric-ca-client"
+)
+
+// This test uses the FabricCAServices to enroll a user, and
+// saves the enrollment materials into a key value store. It
+// then uses the Client class to load the member from the
+// key value store.
+func TestEnroll(t *testing.T) {
+ InitConfigForFabricCA()
+ client := fabric_client.NewClient()
+
+ err := bccspFactory.InitFactories(&bccspFactory.FactoryOpts{
+ ProviderName: "SW",
+ SwOpts: &bccspFactory.SwOpts{
+ HashFamily: config.GetSecurityAlgorithm(),
+ SecLevel: config.GetSecurityLevel(),
+ FileKeystore: &bccspFactory.FileKeystoreOpts{
+ KeyStorePath: config.GetKeyStorePath(),
+ },
+ Ephemeral: false,
+ },
+ })
+ if err != nil {
+ t.Fatalf("Failed getting ephemeral software-based BCCSP [%s]", err)
+ }
+
+ cryptoSuite := bccspFactory.GetDefault()
+
+ client.SetCryptoSuite(cryptoSuite)
+ stateStore, err := kvs.CreateNewFileKeyValueStore("/tmp/enroll_user")
+ if err != nil {
+ t.Fatalf("CreateNewFileKeyValueStore return error[%s]", err)
+ }
+ client.SetStateStore(stateStore)
+
+ fabricCAClient, err := fabric_ca_client.NewFabricCAClient()
+ if err != nil {
+ t.Fatalf("NewFabricCAClient return error: %v", err)
+ }
+ key, cert, err := fabricCAClient.Enroll("testUser2", "user2")
+ if err != nil {
+ t.Fatalf("Enroll return error: %v", err)
+ }
+ if key == nil {
+ t.Fatalf("private key return from Enroll is nil")
+ }
+ if cert == nil {
+ t.Fatalf("cert return from Enroll is nil")
+ }
+
+ certPem, _ := pem.Decode(cert)
+ if certPem == nil {
+ t.Fatalf("pem.Decode returned nil block for enrollment certificate")
+ }
+
+ cert509, err := x509.ParseCertificate(certPem.Bytes)
+ if err != nil {
+ t.Fatalf("x509 ParseCertificate return error: %v", err)
+ }
+ if cert509.Subject.CommonName != "testUser2" {
+ t.Fatalf("CommonName in x509 cert is not the enrollmentID")
+ }
+
+ keyPem, _ := pem.Decode(key)
+ if keyPem == nil {
+ t.Fatalf("pem.Decode returned nil block for enrollment key")
+ }
+ user := fabric_client.NewUser("testUser2")
+ k, err := client.GetCryptoSuite().KeyImport(keyPem.Bytes, &bccsp.ECDSAPrivateKeyImportOpts{Temporary: false})
+ if err != nil {
+ t.Fatalf("KeyImport return error: %v", err)
+ }
+ user.SetPrivateKey(k)
+ user.SetEnrollmentCertificate(cert)
+ err = client.SetUserContext(user, false)
+ if err != nil {
+ t.Fatalf("client.SetUserContext return error: %v", err)
+ }
+ user, err = client.GetUserContext("testUser2")
+ if err != nil {
+ t.Fatalf("client.GetUserContext return error: %v", err)
+ }
+ if user == nil {
+ t.Fatalf("client.GetUserContext return nil")
+ }
+
+}
+
+func InitConfigForFabricCA() {
+ config.InitConfig("./test_resources/config/config_test.yaml")
+}
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
new file mode 100644
index 0000000000..21e0938cae
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE
@@ -0,0 +1,3 @@
+Compatible with TOML version
+[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
+
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
new file mode 100644
index 0000000000..5a8e332545
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COPYING
@@ -0,0 +1,14 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
new file mode 100644
index 0000000000..3600848d33
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/Makefile
@@ -0,0 +1,19 @@
+install:
+ go install ./...
+
+test: install
+ go test -v
+ toml-test toml-test-decoder
+ toml-test -encoder toml-test-encoder
+
+fmt:
+ gofmt -w *.go */*.go
+ colcheck *.go */*.go
+
+tags:
+ find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
+
+push:
+ git push origin master
+ git push github master
+
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
new file mode 100644
index 0000000000..5a5df63709
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -0,0 +1,220 @@
+## TOML parser and encoder for Go with reflection
+
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml`
+packages. This package also supports the `encoding.TextUnmarshaler` and
+`encoding.TextMarshaler` interfaces so that you can define custom data
+representations. (There is an example of this below.)
+
+Spec: https://github.com/mojombo/toml
+
+Compatible with TOML version
+[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md)
+
+Documentation: http://godoc.org/github.com/BurntSushi/toml
+
+Installation:
+
+```bash
+go get github.com/BurntSushi/toml
+```
+
+Try the toml validator:
+
+```bash
+go get github.com/BurntSushi/toml/cmd/tomlv
+tomlv some-toml-file.toml
+```
+
+[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml)
+
+
+### Testing
+
+This package passes all tests in
+[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
+and the encoder.
+
+### Examples
+
+This package works similarly to how the Go standard library handles `XML`
+and `JSON`. Namely, data is loaded into Go values via reflection.
+
+For the simplest example, consider some TOML file as just a list of keys
+and values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which could be defined in Go as:
+
+```go
+type Config struct {
+ Age int
+ Cats []string
+ Pi float64
+ Perfection []int
+ DOB time.Time // requires `import time`
+}
+```
+
+And then decoded with:
+
+```go
+var conf Config
+if _, err := toml.Decode(tomlData, &conf); err != nil {
+ // handle error
+}
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+ ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+### Using the `encoding.TextUnmarshaler` interface
+
+Here's an example that automatically parses duration strings into
+`time.Duration` values:
+
+```toml
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+```
+
+Which can be decoded with:
+
+```go
+type song struct {
+ Name string
+ Duration duration
+}
+type songs struct {
+ Song []song
+}
+var favorites songs
+if _, err := toml.Decode(blob, &favorites); err != nil {
+ log.Fatal(err)
+}
+
+for _, s := range favorites.Song {
+ fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+}
+```
+
+And you'll also need a `duration` type that satisfies the
+`encoding.TextUnmarshaler` interface:
+
+```go
+type duration struct {
+ time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+ var err error
+ d.Duration, err = time.ParseDuration(string(text))
+ return err
+}
+```
+
+### More complex usage
+
+Here's an example of how to load the example from the official spec page:
+
+```toml
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+ "alpha",
+ "omega"
+]
+```
+
+And the corresponding Go types are:
+
+```go
+type tomlConfig struct {
+ Title string
+ Owner ownerInfo
+ DB database `toml:"database"`
+ Servers map[string]server
+ Clients clients
+}
+
+type ownerInfo struct {
+ Name string
+ Org string `toml:"organization"`
+ Bio string
+ DOB time.Time
+}
+
+type database struct {
+ Server string
+ Ports []int
+ ConnMax int `toml:"connection_max"`
+ Enabled bool
+}
+
+type server struct {
+ IP string
+ DC string
+}
+
+type clients struct {
+ Data [][]interface{}
+ Hosts []string
+}
+```
+
+Note that a case insensitive match will be tried if an exact match can't be
+found.
+
+A working example of the above can be found in `_examples/example.{go,toml}`.
+
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
new file mode 100644
index 0000000000..6c7d398b89
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -0,0 +1,492 @@
+package toml
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "strings"
+ "time"
+)
+
+var e = fmt.Errorf
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+ UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
+func Unmarshal(p []byte, v interface{}) error {
+ _, err := Decode(string(p), v)
+ return err
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+// When using the various `Decode*` functions, the type `Primitive` may
+// be given to any value, and its decoding will be delayed.
+//
+// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+//
+// The underlying representation of a `Primitive` value is subject to change.
+// Do not rely on it.
+//
+// N.B. Primitive values are still parsed, so using them will only avoid
+// the overhead of reflection. They can be useful when you don't know the
+// exact type of TOML data until run time.
+type Primitive struct {
+ undecoded interface{}
+ context Key
+}
+
+// DEPRECATED!
+//
+// Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md := MetaData{decoded: make(map[string]bool)}
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
+//
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md.context = primValue.context
+ defer func() { md.context = nil }()
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Decode will decode the contents of `data` in TOML format into a pointer
+// `v`.
+//
+// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
+// used interchangeably.)
+//
+// TOML arrays of tables correspond to either a slice of structs or a slice
+// of maps.
+//
+// TOML datetimes correspond to Go `time.Time` values.
+//
+// All other TOML types (float, string, int, bool and array) correspond
+// to the obvious Go types.
+//
+// An exception to the above rules is if a type implements the
+// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
+// (floats, strings, integers, booleans and datetimes) will be converted to
+// a byte string and given to the value's UnmarshalText method. See the
+// Unmarshaler example for a demonstration with time duration strings.
+//
+// Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go
+// struct. The special `toml` struct tag may be used to map TOML keys to
+// struct fields that don't match the key name exactly. (See the example.)
+// A case insensitive match to struct names will be tried if an exact match
+// can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there
+// may exist TOML values that cannot be placed into your representation, and
+// there may be parts of your representation that do not correspond to
+// TOML values. This loose mapping can be made stricter by using the IsDefined
+// and/or Undecoded methods on the MetaData returned.
+//
+// This decoder will not handle cyclic types. If a cyclic type is passed,
+// `Decode` will not terminate.
+func Decode(data string, v interface{}) (MetaData, error) {
+ p, err := parse(data)
+ if err != nil {
+ return MetaData{}, err
+ }
+ md := MetaData{
+ p.mapping, p.types, p.ordered,
+ make(map[string]bool, len(p.ordered)), nil,
+ }
+ return md, md.unify(p.mapping, rvalue(v))
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at `fpath` and decode it for you.
+func DecodeFile(fpath string, v interface{}) (MetaData, error) {
+ bs, err := ioutil.ReadFile(fpath)
+ if err != nil {
+ return MetaData{}, err
+ }
+ return Decode(string(bs), v)
+}
+
+// DecodeReader is just like Decode, except it will consume all bytes
+// from the reader and decode it for you.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+ bs, err := ioutil.ReadAll(r)
+ if err != nil {
+ return MetaData{}, err
+ }
+ return Decode(string(bs), v)
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+
+ // Special case. Look for a `Primitive` value.
+ if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+ // Save the undecoded data and the key context into the primitive
+ // value.
+ context := make(Key, len(md.context))
+ copy(context, md.context)
+ rv.Set(reflect.ValueOf(Primitive{
+ undecoded: data,
+ context: context,
+ }))
+ return nil
+ }
+
+ // Special case. Unmarshaler Interface support.
+ if rv.CanAddr() {
+ if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+ return v.UnmarshalTOML(data)
+ }
+ }
+
+ // Special case. Handle time.Time values specifically.
+ // TODO: Remove this code when we decide to drop support for Go 1.1.
+ // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
+ // interfaces.
+ if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
+ return md.unifyDatetime(data, rv)
+ }
+
+ // Special case. Look for a value satisfying the TextUnmarshaler interface.
+ if v, ok := rv.Interface().(TextUnmarshaler); ok {
+ return md.unifyText(data, v)
+ }
+ // BUG(burntsushi)
+ // The behavior here is incorrect whenever a Go type satisfies the
+ // encoding.TextUnmarshaler interface but also corresponds to a TOML
+ // hash or array. In particular, the unmarshaler should only be applied
+ // to primitive TOML values. But at this point, it will be applied to
+ // all kinds of values and produce an incorrect error whenever those values
+ // are hashes or arrays (including arrays of tables).
+
+ k := rv.Kind()
+
+ // laziness
+ if k >= reflect.Int && k <= reflect.Uint64 {
+ return md.unifyInt(data, rv)
+ }
+ switch k {
+ case reflect.Ptr:
+ elem := reflect.New(rv.Type().Elem())
+ err := md.unify(data, reflect.Indirect(elem))
+ if err != nil {
+ return err
+ }
+ rv.Set(elem)
+ return nil
+ case reflect.Struct:
+ return md.unifyStruct(data, rv)
+ case reflect.Map:
+ return md.unifyMap(data, rv)
+ case reflect.Array:
+ return md.unifyArray(data, rv)
+ case reflect.Slice:
+ return md.unifySlice(data, rv)
+ case reflect.String:
+ return md.unifyString(data, rv)
+ case reflect.Bool:
+ return md.unifyBool(data, rv)
+ case reflect.Interface:
+ // We only support decoding into the empty interface.
+ if rv.NumMethod() > 0 {
+ return e("Unsupported type '%s'.", rv.Kind())
+ }
+ return md.unifyAnything(data, rv)
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ return md.unifyFloat64(data, rv)
+ }
+ return e("Unsupported type '%s'.", rv.Kind())
+}
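+
+// A sketch (not upstream text) of the Primitive special case above: decoding
+// is deferred until the value is unified later, here via the package's
+// PrimitiveDecode method on MetaData; the document layout and the Fruit type
+// are hypothetical.
+//
+// var doc struct {
+// Ranking []string
+// Fruit map[string]Primitive
+// }
+// md, _ := Decode(tomlData, &doc)
+// for _, name := range doc.Ranking {
+// var f Fruit // hypothetical type
+// if err := md.PrimitiveDecode(doc.Fruit[name], &f); err != nil {
+// // handle the error
+// }
+// }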
+
+func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ return mismatch(rv, "map", mapping)
+ }
+
+ for key, datum := range tmap {
+ var f *field
+ fields := cachedTypeFields(rv.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if ff.name == key {
+ f = ff
+ break
+ }
+ if f == nil && strings.EqualFold(ff.name, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv := rv
+ for _, i := range f.index {
+ subv = indirect(subv.Field(i))
+ }
+ if isUnifiable(subv) {
+ md.decoded[md.context.add(key).String()] = true
+ md.context = append(md.context, key)
+ if err := md.unify(datum, subv); err != nil {
+ return e("Type mismatch for '%s.%s': %s",
+ rv.Type().String(), f.name, err)
+ }
+ md.context = md.context[0 : len(md.context)-1]
+ } else if f.name != "" {
+ // Bad user! No soup for you!
+ return e("Field '%s.%s' is unexported, and therefore cannot "+
+ "be loaded with reflection.", rv.Type().String(), f.name)
+ }
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ return badtype("map", mapping)
+ }
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(rv.Type()))
+ }
+ for k, v := range tmap {
+ md.decoded[md.context.add(k).String()] = true
+ md.context = append(md.context, k)
+
+ rvkey := indirect(reflect.New(rv.Type().Key()))
+ rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
+ if err := md.unify(v, rvval); err != nil {
+ return err
+ }
+ md.context = md.context[0 : len(md.context)-1]
+
+ rvkey.SetString(k)
+ rv.SetMapIndex(rvkey, rvval)
+ }
+ return nil
+}
+
+func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ return badtype("slice", data)
+ }
+ sliceLen := datav.Len()
+ if sliceLen != rv.Len() {
+ return e("expected array length %d; got TOML array of length %d",
+ rv.Len(), sliceLen)
+ }
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ return badtype("slice", data)
+ }
+ sliceLen := datav.Len()
+ if rv.IsNil() {
+ rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen))
+ }
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
+ sliceLen := data.Len()
+ for i := 0; i < sliceLen; i++ {
+ v := data.Index(i).Interface()
+ sliceval := indirect(rv.Index(i))
+ if err := md.unify(v, sliceval); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
+ if _, ok := data.(time.Time); ok {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+ }
+ return badtype("time.Time", data)
+}
+
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+ if s, ok := data.(string); ok {
+ rv.SetString(s)
+ return nil
+ }
+ return badtype("string", data)
+}
+
+func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(float64); ok {
+ switch rv.Kind() {
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ rv.SetFloat(num)
+ default:
+ panic("bug")
+ }
+ return nil
+ }
+ return badtype("float", data)
+}
+
+func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(int64); ok {
+ if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int64:
+ // No bounds checking necessary.
+ case reflect.Int8:
+ if num < math.MinInt8 || num > math.MaxInt8 {
+ return e("Value '%d' is out of range for int8.", num)
+ }
+ case reflect.Int16:
+ if num < math.MinInt16 || num > math.MaxInt16 {
+ return e("Value '%d' is out of range for int16.", num)
+ }
+ case reflect.Int32:
+ if num < math.MinInt32 || num > math.MaxInt32 {
+ return e("Value '%d' is out of range for int32.", num)
+ }
+ }
+ rv.SetInt(num)
+ } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
+ unum := uint64(num)
+ switch rv.Kind() {
+ case reflect.Uint, reflect.Uint64:
+ // No bounds checking necessary.
+ case reflect.Uint8:
+ if num < 0 || unum > math.MaxUint8 {
+ return e("Value '%d' is out of range for uint8.", num)
+ }
+ case reflect.Uint16:
+ if num < 0 || unum > math.MaxUint16 {
+ return e("Value '%d' is out of range for uint16.", num)
+ }
+ case reflect.Uint32:
+ if num < 0 || unum > math.MaxUint32 {
+ return e("Value '%d' is out of range for uint32.", num)
+ }
+ }
+ rv.SetUint(unum)
+ } else {
+ panic("unreachable")
+ }
+ return nil
+ }
+ return badtype("integer", data)
+}
+
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+ if b, ok := data.(bool); ok {
+ rv.SetBool(b)
+ return nil
+ }
+ return badtype("boolean", data)
+}
+
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+}
+
+func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
+ var s string
+ switch sdata := data.(type) {
+ case TextMarshaler:
+ text, err := sdata.MarshalText()
+ if err != nil {
+ return err
+ }
+ s = string(text)
+ case fmt.Stringer:
+ s = sdata.String()
+ case string:
+ s = sdata
+ case bool:
+ s = fmt.Sprintf("%v", sdata)
+ case int64:
+ s = fmt.Sprintf("%d", sdata)
+ case float64:
+ s = fmt.Sprintf("%f", sdata)
+ default:
+ return badtype("primitive (string-like)", data)
+ }
+ if err := v.UnmarshalText([]byte(s)); err != nil {
+ return err
+ }
+ return nil
+}
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+ return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+// Pointers are followed until the value is not a pointer.
+// New values are allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of
+// interest to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+ if v.Kind() != reflect.Ptr {
+ if v.CanAddr() {
+ pv := v.Addr()
+ if _, ok := pv.Interface().(TextUnmarshaler); ok {
+ return pv
+ }
+ }
+ return v
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+ if rv.CanSet() {
+ return true
+ }
+ if _, ok := rv.Interface().(TextUnmarshaler); ok {
+ return true
+ }
+ return false
+}
+
+func badtype(expected string, data interface{}) error {
+ return e("Expected %s but found '%T'.", expected, data)
+}
+
+func mismatch(user reflect.Value, expected string, data interface{}) error {
+ return e("Type mismatch for %s. Expected %s but found '%T'.",
+ user.Type().String(), expected, data)
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
new file mode 100644
index 0000000000..ef6f545fa1
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -0,0 +1,122 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not
+// be inferrable via reflection. In particular, whether a key has been defined
+// and the TOML type of a key.
+type MetaData struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ keys []Key
+ decoded map[string]bool
+ context Key // Used only during decoding.
+}
+
+// IsDefined returns true if the key given exists in the TOML data. The key
+// should be specified hierarchically, e.g.,
+//
+// // access the TOML key 'a.b.c'
+// IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key is given. Keys are case
+// sensitive.
+func (md *MetaData) IsDefined(key ...string) bool {
+ if len(key) == 0 {
+ return false
+ }
+
+ var hash map[string]interface{}
+ var ok bool
+ var hashOrVal interface{} = md.mapping
+ for _, k := range key {
+ if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+ return false
+ }
+ if hashOrVal, ok = hash[k]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that
+// does not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+ fullkey := strings.Join(key, ".")
+ if typ, ok := md.types[fullkey]; ok {
+ return typ.typeString()
+ }
+ return ""
+}
+
+// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
+// to get values of this type.
+type Key []string
+
+func (k Key) String() string {
+ return strings.Join(k, ".")
+}
+
+func (k Key) maybeQuotedAll() string {
+ var ss []string
+ for i := range k {
+ ss = append(ss, k.maybeQuoted(i))
+ }
+ return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+ quote := false
+ for _, c := range k[i] {
+ if !isBareKeyChar(c) {
+ quote = true
+ break
+ }
+ }
+ if quote {
+ return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
+ } else {
+ return k[i]
+ }
+}
+
+func (k Key) add(piece string) Key {
+ newKey := make(Key, len(k)+1)
+ copy(newKey, k)
+ newKey[len(k)] = piece
+ return newKey
+}
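+
+// Illustrative (not upstream text): for k := Key{"servers", "my srv"},
+// k.String() returns `servers.my srv`, while k.maybeQuotedAll() returns
+// `servers."my srv"`, since the space is not a bare-key character.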
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific.
+//
+// The list will have the same order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+ return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+ undecoded := make([]Key, 0, len(md.keys))
+ for _, key := range md.keys {
+ if !md.decoded[key.String()] {
+ undecoded = append(undecoded, key)
+ }
+ }
+ return undecoded
+}
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
new file mode 100644
index 0000000000..fe26800041
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -0,0 +1,27 @@
+/*
+Package toml provides facilities for decoding and encoding TOML configuration
+files via reflection. There is also support for delaying decoding with
+the Primitive type, and querying the set of keys in a TOML document with the
+MetaData type.
+
+The specification implemented: https://github.com/mojombo/toml
+
+The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
+whether a file is a valid TOML document. It can also be used to print the
+type of each key in a TOML document.
+
+Testing
+
+There are two important types of tests used for this package. The first is
+contained inside '*_test.go' files and uses the standard Go unit testing
+framework. These tests are primarily devoted to holistically testing the
+decoder and encoder.
+
+The second type of testing is used to verify the implementation's adherence
+to the TOML specification. These tests have been factored into their own
+project: https://github.com/BurntSushi/toml-test
+
+// The reason the tests are in a separate project is so that they can be used
+// by any implementation of TOML. Namely, the test suite is language agnostic.
+*/
+package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
new file mode 100644
index 0000000000..c7e227c7f4
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -0,0 +1,551 @@
+package toml
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+ errArrayMixedElementTypes = errors.New(
+ "can't encode array with mixed element types")
+ errArrayNilElement = errors.New(
+ "can't encode array with nil element")
+ errNonString = errors.New(
+ "can't encode a map with non-string key type")
+ errAnonNonStruct = errors.New(
+ "can't encode an anonymous field that is not a struct")
+ errArrayNoTable = errors.New(
+ "TOML array element can't contain a table")
+ errNoKey = errors.New(
+ "top-level values must be a Go map or struct")
+ errAnything = errors.New("") // used in testing
+)
+
+var quotedReplacer = strings.NewReplacer(
+ "\t", "\\t",
+ "\n", "\\n",
+ "\r", "\\r",
+ "\"", "\\\"",
+ "\\", "\\\\",
+)
+
+// Encoder controls the encoding of Go values to a TOML document to some
+// io.Writer.
+//
+// The indentation level can be controlled with the Indent field.
+type Encoder struct {
+ // A single indentation level. By default it is two spaces.
+ Indent string
+
+ // hasWritten is whether we have written any output to w yet.
+ hasWritten bool
+ w *bufio.Writer
+}
+
+// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
+// given. By default, a single indentation level is 2 spaces.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ w: bufio.NewWriter(w),
+ Indent: " ",
+ }
+}
+
+// Encode writes a TOML representation of the Go value to the underlying
+// io.Writer. If the value given cannot be encoded to a valid TOML document,
+// then an error is returned.
+//
+// The mapping between Go values and TOML values should be precisely the same
+// as for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. (If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.)
+//
+// When encoding TOML hashes (i.e., Go maps or structs), keys without any
+// sub-hashes are encoded first.
+//
+// If a Go map is encoded, then its keys are sorted alphabetically for
+// deterministic output. More control over this behavior may be provided if
+// there is demand for it.
+//
+// Encoding Go values without a corresponding TOML representation---like map
+// types with non-string keys---will cause an error to be returned. Similarly
+// for mixed arrays/slices, arrays/slices with nil elements, embedded
+// non-struct types and nested slices containing maps or structs.
+// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
+// and so is []map[string][]string.)
+func (enc *Encoder) Encode(v interface{}) error {
+ rv := eindirect(reflect.ValueOf(v))
+ if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+ return err
+ }
+ return enc.w.Flush()
+}
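+
+// A brief usage sketch (not upstream text); the buffer and map literal are
+// hypothetical:
+//
+// var buf bytes.Buffer
+// enc := NewEncoder(&buf)
+// enc.Indent = "\t" // optional: override the default two-space indent
+// if err := enc.Encode(map[string]string{"name": "app"}); err != nil {
+// // handle the error
+// }
+// fmt.Print(buf.String()) // name = "app"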
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if terr, ok := r.(tomlEncodeError); ok {
+ err = terr.error
+ return
+ }
+ panic(r)
+ }
+ }()
+ enc.encode(key, rv)
+ return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+ // Special case. Time needs to be in ISO8601 format.
+ // Special case. If we can marshal the type to text, then we use that.
+ // Basically, this prevents the encoder from handling these types as
+ // generic structs (or whatever the underlying type of a TextMarshaler is).
+ switch rv.Interface().(type) {
+ case time.Time, TextMarshaler:
+ enc.keyEqElement(key, rv)
+ return
+ }
+
+ k := rv.Kind()
+ switch k {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64,
+ reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+ enc.keyEqElement(key, rv)
+ case reflect.Array, reflect.Slice:
+ if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+ enc.eArrayOfTables(key, rv)
+ } else {
+ enc.keyEqElement(key, rv)
+ }
+ case reflect.Interface:
+ if rv.IsNil() {
+ return
+ }
+ enc.encode(key, rv.Elem())
+ case reflect.Map:
+ if rv.IsNil() {
+ return
+ }
+ enc.eTable(key, rv)
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return
+ }
+ enc.encode(key, rv.Elem())
+ case reflect.Struct:
+ enc.eTable(key, rv)
+ default:
+ panic(e("Unsupported type for key '%s': %s", key, k))
+ }
+}
+
+// eElement encodes any value that can be an array element (primitives and
+// arrays).
+func (enc *Encoder) eElement(rv reflect.Value) {
+ switch v := rv.Interface().(type) {
+ case time.Time:
+ // Special case time.Time as a primitive. Has to come before
+ // TextMarshaler below because time.Time implements
+ // encoding.TextMarshaler, but we need to always use UTC.
+ enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
+ return
+ case TextMarshaler:
+ // Special case. Use text marshaler if it's available for this value.
+ if s, err := v.MarshalText(); err != nil {
+ encPanic(err)
+ } else {
+ enc.writeQuoted(string(s))
+ }
+ return
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ enc.wf(strconv.FormatBool(rv.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64:
+ enc.wf(strconv.FormatInt(rv.Int(), 10))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16,
+ reflect.Uint32, reflect.Uint64:
+ enc.wf(strconv.FormatUint(rv.Uint(), 10))
+ case reflect.Float32:
+ enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
+ case reflect.Float64:
+ enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
+ case reflect.Array, reflect.Slice:
+ enc.eArrayOrSliceElement(rv)
+ case reflect.Interface:
+ enc.eElement(rv.Elem())
+ case reflect.String:
+ enc.writeQuoted(rv.String())
+ default:
+ panic(e("Unexpected primitive type: %s", rv.Kind()))
+ }
+}
+
+// By the TOML spec, all floats must have a decimal point with at least one
+// digit on either side.
+func floatAddDecimal(fstr string) string {
+ if !strings.Contains(fstr, ".") {
+ return fstr + ".0"
+ }
+ return fstr
+}
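+
+// e.g., floatAddDecimal("3") returns "3.0", while "3.14" passes through
+// unchanged.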
+
+func (enc *Encoder) writeQuoted(s string) {
+ enc.wf("\"%s\"", quotedReplacer.Replace(s))
+}
+
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+ length := rv.Len()
+ enc.wf("[")
+ for i := 0; i < length; i++ {
+ elem := rv.Index(i)
+ enc.eElement(elem)
+ if i != length-1 {
+ enc.wf(", ")
+ }
+ }
+ enc.wf("]")
+}
+
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+ if len(key) == 0 {
+ encPanic(errNoKey)
+ }
+ for i := 0; i < rv.Len(); i++ {
+ trv := rv.Index(i)
+ if isNil(trv) {
+ continue
+ }
+ panicIfInvalidKey(key)
+ enc.newline()
+ enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.newline()
+ enc.eMapOrStruct(key, trv)
+ }
+}
+
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+ panicIfInvalidKey(key)
+ if len(key) == 1 {
+ // Output an extra new line between top-level tables.
+ // (The newline isn't written if nothing else has been written though.)
+ enc.newline()
+ }
+ if len(key) > 0 {
+ enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+ enc.newline()
+ }
+ enc.eMapOrStruct(key, rv)
+}
+
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
+ switch rv := eindirect(rv); rv.Kind() {
+ case reflect.Map:
+ enc.eMap(key, rv)
+ case reflect.Struct:
+ enc.eStruct(key, rv)
+ default:
+ panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+ }
+}
+
+func (enc *Encoder) eMap(key Key, rv reflect.Value) {
+ rt := rv.Type()
+ if rt.Key().Kind() != reflect.String {
+ encPanic(errNonString)
+ }
+
+ // Sort keys so that we have deterministic output. And write keys directly
+ // underneath this key first, before writing sub-structs or sub-maps.
+ var mapKeysDirect, mapKeysSub []string
+ for _, mapKey := range rv.MapKeys() {
+ k := mapKey.String()
+ if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+ mapKeysSub = append(mapKeysSub, k)
+ } else {
+ mapKeysDirect = append(mapKeysDirect, k)
+ }
+ }
+
+ var writeMapKeys = func(mapKeys []string) {
+ sort.Strings(mapKeys)
+ for _, mapKey := range mapKeys {
+ mrv := rv.MapIndex(reflect.ValueOf(mapKey))
+ if isNil(mrv) {
+ // Don't write anything for nil fields.
+ continue
+ }
+ enc.encode(key.add(mapKey), mrv)
+ }
+ }
+ writeMapKeys(mapKeysDirect)
+ writeMapKeys(mapKeysSub)
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+ // Write keys for fields directly under this key first, because if we write
+ // a field that creates a new table, then all keys under it will be in that
+ // table (not the one we're writing here).
+ rt := rv.Type()
+ var fieldsDirect, fieldsSub [][]int
+ var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+ addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+ for i := 0; i < rt.NumField(); i++ {
+ f := rt.Field(i)
+ // Skip unexported fields.
+ if f.PkgPath != "" {
+ continue
+ }
+ frv := rv.Field(i)
+ if f.Anonymous {
+ frv := eindirect(frv)
+ t := frv.Type()
+ if t.Kind() != reflect.Struct {
+ encPanic(errAnonNonStruct)
+ }
+ addFields(t, frv, f.Index)
+ } else if typeIsHash(tomlTypeOfGo(frv)) {
+ fieldsSub = append(fieldsSub, append(start, f.Index...))
+ } else {
+ fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+ }
+ }
+ }
+ addFields(rt, rv, nil)
+
+ var writeFields = func(fields [][]int) {
+ for _, fieldIndex := range fields {
+ sft := rt.FieldByIndex(fieldIndex)
+ sf := rv.FieldByIndex(fieldIndex)
+ if isNil(sf) {
+ // Don't write anything for nil fields.
+ continue
+ }
+
+ keyName := sft.Tag.Get("toml")
+ if keyName == "-" {
+ continue
+ }
+ if keyName == "" {
+ keyName = sft.Name
+ }
+
+ keyName, opts := getOptions(keyName)
+ if _, ok := opts["omitempty"]; ok && isEmpty(sf) {
+ continue
+ } else if _, ok := opts["omitzero"]; ok && isZero(sf) {
+ continue
+ }
+
+ enc.encode(key.add(keyName), sf)
+ }
+ }
+ writeFields(fieldsDirect)
+ writeFields(fieldsSub)
+}
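+
+// Illustrative (not upstream text): the tag handling above supports structs
+// such as this hypothetical one.
+//
+// type server struct {
+// Host string `toml:"host"` // renamed key
+// Port int `toml:"port,omitzero"` // skipped when 0
+// Labels []string `toml:"labels,omitempty"` // skipped when empty
+// secret string // unexported: never encoded
+// Debug bool `toml:"-"` // explicitly excluded
+// }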
+
+// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
+// which means no concrete TOML type could be found (for instance, for a nil
+// value). It is also used to determine whether the types of array elements
+// are mixed, which is forbidden.
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+ if isNil(rv) || !rv.IsValid() {
+ return nil
+ }
+ switch rv.Kind() {
+ case reflect.Bool:
+ return tomlBool
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+ reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+ reflect.Uint64:
+ return tomlInteger
+ case reflect.Float32, reflect.Float64:
+ return tomlFloat
+ case reflect.Array, reflect.Slice:
+ if typeEqual(tomlHash, tomlArrayType(rv)) {
+ return tomlArrayHash
+ } else {
+ return tomlArray
+ }
+ case reflect.Ptr, reflect.Interface:
+ return tomlTypeOfGo(rv.Elem())
+ case reflect.String:
+ return tomlString
+ case reflect.Map:
+ return tomlHash
+ case reflect.Struct:
+ switch rv.Interface().(type) {
+ case time.Time:
+ return tomlDatetime
+ case TextMarshaler:
+ return tomlString
+ default:
+ return tomlHash
+ }
+ default:
+ panic("unexpected reflect.Kind: " + rv.Kind().String())
+ }
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero-length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+ if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+ return nil
+ }
+ firstType := tomlTypeOfGo(rv.Index(0))
+ if firstType == nil {
+ encPanic(errArrayNilElement)
+ }
+
+ rvlen := rv.Len()
+ for i := 1; i < rvlen; i++ {
+ elem := rv.Index(i)
+ switch elemType := tomlTypeOfGo(elem); {
+ case elemType == nil:
+ encPanic(errArrayNilElement)
+ case !typeEqual(firstType, elemType):
+ encPanic(errArrayMixedElementTypes)
+ }
+ }
+ // If we have a nested array, then we must make sure that the nested
+ // array contains ONLY primitives.
+ // This checks arbitrarily nested arrays.
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
+ nest := tomlArrayType(eindirect(rv.Index(0)))
+ if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
+ encPanic(errArrayNoTable)
+ }
+ }
+ return firstType
+}
+
+// getOptions splits a struct field's "toml" tag of the form "name,opt1,opt2"
+// into the key name and the set of options.
+func getOptions(keyName string) (string, map[string]struct{}) {
+ opts := make(map[string]struct{})
+ ss := strings.Split(keyName, ",")
+ name := ss[0]
+ // Only the elements after the name are options; the name itself must not
+ // be treated as an option.
+ for _, opt := range ss[1:] {
+ opts[opt] = struct{}{}
+ }
+
+ return name, opts
+}
+
+func isZero(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if rv.Int() == 0 {
+ return true
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if rv.Uint() == 0 {
+ return true
+ }
+ case reflect.Float32, reflect.Float64:
+ if rv.Float() == 0.0 {
+ return true
+ }
+ }
+
+ return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.String:
+ if len(strings.TrimSpace(rv.String())) == 0 {
+ return true
+ }
+ case reflect.Array, reflect.Slice, reflect.Map:
+ if rv.Len() == 0 {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (enc *Encoder) newline() {
+ if enc.hasWritten {
+ enc.wf("\n")
+ }
+}
+
+func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
+ if len(key) == 0 {
+ encPanic(errNoKey)
+ }
+ panicIfInvalidKey(key)
+ enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+ enc.eElement(val)
+ enc.newline()
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+ if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+ encPanic(err)
+ }
+ enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+ return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+ panic(tomlEncodeError{err})
+}
+
+func eindirect(v reflect.Value) reflect.Value {
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ return eindirect(v.Elem())
+ default:
+ return v
+ }
+}
+
+func isNil(rv reflect.Value) bool {
+ switch rv.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return rv.IsNil()
+ default:
+ return false
+ }
+}
+
+func panicIfInvalidKey(key Key) {
+ for _, k := range key {
+ if len(k) == 0 {
+ encPanic(e("Key '%s' is not a valid table name. Key names "+
+ "cannot be empty.", key.maybeQuotedAll()))
+ }
+ }
+}
+
+func isValidKeyName(s string) bool {
+ return len(s) != 0
+}
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
new file mode 100644
index 0000000000..d36e1dd600
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encoding_types.go
@@ -0,0 +1,19 @@
+// +build go1.2
+
+package toml
+
+// In order to support Go 1.1, we define our own TextMarshaler and
+// TextUnmarshaler types. For Go 1.2+, we just alias them with the
+// standard library interfaces.
+
+import (
+ "encoding"
+)
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler encoding.TextMarshaler
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
new file mode 100644
index 0000000000..e8d503d046
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
@@ -0,0 +1,18 @@
+// +build !go1.2
+
+package toml
+
+// These interfaces were introduced in Go 1.2, so we add them manually when
+// compiling for Go 1.1.
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported.
+type TextMarshaler interface {
+ MarshalText() (text []byte, err error)
+}
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported.
+type TextUnmarshaler interface {
+ UnmarshalText(text []byte) error
+}
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
new file mode 100644
index 0000000000..219122857e
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -0,0 +1,874 @@
+package toml
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+type itemType int
+
+const (
+ itemError itemType = iota
+ itemNIL // used in the parser to indicate no type
+ itemEOF
+ itemText
+ itemString
+ itemRawString
+ itemMultilineString
+ itemRawMultilineString
+ itemBool
+ itemInteger
+ itemFloat
+ itemDatetime
+ itemArray // the start of an array
+ itemArrayEnd
+ itemTableStart
+ itemTableEnd
+ itemArrayTableStart
+ itemArrayTableEnd
+ itemKeyStart
+ itemCommentStart
+)
+
+const (
+ eof = 0
+ tableStart = '['
+ tableEnd = ']'
+ arrayTableStart = '['
+ arrayTableEnd = ']'
+ tableSep = '.'
+ keySep = '='
+ arrayStart = '['
+ arrayEnd = ']'
+ arrayValTerm = ','
+ commentStart = '#'
+ stringStart = '"'
+ stringEnd = '"'
+ rawStringStart = '\''
+ rawStringEnd = '\''
+)
+
+type stateFn func(lx *lexer) stateFn
+
+type lexer struct {
+ input string
+ start int
+ pos int
+ width int
+ line int
+ state stateFn
+ items chan item
+
+ // A stack of state functions used to maintain context.
+ // The idea is to reuse parts of the state machine in various places.
+ // For example, values can appear at the top level or within arbitrarily
+ // nested arrays. The last state on the stack is used after a value has
+ // been lexed. Similarly for comments.
+ stack []stateFn
+}
+
+type item struct {
+ typ itemType
+ val string
+ line int
+}
+
+func (lx *lexer) nextItem() item {
+ for {
+ select {
+ case item := <-lx.items:
+ return item
+ default:
+ lx.state = lx.state(lx)
+ }
+ }
+}
+
+func lex(input string) *lexer {
+ lx := &lexer{
+ input: input + "\n",
+ state: lexTop,
+ line: 1,
+ items: make(chan item, 10),
+ stack: make([]stateFn, 0, 10),
+ }
+ return lx
+}
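+
+// A sketch (not upstream text) of how the parser drives this lexer: items
+// are pulled one at a time and the state machine advances lazily inside
+// nextItem.
+//
+// lx := lex(input)
+// for it := lx.nextItem(); it.typ != itemEOF; it = lx.nextItem() {
+// if it.typ == itemError {
+// // report it.val and stop
+// }
+// // otherwise dispatch on it.typ / it.val
+// }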
+
+func (lx *lexer) push(state stateFn) {
+ lx.stack = append(lx.stack, state)
+}
+
+func (lx *lexer) pop() stateFn {
+ if len(lx.stack) == 0 {
+ return lx.errorf("BUG in lexer: no states to pop.")
+ }
+ last := lx.stack[len(lx.stack)-1]
+ lx.stack = lx.stack[0 : len(lx.stack)-1]
+ return last
+}
+
+func (lx *lexer) current() string {
+ return lx.input[lx.start:lx.pos]
+}
+
+func (lx *lexer) emit(typ itemType) {
+ lx.items <- item{typ, lx.current(), lx.line}
+ lx.start = lx.pos
+}
+
+func (lx *lexer) emitTrim(typ itemType) {
+ lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+ lx.start = lx.pos
+}
+
+func (lx *lexer) next() (r rune) {
+ if lx.pos >= len(lx.input) {
+ lx.width = 0
+ return eof
+ }
+
+ if lx.input[lx.pos] == '\n' {
+ lx.line++
+ }
+ r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
+ lx.pos += lx.width
+ return r
+}
+
+// ignore skips over the pending input before this point.
+func (lx *lexer) ignore() {
+ lx.start = lx.pos
+}
+
+// backup steps back one rune. Can be called only once per call of next.
+func (lx *lexer) backup() {
+ lx.pos -= lx.width
+ if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
+ lx.line--
+ }
+}
+
+// accept consumes the next rune if it's equal to `valid`.
+func (lx *lexer) accept(valid rune) bool {
+ if lx.next() == valid {
+ return true
+ }
+ lx.backup()
+ return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+ r := lx.next()
+ lx.backup()
+ return r
+}
+
+// errorf stops all lexing by emitting an error and returning `nil`.
+// Special characters in reported values (new lines, tabs, etc.) are escaped
+// when callers format them with the %q verb.
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+ lx.items <- item{
+ itemError,
+ fmt.Sprintf(format, values...),
+ lx.line,
+ }
+ return nil
+}
+
+// lexTop consumes elements at the top level of TOML data.
+func lexTop(lx *lexer) stateFn {
+ r := lx.next()
+ if isWhitespace(r) || isNL(r) {
+ return lexSkip(lx, lexTop)
+ }
+
+ switch r {
+ case commentStart:
+ lx.push(lexTop)
+ return lexCommentStart
+ case tableStart:
+ return lexTableStart
+ case eof:
+ if lx.pos > lx.start {
+ return lx.errorf("Unexpected EOF.")
+ }
+ lx.emit(itemEOF)
+ return nil
+ }
+
+ // At this point, the only valid item can be a key, so we back up
+ // and let the key lexer do the rest.
+ lx.backup()
+ lx.push(lexTopEnd)
+ return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a new line. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == commentStart:
+ // a comment will read to a new line for us.
+ lx.push(lexTop)
+ return lexCommentStart
+ case isWhitespace(r):
+ return lexTopEnd
+ case isNL(r):
+ lx.ignore()
+ return lexTop
+ case r == eof:
+ lx.ignore()
+ return lexTop
+ }
+ return lx.errorf("Expected a top-level item to end with a new line, "+
+ "comment or EOF, but got %q instead.", r)
+}
+
+// lexTableStart lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+ if lx.peek() == arrayTableStart {
+ lx.next()
+ lx.emit(itemArrayTableStart)
+ lx.push(lexArrayTableEnd)
+ } else {
+ lx.emit(itemTableStart)
+ lx.push(lexTableEnd)
+ }
+ return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+ lx.emit(itemTableEnd)
+ return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+ if r := lx.next(); r != arrayTableEnd {
+ return lx.errorf("Expected end of table array name delimiter %q, "+
+ "but got %q instead.", arrayTableEnd, r)
+ }
+ lx.emit(itemArrayTableEnd)
+ return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+ switch r := lx.peek(); {
+ case r == tableEnd || r == eof:
+ return lx.errorf("Unexpected end of table name. (Table names cannot " +
+ "be empty.)")
+ case r == tableSep:
+ return lx.errorf("Unexpected table separator. (Table names cannot " +
+ "be empty.)")
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.push(lexTableNameEnd)
+ return lexValue // reuse string lexing
+ case isWhitespace(r):
+ return lexTableNameStart
+ default:
+ return lexBareTableName
+ }
+}
+
+// lexBareTableName lexes the bare name of a table. It assumes that at least
+// one valid character for the table has already been seen.
+func lexBareTableName(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case isBareKeyChar(r):
+ return lexBareTableName
+ case r == tableSep || r == tableEnd:
+ lx.backup()
+ lx.emitTrim(itemText)
+ return lexTableNameEnd
+ default:
+ return lx.errorf("Bare keys cannot contain %q.", r)
+ }
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case isWhitespace(r):
+ return lexTableNameEnd
+ case r == tableSep:
+ lx.ignore()
+ return lexTableNameStart
+ case r == tableEnd:
+ return lx.pop()
+ default:
+ return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
+ "instead.", r)
+ }
+}
+
+// lexKeyStart skips leading whitespace, emits itemKeyStart at the first
+// character of a key, and dispatches to the quoted or bare key lexer.
+func lexKeyStart(lx *lexer) stateFn {
+ r := lx.peek()
+ switch {
+ case r == keySep:
+ return lx.errorf("Unexpected key separator %q.", keySep)
+ case isWhitespace(r) || isNL(r):
+ lx.next()
+ return lexSkip(lx, lexKeyStart)
+ case r == stringStart || r == rawStringStart:
+ lx.ignore()
+ lx.emit(itemKeyStart)
+ lx.push(lexKeyEnd)
+ return lexValue // reuse string lexing
+ default:
+ lx.ignore()
+ lx.emit(itemKeyStart)
+ return lexBareKey
+ }
+}
+
+// lexBareKey consumes the text of a bare key. Assumes that the first character
+// (which is not whitespace) has not yet been consumed.
+func lexBareKey(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case isBareKeyChar(r):
+ return lexBareKey
+ case isWhitespace(r):
+ lx.emitTrim(itemText)
+ return lexKeyEnd
+ case r == keySep:
+ lx.backup()
+ lx.emitTrim(itemText)
+ return lexKeyEnd
+ default:
+ return lx.errorf("Bare keys cannot contain %q.", r)
+ }
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+ switch r := lx.next(); {
+ case r == keySep:
+ return lexSkip(lx, lexValue)
+ case isWhitespace(r):
+ return lexSkip(lx, lexKeyEnd)
+ default:
+ return lx.errorf("Expected key separator %q, but got %q instead.",
+ keySep, r)
+ }
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+ // We allow whitespace to precede a value, but NOT new lines.
+ // In array syntax, the array states are responsible for ignoring new
+ // lines.
+ r := lx.next()
+ if isWhitespace(r) {
+ return lexSkip(lx, lexValue)
+ }
+
+ switch {
+ case r == arrayStart:
+ lx.ignore()
+ lx.emit(itemArray)
+ return lexArrayValue
+ case r == stringStart:
+ if lx.accept(stringStart) {
+ if lx.accept(stringStart) {
+ lx.ignore() // Ignore """
+ return lexMultilineString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the '"'
+ return lexString
+ case r == rawStringStart:
+ if lx.accept(rawStringStart) {
+ if lx.accept(rawStringStart) {
+ lx.ignore() // Ignore '''
+ return lexMultilineRawString
+ }
+ lx.backup()
+ }
+ lx.ignore() // ignore the "'"
+ return lexRawString
+ case r == 't':
+ return lexTrue
+ case r == 'f':
+ return lexFalse
+ case r == '-':
+ return lexNumberStart
+ case isDigit(r):
+ lx.backup() // avoid an extra state and use the same as above
+ return lexNumberOrDateStart
+ case r == '.': // special error case, be kind to users
+ return lx.errorf("Floats must start with a digit, not '.'.")
+ }
+ return lx.errorf("Expected value but found %q instead.", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and new lines are ignored.
+func lexArrayValue(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValue)
+ case r == commentStart:
+ lx.push(lexArrayValue)
+ return lexCommentStart
+ case r == arrayValTerm:
+ return lx.errorf("Unexpected array value terminator %q.",
+ arrayValTerm)
+ case r == arrayEnd:
+ return lexArrayEnd
+ }
+
+ lx.backup()
+ lx.push(lexArrayValueEnd)
+ return lexValue
+}
+
+// lexArrayValueEnd consumes the cruft between values of an array. Namely,
+// it ignores whitespace and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isWhitespace(r) || isNL(r):
+ return lexSkip(lx, lexArrayValueEnd)
+ case r == commentStart:
+ lx.push(lexArrayValueEnd)
+ return lexCommentStart
+ case r == arrayValTerm:
+ lx.ignore()
+ return lexArrayValue // move on to the next value
+ case r == arrayEnd:
+ return lexArrayEnd
+ }
+ return lx.errorf("Expected an array value terminator %q or an array "+
+ "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
+}
+
+// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
+// just been consumed.
+func lexArrayEnd(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemArrayEnd)
+ return lx.pop()
+}
+
+// lexString consumes the inner contents of a string. It assumes that the
+// beginning '"' has already been consumed and ignored.
+func lexString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isNL(r):
+ return lx.errorf("Strings cannot contain new lines.")
+ case r == '\\':
+ lx.push(lexString)
+ return lexStringEscape
+ case r == stringEnd:
+ lx.backup()
+ lx.emit(itemString)
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ return lexString
+}
+
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == '\\':
+ return lexMultilineStringEscape
+ case r == stringEnd:
+ if lx.accept(stringEnd) {
+ if lx.accept(stringEnd) {
+ lx.backup()
+ lx.backup()
+ lx.backup()
+ lx.emit(itemMultilineString)
+ lx.next()
+ lx.next()
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ lx.backup()
+ }
+ }
+ return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isNL(r):
+ return lx.errorf("Strings cannot contain new lines.")
+ case r == rawStringEnd:
+ lx.backup()
+ lx.emit(itemRawString)
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ return lexRawString
+}
+
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
+// a string. It assumes that the beginning "'''" has already been consumed and
+// ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == rawStringEnd:
+ if lx.accept(rawStringEnd) {
+ if lx.accept(rawStringEnd) {
+ lx.backup()
+ lx.backup()
+ lx.backup()
+ lx.emit(itemRawMultilineString)
+ lx.next()
+ lx.next()
+ lx.next()
+ lx.ignore()
+ return lx.pop()
+ }
+ lx.backup()
+ }
+ }
+ return lexMultilineRawString
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+ // Handle the special case first:
+ if isNL(lx.next()) {
+ lx.next()
+ return lexMultilineString
+ } else {
+ lx.backup()
+ lx.push(lexMultilineString)
+ return lexStringEscape(lx)
+ }
+}
+
+func lexStringEscape(lx *lexer) stateFn {
+ r := lx.next()
+ switch r {
+ case 'b':
+ fallthrough
+ case 't':
+ fallthrough
+ case 'n':
+ fallthrough
+ case 'f':
+ fallthrough
+ case 'r':
+ fallthrough
+ case '"':
+ fallthrough
+ case '\\':
+ return lx.pop()
+ case 'u':
+ return lexShortUnicodeEscape
+ case 'U':
+ return lexLongUnicodeEscape
+ }
+ return lx.errorf("Invalid escape character %q. Only the following "+
+ "escape characters are allowed: "+
+ "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
+ "\\uXXXX and \\UXXXXXXXX.", r)
+}
+
+func lexShortUnicodeEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 4; i++ {
+ r = lx.next()
+ if !isHexadecimal(r) {
+ return lx.errorf("Expected four hexadecimal digits after '\\u', "+
+ "but got '%s' instead.", lx.current())
+ }
+ }
+ return lx.pop()
+}
+
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 8; i++ {
+ r = lx.next()
+ if !isHexadecimal(r) {
+ return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
+ "but got '%s' instead.", lx.current())
+ }
+ }
+ return lx.pop()
+}
+
+// lexNumberOrDateStart consumes either a (positive) integer, float or
+// datetime. It assumes that NO negative sign has been consumed.
+func lexNumberOrDateStart(lx *lexer) stateFn {
+ r := lx.next()
+ if !isDigit(r) {
+ if r == '.' {
+ return lx.errorf("Floats must start with a digit, not '.'.")
+ } else {
+ return lx.errorf("Expected a digit but got %q.", r)
+ }
+ }
+ return lexNumberOrDate
+}
+
+// lexNumberOrDate consumes either a (positive) integer, float or datetime.
+func lexNumberOrDate(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case r == '-':
+ if lx.pos-lx.start != 5 {
+ return lx.errorf("All ISO8601 dates must be in full Zulu form.")
+ }
+ return lexDateAfterYear
+ case isDigit(r):
+ return lexNumberOrDate
+ case r == '.':
+ return lexFloatStart
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
+// It assumes that "YYYY-" has already been consumed.
+func lexDateAfterYear(lx *lexer) stateFn {
+ formats := []rune{
+ // Digits are '0'; everything else must match exactly.
+ '0', '0', '-', '0', '0',
+ 'T',
+ '0', '0', ':', '0', '0', ':', '0', '0',
+ 'Z',
+ }
+ for _, f := range formats {
+ r := lx.next()
+ if f == '0' {
+ if !isDigit(r) {
+ return lx.errorf("Expected digit in ISO8601 datetime, "+
+ "but found %q instead.", r)
+ }
+ } else if f != r {
+ return lx.errorf("Expected %q in ISO8601 datetime, "+
+ "but found %q instead.", f, r)
+ }
+ }
+ lx.emit(itemDatetime)
+ return lx.pop()
+}
+
+// lexNumberStart consumes either an integer or a float. It assumes that
+// a negative sign has already been read, but that *no* digits have been
+// consumed. lexNumberStart will move to the appropriate integer or float
+// states.
+func lexNumberStart(lx *lexer) stateFn {
+ // we MUST see a digit. Even floats have to start with a digit.
+ r := lx.next()
+ if !isDigit(r) {
+ if r == '.' {
+ return lx.errorf("Floats must start with a digit, not '.'.")
+ } else {
+ return lx.errorf("Expected a digit but got %q.", r)
+ }
+ }
+ return lexNumber
+}
+
+// lexNumber consumes an integer or a float after seeing the first digit.
+func lexNumber(lx *lexer) stateFn {
+ r := lx.next()
+ switch {
+ case isDigit(r):
+ return lexNumber
+ case r == '.':
+ return lexFloatStart
+ }
+
+ lx.backup()
+ lx.emit(itemInteger)
+ return lx.pop()
+}
+
+// lexFloatStart starts the consumption of digits of a float after a '.'.
+// Namely, at least one digit is required.
+func lexFloatStart(lx *lexer) stateFn {
+ r := lx.next()
+ if !isDigit(r) {
+ return lx.errorf("Floats must have a digit after the '.', but got "+
+ "%q instead.", r)
+ }
+ return lexFloat
+}
+
+// lexFloat consumes the digits of a float after a '.'.
+// Assumes that one digit has been consumed after a '.' already.
+func lexFloat(lx *lexer) stateFn {
+ r := lx.next()
+ if isDigit(r) {
+ return lexFloat
+ }
+
+ lx.backup()
+ lx.emit(itemFloat)
+ return lx.pop()
+}
+
+// lexConst consumes the remaining characters s[1:] of the constant s. It
+// assumes that s[0] has already been consumed.
+func lexConst(lx *lexer, s string) stateFn {
+ for i := range s[1:] {
+ if r := lx.next(); r != rune(s[i+1]) {
+ return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
+ s[:i]+string(r))
+ }
+ }
+ return nil
+}
+
+// lexTrue consumes the "rue" in "true". It assumes that 't' has already
+// been consumed.
+func lexTrue(lx *lexer) stateFn {
+ if fn := lexConst(lx, "true"); fn != nil {
+ return fn
+ }
+ lx.emit(itemBool)
+ return lx.pop()
+}
+
+// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
+// been consumed.
+func lexFalse(lx *lexer) stateFn {
+ if fn := lexConst(lx, "false"); fn != nil {
+ return fn
+ }
+ lx.emit(itemBool)
+ return lx.pop()
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemCommentStart)
+ return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first new line character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+ r := lx.peek()
+ if isNL(r) || r == eof {
+ lx.emit(itemText)
+ return lx.pop()
+ }
+ lx.next()
+ return lexComment
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+ return func(lx *lexer) stateFn {
+ lx.ignore()
+ return nextState
+ }
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
+func isWhitespace(r rune) bool {
+ return r == '\t' || r == ' '
+}
+
+func isNL(r rune) bool {
+ return r == '\n' || r == '\r'
+}
+
+func isDigit(r rune) bool {
+ return r >= '0' && r <= '9'
+}
+
+func isHexadecimal(r rune) bool {
+ return (r >= '0' && r <= '9') ||
+ (r >= 'a' && r <= 'f') ||
+ (r >= 'A' && r <= 'F')
+}
+
+func isBareKeyChar(r rune) bool {
+ return (r >= 'A' && r <= 'Z') ||
+ (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') ||
+ r == '_' ||
+ r == '-'
+}
+
+func (itype itemType) String() string {
+ switch itype {
+ case itemError:
+ return "Error"
+ case itemNIL:
+ return "NIL"
+ case itemEOF:
+ return "EOF"
+ case itemText:
+ return "Text"
+ case itemString:
+ return "String"
+ case itemRawString:
+ return "String"
+ case itemMultilineString:
+ return "String"
+ case itemRawMultilineString:
+ return "String"
+ case itemBool:
+ return "Bool"
+ case itemInteger:
+ return "Integer"
+ case itemFloat:
+ return "Float"
+ case itemDatetime:
+ return "DateTime"
+ case itemTableStart:
+ return "TableStart"
+ case itemTableEnd:
+ return "TableEnd"
+ case itemKeyStart:
+ return "KeyStart"
+ case itemArray:
+ return "Array"
+ case itemArrayEnd:
+ return "ArrayEnd"
+ case itemCommentStart:
+ return "CommentStart"
+ case itemArrayTableStart:
+ return "ArrayTableStart"
+ case itemArrayTableEnd:
+ return "ArrayTableEnd"
+ }
+ panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+func (item item) String() string {
+ return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
new file mode 100644
index 0000000000..c6069be1f1
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -0,0 +1,498 @@
+package toml
+
+import (
+ "fmt"
+ "log"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+type parser struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ lx *lexer
+
+ // A list of keys in the order that they appear in the TOML data.
+ ordered []Key
+
+ // the full key for the current hash in scope
+ context Key
+
+ // the base key name for everything except hashes
+ currentKey string
+
+ // rough approximation of line number
+ approxLine int
+
+ // A map of 'key.group.names' to whether they were created implicitly.
+ implicits map[string]bool
+}
+
+type parseError string
+
+func (pe parseError) Error() string {
+ return string(pe)
+}
+
+func parse(data string) (p *parser, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ var ok bool
+ if err, ok = r.(parseError); ok {
+ return
+ }
+ panic(r)
+ }
+ }()
+
+ p = &parser{
+ mapping: make(map[string]interface{}),
+ types: make(map[string]tomlType),
+ lx: lex(data),
+ ordered: make([]Key, 0),
+ implicits: make(map[string]bool),
+ }
+ for {
+ item := p.next()
+ if item.typ == itemEOF {
+ break
+ }
+ p.topLevel(item)
+ }
+
+ return p, nil
+}
+
+func (p *parser) panicf(format string, v ...interface{}) {
+ msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+ p.approxLine, p.current(), fmt.Sprintf(format, v...))
+ panic(parseError(msg))
+}
+
+func (p *parser) next() item {
+ it := p.lx.nextItem()
+ if it.typ == itemError {
+ p.panicf("%s", it.val)
+ }
+ return it
+}
+
+func (p *parser) bug(format string, v ...interface{}) {
+ log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...))
+}
+
+func (p *parser) expect(typ itemType) item {
+ it := p.next()
+ p.assertEqual(typ, it.typ)
+ return it
+}
+
+func (p *parser) assertEqual(expected, got itemType) {
+ if expected != got {
+ p.bug("Expected '%s' but got '%s'.", expected, got)
+ }
+}
+
+func (p *parser) topLevel(item item) {
+ switch item.typ {
+ case itemCommentStart:
+ p.approxLine = item.line
+ p.expect(itemText)
+ case itemTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemTableEnd, kg.typ)
+
+ p.establishContext(key, false)
+ p.setType("", tomlHash)
+ p.ordered = append(p.ordered, key)
+ case itemArrayTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemArrayTableEnd, kg.typ)
+
+ p.establishContext(key, true)
+ p.setType("", tomlArrayHash)
+ p.ordered = append(p.ordered, key)
+ case itemKeyStart:
+ kname := p.next()
+ p.approxLine = kname.line
+ p.currentKey = p.keyString(kname)
+
+ val, typ := p.value(p.next())
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.currentKey = ""
+ default:
+ p.bug("Unexpected type at top level: %s", item.typ)
+ }
+}
+
+// keyString returns the string for a key (or part of a key in a table name).
+func (p *parser) keyString(it item) string {
+ switch it.typ {
+ case itemText:
+ return it.val
+ case itemString, itemMultilineString,
+ itemRawString, itemRawMultilineString:
+ s, _ := p.value(it)
+ return s.(string)
+ default:
+ p.bug("Unexpected key type: %s", it.typ)
+ panic("unreachable")
+ }
+}
+
+// value translates an expected value from the lexer into a Go value wrapped
+// as an empty interface.
+func (p *parser) value(it item) (interface{}, tomlType) {
+ switch it.typ {
+ case itemString:
+ return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+ case itemMultilineString:
+ trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
+ return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
+ case itemRawString:
+ return it.val, p.typeOfPrimitive(it)
+ case itemRawMultilineString:
+ return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+ case itemBool:
+ switch it.val {
+ case "true":
+ return true, p.typeOfPrimitive(it)
+ case "false":
+ return false, p.typeOfPrimitive(it)
+ }
+ p.bug("Expected boolean value, but got '%s'.", it.val)
+ case itemInteger:
+ num, err := strconv.ParseInt(it.val, 10, 64)
+ if err != nil {
+ // See comment below for floats describing why we make a
+ // distinction between a bug and a user error.
+ if e, ok := err.(*strconv.NumError); ok &&
+ e.Err == strconv.ErrRange {
+
+ p.panicf("Integer '%s' is out of the range of 64-bit "+
+ "signed integers.", it.val)
+ } else {
+ p.bug("Expected integer value, but got '%s'.", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+ case itemFloat:
+ num, err := strconv.ParseFloat(it.val, 64)
+ if err != nil {
+ // Distinguish float values. Normally, it'd be a bug if the lexer
+ // provides an invalid float, but it's possible that the float is
+ // out of range of valid values (which the lexer cannot determine).
+ // So mark the former as a bug but the latter as a legitimate user
+ // error.
+ //
+ // This is also true for integers.
+ if e, ok := err.(*strconv.NumError); ok &&
+ e.Err == strconv.ErrRange {
+
+ p.panicf("Float '%s' is out of the range of 64-bit "+
+ "IEEE-754 floating-point numbers.", it.val)
+ } else {
+ p.bug("Expected float value, but got '%s'.", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+ case itemDatetime:
+ t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
+ if err != nil {
+ p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val)
+ }
+ return t, p.typeOfPrimitive(it)
+ case itemArray:
+ array := make([]interface{}, 0)
+ types := make([]tomlType, 0)
+
+ for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ val, typ := p.value(it)
+ array = append(array, val)
+ types = append(types, typ)
+ }
+ return array, p.typeOfArray(types)
+ }
+ p.bug("Unexpected value type: %s", it.typ)
+ panic("unreachable")
+}
+
+// establishContext sets the current context of the parser,
+// where the context is either a hash or an array of hashes. Which one is
+// set depends on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) establishContext(key Key, array bool) {
+ var ok bool
+
+ // Always start at the top level and drill down for our context.
+ hashContext := p.mapping
+ keyContext := make(Key, 0)
+
+ // We only need implicit hashes for key[0:-1]
+ for _, k := range key[0 : len(key)-1] {
+ _, ok = hashContext[k]
+ keyContext = append(keyContext, k)
+
+ // No key? Make an implicit hash and move on.
+ if !ok {
+ p.addImplicit(keyContext)
+ hashContext[k] = make(map[string]interface{})
+ }
+
+ // If the hash context is actually an array of tables, then set
+ // the hash context to the last element in that array.
+ //
+ // Otherwise, it had better be a table, since this MUST be a key group (by
+ // virtue of it not being the last element in a key).
+ switch t := hashContext[k].(type) {
+ case []map[string]interface{}:
+ hashContext = t[len(t)-1]
+ case map[string]interface{}:
+ hashContext = t
+ default:
+ p.panicf("Key '%s' was already created as a hash.", keyContext)
+ }
+ }
+
+ p.context = keyContext
+ if array {
+ // If this is the first element for this array, then allocate a new
+ // list of tables for it.
+ k := key[len(key)-1]
+ if _, ok := hashContext[k]; !ok {
+ hashContext[k] = make([]map[string]interface{}, 0, 5)
+ }
+
+ // Add a new table. But make sure the key hasn't already been used
+ // for something else.
+ if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+ hashContext[k] = append(hash, make(map[string]interface{}))
+ } else {
+ p.panicf("Key '%s' was already created and cannot be used as "+
+ "an array.", keyContext)
+ }
+ } else {
+ p.setValue(key[len(key)-1], make(map[string]interface{}))
+ }
+ p.context = append(p.context, key[len(key)-1])
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, accounting for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+ var tmpHash interface{}
+ var ok bool
+
+ hash := p.mapping
+ keyContext := make(Key, 0)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ if tmpHash, ok = hash[k]; !ok {
+ p.bug("Context for key '%s' has not been established.", keyContext)
+ }
+ switch t := tmpHash.(type) {
+ case []map[string]interface{}:
+ // The context is an array of hashes. Pick the most recent hash
+ // defined as the current hash.
+ hash = t[len(t)-1]
+ case map[string]interface{}:
+ hash = t
+ default:
+ p.bug("Expected hash to have type 'map[string]interface{}', but "+
+ "it has '%T' instead.", tmpHash)
+ }
+ }
+ keyContext = append(keyContext, key)
+
+ if _, ok := hash[key]; ok {
+ // Typically, if the given key has already been set, then we have
+ // to raise an error since duplicate keys are disallowed. However,
+ // it's possible that a key was previously defined implicitly. In this
+ // case, it is allowed to be redefined concretely. (See the
+ // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+ //
+ // But we have to make sure to stop marking it as implicit. (So that
+ // another redefinition provokes an error.)
+ //
+ // Note that since it has already been defined (as a hash), we don't
+ // want to overwrite it. So our business is done.
+ if p.isImplicit(keyContext) {
+ p.removeImplicit(keyContext)
+ return
+ }
+
+ // Otherwise, we have a concrete key trying to override a previous
+ // key, which is *always* wrong.
+ p.panicf("Key '%s' has already been defined.", keyContext)
+ }
+ hash[key] = value
+}
+
+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
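+//
+// For example, after the header `[a.b]` establishes the context `a.b`,
+// calling setType("", tomlHash) records the type of `a.b` itself.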
+func (p *parser) setType(key string, typ tomlType) {
+ keyContext := make(Key, 0, len(p.context)+1)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ }
+ if len(key) > 0 { // allow type setting for hashes
+ keyContext = append(keyContext, key)
+ }
+ p.types[keyContext.String()] = typ
+}
+
+// addImplicit sets the given Key as having been created implicitly.
+func (p *parser) addImplicit(key Key) {
+ p.implicits[key.String()] = true
+}
+
+// removeImplicit stops tagging the given key as having been implicitly
+// created.
+func (p *parser) removeImplicit(key Key) {
+ p.implicits[key.String()] = false
+}
+
+// isImplicit returns true if the key group pointed to by the key was created
+// implicitly.
+func (p *parser) isImplicit(key Key) bool {
+ return p.implicits[key.String()]
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+ if len(p.currentKey) == 0 {
+ return p.context.String()
+ }
+ if len(p.context) == 0 {
+ return p.currentKey
+ }
+ return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
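+// stripFirstNewline removes a single leading newline, which TOML ignores at
+// the start of a multiline string.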
+func stripFirstNewline(s string) string {
+ if len(s) == 0 || s[0] != '\n' {
+ return s
+ }
+ return s[1:]
+}
+
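+// stripEscapedWhitespace removes the whitespace that follows a line-ending
+// backslash in a multiline basic string, as required by the TOML spec.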
+func stripEscapedWhitespace(s string) string {
+ esc := strings.Split(s, "\\\n")
+ if len(esc) > 1 {
+ for i := 1; i < len(esc); i++ {
+ esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+ }
+ }
+ return strings.Join(esc, "")
+}
+
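+// replaceEscapes expands backslash escape sequences (e.g. `\n`, `\t`,
+// `\uXXXX`) in a basic string into the characters they denote.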
+func (p *parser) replaceEscapes(str string) string {
+ var replaced []rune
+ s := []byte(str)
+ r := 0
+ for r < len(s) {
+ if s[r] != '\\' {
+ c, size := utf8.DecodeRune(s[r:])
+ r += size
+ replaced = append(replaced, c)
+ continue
+ }
+ r += 1
+ if r >= len(s) {
+ p.bug("Escape sequence at end of string.")
+ return ""
+ }
+ switch s[r] {
+ default:
+ p.bug("Expected valid escape code after \\, but got %q.", s[r])
+ return ""
+ case 'b':
+ replaced = append(replaced, rune(0x0008))
+ r += 1
+ case 't':
+ replaced = append(replaced, rune(0x0009))
+ r += 1
+ case 'n':
+ replaced = append(replaced, rune(0x000A))
+ r += 1
+ case 'f':
+ replaced = append(replaced, rune(0x000C))
+ r += 1
+ case 'r':
+ replaced = append(replaced, rune(0x000D))
+ r += 1
+ case '"':
+ replaced = append(replaced, rune(0x0022))
+ r += 1
+ case '\\':
+ replaced = append(replaced, rune(0x005C))
+ r += 1
+ case 'u':
+ // At this point, we know we have a Unicode escape of the form
+ // `uXXXX` at [r, r+5). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+ replaced = append(replaced, escaped)
+ r += 5
+ case 'U':
+ // At this point, we know we have a Unicode escape of the form
+ // `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+ replaced = append(replaced, escaped)
+ r += 9
+ }
+ }
+ return string(replaced)
+}
+
+func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+ s := string(bs)
+ hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+ if err != nil {
+ p.bug("Could not parse '%s' as a hexadecimal number, but the "+
+ "lexer claims it's OK: %s", s, err)
+ }
+
+ // BUG(burntsushi)
+ // I honestly don't understand how this works. I can't seem
+ // to find a way to make this fail. I figured this would fail on invalid
+ // UTF-8 characters like U+DCFF, but it doesn't.
+ if !utf8.ValidString(string(rune(hex))) {
+ p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+ }
+ return rune(hex)
+}
+
+func isStringType(ty itemType) bool {
+ return ty == itemString || ty == itemMultilineString ||
+ ty == itemRawString || ty == itemRawMultilineString
+}
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
new file mode 100755
index 0000000000..562164be06
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/session.vim
@@ -0,0 +1 @@
+au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
new file mode 100644
index 0000000000..c73f8afc1a
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_check.go
@@ -0,0 +1,91 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, we seem to be moving
+// toward adding real composite types.
+type tomlType interface {
+ typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+ if t1 == nil || t2 == nil {
+ return false
+ }
+ return t1.typeString() == t2.typeString()
+}
+
+func typeIsHash(t tomlType) bool {
+ return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string {
+ return string(btype)
+}
+
+func (btype tomlBaseType) String() string {
+ return btype.typeString()
+}
+
+var (
+ tomlInteger tomlBaseType = "Integer"
+ tomlFloat tomlBaseType = "Float"
+ tomlDatetime tomlBaseType = "Datetime"
+ tomlString tomlBaseType = "String"
+ tomlBool tomlBaseType = "Bool"
+ tomlArray tomlBaseType = "Array"
+ tomlHash tomlBaseType = "Hash"
+ tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemMultilineString, itemRawString,
+// itemRawMultilineString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+ switch lexItem.typ {
+ case itemInteger:
+ return tomlInteger
+ case itemFloat:
+ return tomlFloat
+ case itemDatetime:
+ return tomlDatetime
+ case itemString:
+ return tomlString
+ case itemMultilineString:
+ return tomlString
+ case itemRawString:
+ return tomlString
+ case itemRawMultilineString:
+ return tomlString
+ case itemBool:
+ return tomlBool
+ }
+ p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+ panic("unreachable")
+}
+
+// typeOfArray returns a tomlType for an array given a list of types of its
+// values.
+//
+// In the current spec, if an array is homogeneous, then its type is always
+// "Array". If the array is not homogeneous, an error is generated.
+func (p *parser) typeOfArray(types []tomlType) tomlType {
+ // Empty arrays are cool.
+ if len(types) == 0 {
+ return tomlArray
+ }
+
+ theType := types[0]
+ for _, t := range types[1:] {
+ if !typeEqual(theType, t) {
+ p.panicf("Array contains values of type '%s' and '%s', but "+
+ "arrays must be homogeneous.", theType, t)
+ }
+ }
+ return tomlArray
+}
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
new file mode 100644
index 0000000000..7592f87a45
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -0,0 +1,241 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+ "reflect"
+ "sort"
+ "sync"
+)
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string // the name of the field (`toml` tag included)
+ tag bool // whether field has a `toml` tag
+ index []int // represents the depth of an anonymous field
+ typ reflect.Type // the type of the field
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from toml tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that TOML should recognize for the given
+// type. The algorithm is breadth-first search over the set of structs to
+// include - the top struct and then any reachable anonymous structs.
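+//
+// For example, given
+//
+//   type Inner struct{ A int }
+//   type Outer struct {
+//       Inner
+//       B int `toml:"b"`
+//   }
+//
+// typeFields(Outer) returns the field "A" promoted from Inner along with the
+// tagged field "b" declared on Outer.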
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ name := sf.Tag.Get("toml")
+ if name == "-" {
+ continue
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, field{name, tagged, index, ft})
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ f := field{name: ft.Name(), index: index, typ: ft}
+ next = append(next, f)
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with TOML tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// TOML tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
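+//
+// For example, a field "X" declared directly on the outer struct dominates an
+// "X" promoted from an embedded struct, since it has the shorter index path.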
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000000..f2c2bc2111
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,66 @@
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000000..f090cb42f3
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000000..126cd1fc2b
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,425 @@
+# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc](https://godoc.org/github.com/Sirupsen/logrus). **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Output to stderr instead of stdout, could also be a file.
+ log.SetOutput(os.Stderr)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint that you should add a field. However, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an
+exception tracking service on `Error`, `Fatal` and `Panic`, to send info to
+StatsD, or to log to multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+Note: the syslog hook also supports connecting to a local syslog socket (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
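+
+As a quick illustration, a minimal sketch (the `source` field, the message and
+the timestamp are placeholder values), with the kind of default text-formatter
+output you'd see shown as a comment:
+
+```go
+log.WithFields(log.Fields{
+  "source": "api",
+}).Info("Request handled")
+
+// time="2015-09-07T08:48:33Z" level=info msg="Request handled" source=api
+```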
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment, you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY, set the
+ `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third party logging formatters:
+
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func init() {
+  log.SetFormatter(new(MyJSONFormatter))
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can initialize a logger's level, hook and formatter from a config file, so the logger is generated with a different config in each environment.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps it with spf13/Viper to load configuration with fangs! To simplify Logrus configuration it reuses some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := test.NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```go
+// ...
+handler := func() {
+  // gracefully shut down something...
+}
+logrus.RegisterExitHandler(handler)
+// ...
+```
+
+#### Thread safety
+
+By default, Logger is protected by a mutex for concurrent writes; this mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situations where locking is not needed include:
+
+* You have no hooks registered, or calling hooks is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+  (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
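+
+As a minimal sketch of that escape hatch (assuming the conditions above
+actually hold for your logger):
+
+```go
+logger := logrus.New()
+
+// No hooks are registered and writes to logger.Out are already atomic, so
+// the internal mutex is unnecessary overhead.
+logger.SetNoLock()
+```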
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000000..b4c9e84754
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+ "fmt"
+ "os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+ defer func() {
+ if err := recover(); err != nil {
+ fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+ }
+ }()
+
+ handler()
+}
+
+func runHandlers() {
+ for _, handler := range handlers {
+ runHandler(handler)
+ }
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+ runHandlers()
+ os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler; call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+ handlers = append(handlers, handler)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 0000000000..dddd5f877b
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/Sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000000..4edbe7a2de
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,275 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+ bufferPool = &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+
+ // When the formatter is called in entry.log(), a Buffer may be set on the entry
+ Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ if err != nil {
+ return "", err
+ }
+ str := string(serialized)
+ return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := make(Fields, len(entry.Data)+len(fields))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines.
+func (entry Entry) log(level Level, msg string) {
+ var buffer *bytes.Buffer
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+ buffer = bufferPool.Get().(*bytes.Buffer)
+ buffer.Reset()
+ defer bufferPool.Put(buffer)
+ entry.Buffer = buffer
+ serialized, err := entry.Logger.Formatter.Format(&entry)
+ entry.Buffer = nil
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ } else {
+ entry.Logger.mu.Lock()
+ _, err = entry.Logger.Out.Write(serialized)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+ entry.Logger.mu.Unlock()
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000000..9a0120ac1d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000000..b5fbe934d1
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code wasn't there, doing:
+//
+// logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ if t, ok := data["time"]; ok {
+ data["fields.time"] = t
+ }
+
+ if m, ok := data["msg"]; ok {
+ data["fields.msg"] = m
+ }
+
+ if l, ok := data["level"]; ok {
+ data["fields.level"] = l
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000000..3f151cdc39
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers; you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000000..2ad6dc5cf4
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,41 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// JSONFormatter formats log entries as JSON objects.
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/Sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+ prefixFieldClashes(data)
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+
+ data["time"] = entry.Time.Format(timestampFormat)
+ data["msg"] = entry.Message
+ data["level"] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000000..b769f3d352
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,308 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it as the default, which is `os.Stderr`. You can also set
+ // this to something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged. `logrus.Debug` is useful in development.
+ Level Level
+ // Used to sync writing to the log. Locking is enabled by default.
+ mu MutexWrap
+ // Reusable empty entry
+ entryPool sync.Pool
+}
+
+type MutexWrap struct {
+ lock sync.Mutex
+ disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+ if !mw.disabled {
+ mw.lock.Lock()
+ }
+}
+
+func (mw *MutexWrap) Unlock() {
+ if !mw.disabled {
+ mw.lock.Unlock()
+ }
+}
+
+func (mw *MutexWrap) Disable() {
+ mw.disabled = true
+}
+
+// New creates a new logger. Configuration should be set by changing
+// `Formatter`, `Out` and `Hooks` directly on the returned logger instance.
+// You can also just instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ }
+}
+
+func (logger *Logger) newEntry() *Entry {
+ entry, ok := logger.entryPool.Get().(*Entry)
+ if ok {
+ return entry
+ }
+ return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+ logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry, note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infof(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Printf(format, args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalf(format, args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debug(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Error(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatal(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panic(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ if logger.Level >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ if logger.Level >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infoln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Println(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ if logger.Level >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ if logger.Level >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ if logger.Level >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalln(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ if logger.Level >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+// When a file is opened in append mode, it's safe to write to it
+// concurrently (for messages under 4k on Linux). In these cases the
+// user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+ logger.mu.Disable()
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000000..e596691116
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch strings.ToLower(lvl) {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
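+
+// For example (illustrative only):
+//
+//	level, err := ParseLevel("warn") // WarnLevel, nil
+//	_, err = ParseLevel("loud")      // error: not a valid logrus Level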
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level on
+// your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way
+// it'll accept both a stdlib logger and a logrus logger. There's no
+// standard interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
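+
+// As an illustrative sketch (the function below is hypothetical), a library
+// written against StdLogger accepts either logger:
+//
+//	func doWork(logger StdLogger) {
+//		logger.Println("work started")
+//	}
+//
+// Callers may then pass `log.New(os.Stderr, "", log.LstdFlags)` or the result
+// of `logrus.New()`.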
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 0000000000..1960169ef2
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package logrus
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ return true
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000000..5f6be4d3c0
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000000..308160ca80
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000000..329038f6ca
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,22 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stderr
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 0000000000..a3c6f6e7df
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris,!appengine
+
+package logrus
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+ _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+ return err == nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000000..3727e8adfb
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,!appengine
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stderr
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000000..9114b3ca47
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,168 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+ gray = 37
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+ return int(time.Since(baseTimestamp) / time.Second)
+}
+
+// TextFormatter formats log entries as colored or plain key=value text.
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+ // Disable timestamp logging. Useful when output is redirected to a logging
+ // system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ var b *bytes.Buffer
+ keys := make([]string, 0, len(entry.Data))
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ prefixFieldClashes(entry.Data)
+
+ isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+ isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+ if isColored {
+ f.printColored(b, entry, keys, timestampFormat)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ if entry.Message != "" {
+ f.appendKeyValue(b, "msg", entry.Message)
+ }
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+ f.appendValue(b, v)
+ }
+}
+
+func needsQuoting(text string) bool {
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.') {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+
+ b.WriteString(key)
+ b.WriteByte('=')
+ f.appendValue(b, value)
+ b.WriteByte(' ')
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+ switch value := value.(type) {
+ case string:
+ if !needsQuoting(value) {
+ b.WriteString(value)
+ } else {
+ fmt.Fprintf(b, "%q", value)
+ }
+ case error:
+ errmsg := value.Error()
+ if !needsQuoting(errmsg) {
+ b.WriteString(errmsg)
+ } else {
+ fmt.Fprintf(b, "%q", errmsg)
+ }
+ default:
+ fmt.Fprint(b, value)
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000000..f74d2aa5fc
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+// Writer returns an io.PipeWriter whose writes are logged at InfoLevel.
+func (logger *Logger) Writer() *io.PipeWriter {
+ return logger.WriterLevel(InfoLevel)
+}
+
+// WriterLevel returns an io.PipeWriter; each line written to it is logged at
+// the given level.
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ var printFunc func(args ...interface{})
+ switch level {
+ case DebugLevel:
+ printFunc = logger.Debug
+ case InfoLevel:
+ printFunc = logger.Info
+ case WarnLevel:
+ printFunc = logger.Warn
+ case ErrorLevel:
+ printFunc = logger.Error
+ case FatalLevel:
+ printFunc = logger.Fatal
+ case PanicLevel:
+ printFunc = logger.Panic
+ default:
+ printFunc = logger.Print
+ }
+
+ go logger.writerScanner(reader, printFunc)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ printFunc(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ logger.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
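+
+// A hedged usage sketch: the pipe writer above can redirect the standard
+// library's logger into logrus (assuming the stdlib `log` package):
+//
+//	logger := New()
+//	w := logger.WriterLevel(WarnLevel)
+//	defer w.Close()
+//	log.SetOutput(w)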
diff --git a/vendor/github.com/cloudflare/cfssl/LICENSE b/vendor/github.com/cloudflare/cfssl/LICENSE
new file mode 100644
index 0000000000..bc5841fa55
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2014 CloudFlare Inc.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/cloudflare/cfssl/api/api.go b/vendor/github.com/cloudflare/cfssl/api/api.go
new file mode 100644
index 0000000000..a3b8a0cd0c
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/api/api.go
@@ -0,0 +1,235 @@
+// Package api implements an HTTP-based API and server for CFSSL.
+package api
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/log"
+)
+
+// Handler is an interface providing a generic mechanism for handling HTTP requests.
+type Handler interface {
+ Handle(w http.ResponseWriter, r *http.Request) error
+}
+
+// HTTPHandler is a wrapper that encapsulates the Handler interface as an http.Handler.
+// HTTPHandler also enforces that the Handler only responds to requests with registered HTTP methods.
+type HTTPHandler struct {
+ Handler // CFSSL handler
+ Methods []string // The associated HTTP methods
+}
+
+// HandlerFunc is similar to the http.HandlerFunc type; it serves as
+// an adapter allowing the use of ordinary functions as Handlers. If
+// f is a function with the appropriate signature, HandlerFunc(f) is a
+// Handler object that calls f.
+type HandlerFunc func(http.ResponseWriter, *http.Request) error
+
+// Handle calls f(w, r)
+func (f HandlerFunc) Handle(w http.ResponseWriter, r *http.Request) error {
+ w.Header().Set("Content-Type", "application/json")
+ return f(w, r)
+}
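+
+// As an illustrative sketch (the route and handler are hypothetical), a
+// HandlerFunc is typically wrapped in an HTTPHandler and registered like:
+//
+//	http.Handle("/api/v1/cfssl/ping", HTTPHandler{
+//		Handler: HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
+//			return SendResponse(w, "pong")
+//		}),
+//		Methods: []string{"GET"},
+//	})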
+
+// HandleError is the exported wrapper around handleError.
+func HandleError(w http.ResponseWriter, err error) (code int) {
+ return handleError(w, err)
+}
+
+// handleError is the centralised error handling and reporting.
+func handleError(w http.ResponseWriter, err error) (code int) {
+ if err == nil {
+ return http.StatusOK
+ }
+ msg := err.Error()
+ httpCode := http.StatusInternalServerError
+
+ // If the error is recognized as an HTTPError emitted from cfssl,
+ // rewrite the status code accordingly. If it is a cfssl Error,
+ // set the HTTP status to StatusBadRequest.
+ switch err := err.(type) {
+ case *errors.HTTPError:
+ httpCode = err.StatusCode
+ code = err.StatusCode
+ case *errors.Error:
+ httpCode = http.StatusBadRequest
+ code = err.ErrorCode
+ msg = err.Message
+ }
+
+ response := NewErrorResponse(msg, code)
+ jsonMessage, err := json.Marshal(response)
+ if err != nil {
+ log.Errorf("Failed to marshal JSON: %v", err)
+ } else {
+ msg = string(jsonMessage)
+ }
+ http.Error(w, msg, httpCode)
+ return code
+}
+
+// ServeHTTP encapsulates the call to underlying Handler to handle the request
+// and return the response with proper HTTP status code
+func (h HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ var err error
+ var match bool
+ // Respond with 405 when requested with an unsupported verb.
+ for _, m := range h.Methods {
+ if m == r.Method {
+ match = true
+ }
+ }
+ if match {
+ err = h.Handle(w, r)
+ } else {
+ err = errors.NewMethodNotAllowed(r.Method)
+ }
+ status := handleError(w, err)
+ log.Infof("%s - \"%s %s\" %d", r.RemoteAddr, r.Method, r.URL, status)
+}
+
+// readRequestBlob reads a JSON-blob-encoded request body in the form
+// map[string]string and returns it along with any error that occurred.
+func readRequestBlob(r *http.Request) (map[string]string, error) {
+ var blob map[string]string
+
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return nil, err
+ }
+ r.Body.Close()
+
+ err = json.Unmarshal(body, &blob)
+ if err != nil {
+ return nil, err
+ }
+ return blob, nil
+}
+
+// ProcessRequestOneOf reads a JSON blob for the request and makes
+// sure it contains one of a set of keywords. For example, a request
+// might have the ('foo' && 'bar') keys, OR it might have the 'baz'
+// key. In either case, we want to accept the request; however, if
+// none of these sets shows up, the request is a bad request, and an
+// error should be returned.
+func ProcessRequestOneOf(r *http.Request, keywordSets [][]string) (map[string]string, []string, error) {
+ blob, err := readRequestBlob(r)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var matched []string
+ for _, set := range keywordSets {
+ if matchKeywords(blob, set) {
+ if matched != nil {
+ return nil, nil, errors.NewBadRequestString("mismatched parameters")
+ }
+ matched = set
+ }
+ }
+ if matched == nil {
+ return nil, nil, errors.NewBadRequestString("no valid parameter sets found")
+ }
+ return blob, matched, nil
+}
+
+// ProcessRequestFirstMatchOf reads a JSON blob for the request and returns
+// the first match of a set of keywords. For example, a request
+// might have one of the following combinations: (foo=1, bar=2), (foo=1), and (bar=2).
+// By giving a specific ordering of those combinations, we can decide how to
+// accept the request.
+func ProcessRequestFirstMatchOf(r *http.Request, keywordSets [][]string) (map[string]string, []string, error) {
+ blob, err := readRequestBlob(r)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ for _, set := range keywordSets {
+ if matchKeywords(blob, set) {
+ return blob, set, nil
+ }
+ }
+ return nil, nil, errors.NewBadRequestString("no valid parameter sets found")
+}
+
+func matchKeywords(blob map[string]string, keywords []string) bool {
+ for _, keyword := range keywords {
+ if _, ok := blob[keyword]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// ResponseMessage implements the standard for response errors and
+// messages. A message has a code and a string message.
+type ResponseMessage struct {
+ Code int `json:"code"`
+ Message string `json:"message"`
+}
+
+// Response implements the CloudFlare standard for API
+// responses.
+type Response struct {
+ Success bool `json:"success"`
+ Result interface{} `json:"result"`
+ Errors []ResponseMessage `json:"errors"`
+ Messages []ResponseMessage `json:"messages"`
+}
+
+// NewSuccessResponse is a shortcut for creating new successful API
+// responses.
+func NewSuccessResponse(result interface{}) Response {
+ return Response{
+ Success: true,
+ Result: result,
+ Errors: []ResponseMessage{},
+ Messages: []ResponseMessage{},
+ }
+}
+
+// NewSuccessResponseWithMessage is a shortcut for creating new successful API
+// responses that includes a message.
+func NewSuccessResponseWithMessage(result interface{}, message string, code int) Response {
+ return Response{
+ Success: true,
+ Result: result,
+ Errors: []ResponseMessage{},
+ Messages: []ResponseMessage{{code, message}},
+ }
+}
+
+// NewErrorResponse is a shortcut for creating an error response for a
+// single error.
+func NewErrorResponse(message string, code int) Response {
+ return Response{
+ Success: false,
+ Result: nil,
+ Errors: []ResponseMessage{{code, message}},
+ Messages: []ResponseMessage{},
+ }
+}
+
+// SendResponse builds a response from the result, sets the JSON
+// header, and writes to the http.ResponseWriter.
+func SendResponse(w http.ResponseWriter, result interface{}) error {
+ response := NewSuccessResponse(result)
+ w.Header().Set("Content-Type", "application/json")
+ enc := json.NewEncoder(w)
+ err := enc.Encode(response)
+ return err
+}
+
+// SendResponseWithMessage builds a response from the result and the
+// provided message, sets the JSON header, and writes to the
+// http.ResponseWriter.
+func SendResponseWithMessage(w http.ResponseWriter, result interface{}, message string, code int) error {
+ response := NewSuccessResponseWithMessage(result, message, code)
+ w.Header().Set("Content-Type", "application/json")
+ enc := json.NewEncoder(w)
+ err := enc.Encode(response)
+ return err
+}
diff --git a/vendor/github.com/cloudflare/cfssl/api/client/api.go b/vendor/github.com/cloudflare/cfssl/api/client/api.go
new file mode 100644
index 0000000000..22abf87774
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/api/client/api.go
@@ -0,0 +1,6 @@
+package client
+
+// SignResult is the result of signing a CSR.
+type SignResult struct {
+ Certificate []byte `json:"certificate"`
+}
diff --git a/vendor/github.com/cloudflare/cfssl/api/client/client.go b/vendor/github.com/cloudflare/cfssl/api/client/client.go
new file mode 100644
index 0000000000..3fe4479574
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/api/client/client.go
@@ -0,0 +1,332 @@
+// Package client implements a Go client for CFSSL API commands.
+package client
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/json"
+ stderr "errors"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/cloudflare/cfssl/api"
+ "github.com/cloudflare/cfssl/auth"
+ "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/info"
+ "github.com/cloudflare/cfssl/log"
+)
+
+// A server points to a single remote CFSSL instance.
+type server struct {
+ URL string
+ TLSConfig *tls.Config
+ reqModifier func(*http.Request, []byte)
+}
+
+// A Remote points to at least one (but possibly multiple) remote
+// CFSSL instances. It must be able to perform authenticated and
+// unauthenticated certificate signing requests, return information
+// about the CA on the other end, and return a list of the hosts that
+// are used by the remote.
+type Remote interface {
+ AuthSign(req, id []byte, provider auth.Provider) ([]byte, error)
+ Sign(jsonData []byte) ([]byte, error)
+ Info(jsonData []byte) (*info.Resp, error)
+ Hosts() []string
+ SetReqModifier(func(*http.Request, []byte))
+}
+
+// NewServer sets up a new server target. The address should be of
+// the format [protocol:]name[:port] of the remote CFSSL instance.
+// If no protocol is given, http is the default. If no port
+// is specified, the CFSSL default port (8888) is used. If the name is
+// a comma-separated list of hosts, an ordered group will be returned.
+func NewServer(addr string) Remote {
+ return NewServerTLS(addr, nil)
+}
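+
+// Illustrative calls (the hosts are hypothetical):
+//
+//	single := NewServer("ca.example.com")                           // http://ca.example.com:8888
+//	group := NewServer("ca1.example.com:8888,ca2.example.com:8888") // ordered group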
+
+// NewServerTLS is the TLS version of NewServer
+func NewServerTLS(addr string, tlsConfig *tls.Config) Remote {
+ addrs := strings.Split(addr, ",")
+
+ var remote Remote
+
+ if len(addrs) > 1 {
+ remote, _ = NewGroup(addrs, tlsConfig, StrategyOrderedList)
+ } else {
+ u, err := normalizeURL(addrs[0])
+ if err != nil {
+ return nil
+ }
+ srv, _ := newServer(u, tlsConfig)
+ if srv != nil {
+ remote = srv
+ }
+ }
+ return remote
+}
+
+func (srv *server) Hosts() []string {
+ return []string{srv.URL}
+}
+
+func (srv *server) SetReqModifier(mod func(*http.Request, []byte)) {
+ srv.reqModifier = mod
+}
+
+func newServer(u *url.URL, tlsConfig *tls.Config) (*server, error) {
+ URL := u.String()
+ return &server{URL, tlsConfig, nil}, nil
+}
+
+func (srv *server) getURL(endpoint string) string {
+ return fmt.Sprintf("%s/api/v1/cfssl/%s", srv.URL, endpoint)
+}
+
+func (srv *server) createTLSTransport() (transport *http.Transport) {
+ // Setup HTTPS client
+ tlsConfig := srv.TLSConfig
+ tlsConfig.BuildNameToCertificate()
+ return &http.Transport{TLSClientConfig: tlsConfig}
+}
+
+// post connects to the remote server and returns a Response struct
+func (srv *server) post(url string, jsonData []byte) (*api.Response, error) {
+ var resp *http.Response
+ var err error
+ client := &http.Client{}
+ if srv.TLSConfig != nil {
+ client.Transport = srv.createTLSTransport()
+ }
+ req, err := http.NewRequest("POST", url, bytes.NewReader(jsonData))
+ if err != nil {
+ err = fmt.Errorf("failed POST to %s: %v", url, err)
+ return nil, errors.Wrap(errors.APIClientError, errors.ClientHTTPError, err)
+ }
+ req.Header.Set("content-type", "application/json")
+ if srv.reqModifier != nil {
+ srv.reqModifier(req, jsonData)
+ }
+ resp, err = client.Do(req)
+ if err != nil {
+ err = fmt.Errorf("failed POST to %s: %v", url, err)
+ return nil, errors.Wrap(errors.APIClientError, errors.ClientHTTPError, err)
+ }
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, errors.Wrap(errors.APIClientError, errors.IOError, err)
+ }
+ resp.Body.Close()
+
+ if resp.StatusCode != http.StatusOK {
+ log.Errorf("http error with %s", url)
+ return nil, errors.Wrap(errors.APIClientError, errors.ClientHTTPError, stderr.New(string(body)))
+ }
+
+ var response api.Response
+ err = json.Unmarshal(body, &response)
+ if err != nil {
+ log.Debug("Unable to parse response body:", string(body))
+ return nil, errors.Wrap(errors.APIClientError, errors.JSONError, err)
+ }
+
+ if !response.Success || response.Result == nil {
+ if len(response.Errors) > 0 {
+ return nil, errors.Wrap(errors.APIClientError, errors.ServerRequestFailed, stderr.New(response.Errors[0].Message))
+ }
+ return nil, errors.New(errors.APIClientError, errors.ServerRequestFailed)
+ }
+
+ return &response, nil
+}
+
+// AuthSign fills out an authenticated signing request to the server,
+// receiving a certificate or error in response.
+// It takes the serialized JSON request to send, remote address and
+// authentication provider.
+func (srv *server) AuthSign(req, id []byte, provider auth.Provider) ([]byte, error) {
+ return srv.authReq(req, id, provider, "sign")
+}
+
+// AuthInfo fills out an authenticated info request to the server,
+// receiving a certificate or error in response.
+// It takes the serialized JSON request to send, remote address and
+// authentication provider.
+func (srv *server) AuthInfo(req, id []byte, provider auth.Provider) ([]byte, error) {
+ return srv.authReq(req, id, provider, "info")
+}
+
+// authReq is the common logic for AuthSign and AuthInfo -- perform the given
+// request, and return the resultant certificate.
+// The target is either 'sign' or 'info'.
+func (srv *server) authReq(req, ID []byte, provider auth.Provider, target string) ([]byte, error) {
+ url := srv.getURL("auth" + target)
+
+ token, err := provider.Token(req)
+ if err != nil {
+ return nil, errors.Wrap(errors.APIClientError, errors.AuthenticationFailure, err)
+ }
+
+ aReq := &auth.AuthenticatedRequest{
+ Timestamp: time.Now().Unix(),
+ RemoteAddress: ID,
+ Token: token,
+ Request: req,
+ }
+
+ jsonData, err := json.Marshal(aReq)
+ if err != nil {
+ return nil, errors.Wrap(errors.APIClientError, errors.JSONError, err)
+ }
+
+ response, err := srv.post(url, jsonData)
+ if err != nil {
+ return nil, err
+ }
+
+ result, ok := response.Result.(map[string]interface{})
+ if !ok {
+ return nil, errors.New(errors.APIClientError, errors.JSONError)
+ }
+
+ cert, ok := result["certificate"].(string)
+ if !ok {
+ return nil, errors.New(errors.APIClientError, errors.JSONError)
+ }
+
+ return []byte(cert), nil
+}
+
+// Sign sends a signature request to the remote CFSSL server,
+// receiving a signed certificate or an error in response.
+// It takes the serialized JSON request to send.
+func (srv *server) Sign(jsonData []byte) ([]byte, error) {
+ return srv.request(jsonData, "sign")
+}
+
+// Info sends an info request to the remote CFSSL server, receiving a
+// response or an error in response.
+// It takes the serialized JSON request to send.
+func (srv *server) Info(jsonData []byte) (*info.Resp, error) {
+ res, err := srv.getResultMap(jsonData, "info")
+ if err != nil {
+ return nil, err
+ }
+
+ info := new(info.Resp)
+
+ if val, ok := res["certificate"]; ok {
+ info.Certificate = val.(string)
+ }
+ var usages []interface{}
+ if val, ok := res["usages"]; ok && val != nil {
+ usages = val.([]interface{})
+ }
+ if val, ok := res["expiry"]; ok && val != nil {
+ info.ExpiryString = val.(string)
+ }
+
+ info.Usage = make([]string, len(usages))
+ for i, s := range usages {
+ info.Usage[i] = s.(string)
+ }
+
+ return info, nil
+}
+
+func (srv *server) getResultMap(jsonData []byte, target string) (result map[string]interface{}, err error) {
+ url := srv.getURL(target)
+ response, err := srv.post(url, jsonData)
+ if err != nil {
+ return
+ }
+ result, ok := response.Result.(map[string]interface{})
+ if !ok {
+ err = errors.Wrap(errors.APIClientError, errors.ClientHTTPError, stderr.New("response is formatted improperly"))
+ return
+ }
+ return
+}
+
+// request performs the common logic for Sign and Info, performing the actual
+// request and returning the resultant certificate.
+func (srv *server) request(jsonData []byte, target string) ([]byte, error) {
+ result, err := srv.getResultMap(jsonData, target)
+ if err != nil {
+ return nil, err
+ }
+ cert, ok := result["certificate"].(string)
+ if ok && cert != "" {
+ return []byte(cert), nil
+ }
+
+ return nil, errors.Wrap(errors.APIClientError, errors.ClientHTTPError, stderr.New("response doesn't contain certificate."))
+}
+
+// AuthRemote acts as a Remote with a default Provider for AuthSign.
+type AuthRemote struct {
+ Remote
+ provider auth.Provider
+}
+
+// NewAuthServer sets up a new auth server target with an addr
+// in the same format at NewServer and a default authentication provider to
+// use for Sign requests.
+func NewAuthServer(addr string, tlsConfig *tls.Config, provider auth.Provider) *AuthRemote {
+ return &AuthRemote{
+ Remote: NewServerTLS(addr, tlsConfig),
+ provider: provider,
+ }
+}
+
+// Sign is overloaded to perform an AuthSign request using the default auth provider.
+func (ar *AuthRemote) Sign(req []byte) ([]byte, error) {
+ return ar.AuthSign(req, nil, ar.provider)
+}
+
+// normalizeURL checks for an http/https protocol and appends "http" as the default protocol if none is defined in the URL
+func normalizeURL(addr string) (*url.URL, error) {
+ addr = strings.TrimSpace(addr)
+
+ u, err := url.Parse(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Opaque != "" {
+ u.Host = net.JoinHostPort(u.Scheme, u.Opaque)
+ u.Opaque = ""
+ } else if u.Path != "" && !strings.Contains(u.Path, ":") {
+ u.Host = net.JoinHostPort(u.Path, "8888")
+ u.Path = ""
+ } else if u.Scheme == "" {
+ u.Host = u.Path
+ u.Path = ""
+ }
+
+ if u.Scheme != "https" {
+ u.Scheme = "http"
+ }
+
+ _, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ _, port, err = net.SplitHostPort(u.Host + ":8888")
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if port != "" {
+ _, err = strconv.Atoi(port)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return u, nil
+}
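+
+// For illustration, a best-effort reading of normalizeURL above (these
+// examples are inferred from the code, not upstream documentation):
+//
+//	normalizeURL("ca.example.com")         // -> http://ca.example.com:8888
+//	normalizeURL("ca.example.com:8443")    // -> http://ca.example.com:8443
+//	normalizeURL("https://ca.example.com") // -> https://ca.example.com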
diff --git a/vendor/github.com/cloudflare/cfssl/api/client/group.go b/vendor/github.com/cloudflare/cfssl/api/client/group.go
new file mode 100644
index 0000000000..ac2d22b21b
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/api/client/group.go
@@ -0,0 +1,118 @@
+package client
+
+import (
+ "crypto/tls"
+ "errors"
+ "net/http"
+ "strings"
+
+ "github.com/cloudflare/cfssl/auth"
+ "github.com/cloudflare/cfssl/info"
+)
+
+// Strategy is the means by which the server to use as a remote should
+// be selected.
+type Strategy int
+
+const (
+ // StrategyInvalid indicates any strategy that is unsupported
+ // or returned when no strategy is applicable.
+ StrategyInvalid = iota
+
+ // StrategyOrderedList is a sequential list of servers: if the
+ // first server cannot be reached, the next is used. The
+ // client will proceed in this manner until the list of
+ // servers is exhausted, and then an error is returned.
+ StrategyOrderedList
+)
+
+var strategyStrings = map[string]Strategy{
+ "ordered_list": StrategyOrderedList,
+}
+
+// StrategyFromString takes a string describing a strategy and returns the
+// corresponding Strategy, or StrategyInvalid if the string isn't recognised.
+func StrategyFromString(s string) Strategy {
+ s = strings.TrimSpace(strings.ToLower(s))
+ strategy, ok := strategyStrings[s]
+ if !ok {
+ return StrategyInvalid
+ }
+ return strategy
+}
+
+// NewGroup will use the collection of remotes specified with the
+// given strategy.
+func NewGroup(remotes []string, tlsConfig *tls.Config, strategy Strategy) (Remote, error) {
+ var servers = make([]*server, len(remotes))
+ for i := range remotes {
+ u, err := normalizeURL(remotes[i])
+ if err != nil {
+ return nil, err
+ }
+ servers[i], _ = newServer(u, tlsConfig)
+ }
+
+ switch strategy {
+ case StrategyOrderedList:
+ return newOrderedListGroup(servers)
+ default:
+ return nil, errors.New("unrecognised strategy")
+ }
+}
+
+type orderedListGroup struct {
+ remotes []*server
+}
+
+func (g *orderedListGroup) Hosts() []string {
+ var hosts = make([]string, 0, len(g.remotes))
+ for _, srv := range g.remotes {
+ srvHosts := srv.Hosts()
+ hosts = append(hosts, srvHosts[0])
+ }
+ return hosts
+}
+
+func newOrderedListGroup(remotes []*server) (Remote, error) {
+ return &orderedListGroup{
+ remotes: remotes,
+ }, nil
+}
+
+func (g *orderedListGroup) AuthSign(req, id []byte, provider auth.Provider) (resp []byte, err error) {
+ for i := range g.remotes {
+ resp, err = g.remotes[i].AuthSign(req, id, provider)
+ if err == nil {
+ return resp, nil
+ }
+ }
+
+ return nil, err
+}
+
+func (g *orderedListGroup) Sign(jsonData []byte) (resp []byte, err error) {
+ for i := range g.remotes {
+ resp, err = g.remotes[i].Sign(jsonData)
+ if err == nil {
+ return resp, nil
+ }
+ }
+
+ return nil, err
+}
+
+func (g *orderedListGroup) Info(jsonData []byte) (resp *info.Resp, err error) {
+ for i := range g.remotes {
+ resp, err = g.remotes[i].Info(jsonData)
+ if err == nil {
+ return resp, nil
+ }
+ }
+
+ return nil, err
+}
+
+// SetReqModifier does nothing because there is no request modifier for a group.
+func (g *orderedListGroup) SetReqModifier(mod func(*http.Request, []byte)) {
+ // noop
+}
diff --git a/vendor/github.com/cloudflare/cfssl/auth/auth.go b/vendor/github.com/cloudflare/cfssl/auth/auth.go
new file mode 100644
index 0000000000..ecd5e5fefd
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/auth/auth.go
@@ -0,0 +1,94 @@
+// Package auth implements an interface for providing CFSSL
+// authentication. This is meant to authenticate a client CFSSL to a
+// remote CFSSL in order to prevent unauthorised use of the signature
+// capabilities. This package provides both the interface and a
+// standard HMAC-based implementation.
+package auth
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+// An AuthenticatedRequest contains a request and authentication
+// token. The Provider may determine whether to validate the timestamp
+// and remote address.
+type AuthenticatedRequest struct {
+ // An Authenticator decides whether to use this field.
+ Timestamp int64 `json:"timestamp,omitempty"`
+ RemoteAddress []byte `json:"remote_address,omitempty"`
+ Token []byte `json:"token"`
+ Request []byte `json:"request"`
+}
+
+// A Provider can generate tokens from a request and verify a
+// request. The handling of additional authentication data (such as
+// the IP address) is handled by the concrete type, as is any
+// serialisation and state-keeping.
+type Provider interface {
+ Token(req []byte) (token []byte, err error)
+ Verify(aReq *AuthenticatedRequest) bool
+}
+
+// Standard implements an HMAC-SHA-256 authentication provider. It may
+// be supplied additional data at creation time, in which case the HMAC
+// is computed over request || additional-data.
+type Standard struct {
+ key []byte
+ ad []byte
+}
+
+// New generates a new standard authentication provider from the key
+// and additional data. The additional data will be used when
+// generating a new token.
+func New(key string, ad []byte) (*Standard, error) {
+ if splitKey := strings.SplitN(key, ":", 2); len(splitKey) == 2 {
+ switch splitKey[0] {
+ case "env":
+ key = os.Getenv(splitKey[1])
+ case "file":
+ data, err := ioutil.ReadFile(splitKey[1])
+ if err != nil {
+ return nil, err
+ }
+ key = string(data)
+ default:
+ return nil, fmt.Errorf("unknown key prefix: %s", splitKey[0])
+ }
+ }
+
+ keyBytes, err := hex.DecodeString(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Standard{keyBytes, ad}, nil
+}
+
+// Token generates a new authentication token from the request.
+func (p Standard) Token(req []byte) (token []byte, err error) {
+ h := hmac.New(sha256.New, p.key)
+ h.Write(req)
+ h.Write(p.ad)
+ return h.Sum(nil), nil
+}
+
+// Verify determines whether an authenticated request is valid.
+func (p Standard) Verify(ad *AuthenticatedRequest) bool {
+ if ad == nil {
+ return false
+ }
+
+ // Standard token generation returns no error.
+ token, _ := p.Token(ad.Request)
+ if len(ad.Token) != len(token) {
+ return false
+ }
+
+ return hmac.Equal(token, ad.Token)
+}
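+
+// A minimal usage sketch (the key and request bytes are illustrative values):
+//
+//	provider, err := New("000102030405060708090a0b0c0d0e0f", nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	token, _ := provider.Token([]byte(`{"hosts":["example.com"]}`))
+//	// token is then carried in an AuthenticatedRequest alongside the request.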
diff --git a/vendor/github.com/cloudflare/cfssl/certdb/README.md b/vendor/github.com/cloudflare/cfssl/certdb/README.md
new file mode 100644
index 0000000000..fbd941e39c
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/certdb/README.md
@@ -0,0 +1,75 @@
+# certdb usage
+
+Using a database enables additional functionality for existing commands when a
+db config is provided:
+
+ - `sign` and `gencert` add a certificate to the certdb after signing it
+ - `serve` enables database functionality for the sign and revoke endpoints
+
+A database is required for the following:
+
+ - `revoke` marks certificates revoked in the database with an optional reason
+ - `ocsprefresh` refreshes the table of cached OCSP responses
+ - `ocspdump` outputs cached OCSP responses in a concatenated base64-encoded format
+
+## Setup/Migration
+
+This directory stores [goose](https://bitbucket.org/liamstask/goose/) db migration scripts for various DB backends.
+Currently supported:
+ - MySQL in mysql
+ - PostgreSQL in pg
+ - SQLite in sqlite
+
+### Get goose
+
+ go get bitbucket.org/liamstask/goose/cmd/goose
+
+### Use goose to start and terminate a MySQL DB
+To start a MySQL DB using goose:
+
+ goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql up
+
+To tear down a MySQL DB using goose:
+
+ goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql down
+
+Note: the administration of MySQL DB is not included. We assume
+the databases being connected to are already created and access control
+is properly handled.
+
+### Use goose to start and terminate a PostgreSQL DB
+To start a PostgreSQL DB using goose:
+
+ goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg up
+
+To tear down a PostgreSQL DB using goose:
+
+ goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg down
+
+Note: the administration of PostgreSQL DB is not included. We assume
+the databases being connected to are already created and access control
+is properly handled.
+
+### Use goose to start and terminate a SQLite DB
+To start a SQLite DB using goose:
+
+ goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up
+
+To tear down a SQLite DB using goose:
+
+ goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down
+
+## CFSSL Configuration
+
+Several cfssl commands take a -db-config flag. Create a file with a
+JSON dictionary:
+
+ {"driver":"sqlite3","data_source":"certs.db"}
+
+or
+
+ {"driver":"postgres","data_source":"postgres://user:password@host/db"}
+
+or
+
+ {"driver":"mysql","data_source":"user:password@tcp(hostname:3306)/db?parseTime=true"}
diff --git a/vendor/github.com/cloudflare/cfssl/certdb/certdb.go b/vendor/github.com/cloudflare/cfssl/certdb/certdb.go
new file mode 100644
index 0000000000..96694f7685
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/certdb/certdb.go
@@ -0,0 +1,40 @@
+package certdb
+
+import (
+ "time"
+)
+
+// CertificateRecord encodes a certificate and its metadata
+// that will be recorded in a database.
+type CertificateRecord struct {
+ Serial string `db:"serial_number"`
+ AKI string `db:"authority_key_identifier"`
+ CALabel string `db:"ca_label"`
+ Status string `db:"status"`
+ Reason int `db:"reason"`
+ Expiry time.Time `db:"expiry"`
+ RevokedAt time.Time `db:"revoked_at"`
+ PEM string `db:"pem"`
+}
+
+// OCSPRecord encodes an OCSP response body and its metadata
+// that will be recorded in a database.
+type OCSPRecord struct {
+ Serial string `db:"serial_number"`
+ AKI string `db:"authority_key_identifier"`
+ Body string `db:"body"`
+ Expiry time.Time `db:"expiry"`
+}
+
+// Accessor abstracts the CRUD of certdb objects from a DB.
+type Accessor interface {
+ InsertCertificate(cr CertificateRecord) error
+ GetCertificate(serial, aki string) ([]CertificateRecord, error)
+ GetUnexpiredCertificates() ([]CertificateRecord, error)
+ RevokeCertificate(serial, aki string, reasonCode int) error
+ InsertOCSP(rr OCSPRecord) error
+ GetOCSP(serial, aki string) ([]OCSPRecord, error)
+ GetUnexpiredOCSPs() ([]OCSPRecord, error)
+ UpdateOCSP(serial, aki, body string, expiry time.Time) error
+ UpsertOCSP(serial, aki, body string, expiry time.Time) error
+}
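+
+// An illustrative sketch of recording a signed certificate through an
+// Accessor (the field values are hypothetical):
+//
+//	err := accessor.InsertCertificate(CertificateRecord{
+//		Serial: "1",
+//		AKI:    "aa:bb:cc",
+//		Status: "good",
+//		Expiry: time.Now().Add(8760 * time.Hour),
+//		PEM:    string(certPEM),
+//	})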
diff --git a/vendor/github.com/cloudflare/cfssl/certdb/sql/database_accessor.go b/vendor/github.com/cloudflare/cfssl/certdb/sql/database_accessor.go
new file mode 100644
index 0000000000..0b0fc88e25
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/certdb/sql/database_accessor.go
@@ -0,0 +1,314 @@
+package sql
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/cloudflare/cfssl/certdb"
+ cferr "github.com/cloudflare/cfssl/errors"
+
+ "github.com/jmoiron/sqlx"
+ "github.com/kisielk/sqlstruct"
+)
+
+// Match sqlstruct's column tag to sqlx's `db` struct tag.
+func init() {
+ sqlstruct.TagName = "db"
+}
+
+const (
+ insertSQL = `
+INSERT INTO certificates (serial_number, authority_key_identifier, ca_label, status, reason, expiry, revoked_at, pem)
+ VALUES (:serial_number, :authority_key_identifier, :ca_label, :status, :reason, :expiry, :revoked_at, :pem);`
+
+ selectSQL = `
+SELECT %s FROM certificates
+ WHERE (serial_number = ? AND authority_key_identifier = ?);`
+
+ selectAllUnexpiredSQL = `
+SELECT %s FROM certificates
+ WHERE CURRENT_TIMESTAMP < expiry;`
+
+ updateRevokeSQL = `
+UPDATE certificates
+ SET status='revoked', revoked_at=CURRENT_TIMESTAMP, reason=:reason
+ WHERE (serial_number = :serial_number AND authority_key_identifier = :authority_key_identifier);`
+
+ insertOCSPSQL = `
+INSERT INTO ocsp_responses (serial_number, authority_key_identifier, body, expiry)
+ VALUES (:serial_number, :authority_key_identifier, :body, :expiry);`
+
+ updateOCSPSQL = `
+UPDATE ocsp_responses
+ SET body = :body, expiry = :expiry
+ WHERE (serial_number = :serial_number AND authority_key_identifier = :authority_key_identifier);`
+
+ selectAllUnexpiredOCSPSQL = `
+SELECT %s FROM ocsp_responses
+ WHERE CURRENT_TIMESTAMP < expiry;`
+
+ selectOCSPSQL = `
+SELECT %s FROM ocsp_responses
+ WHERE (serial_number = ? AND authority_key_identifier = ?);`
+)
+
+// Accessor implements certdb.Accessor interface.
+type Accessor struct {
+ db *sqlx.DB
+}
+
+func wrapSQLError(err error) error {
+ if err != nil {
+ return cferr.Wrap(cferr.CertStoreError, cferr.Unknown, err)
+ }
+ return nil
+}
+
+func (d *Accessor) checkDB() error {
+ if d.db == nil {
+ return cferr.Wrap(cferr.CertStoreError, cferr.Unknown,
+ errors.New("unknown db object, please check SetDB method"))
+ }
+ return nil
+}
+
+// NewAccessor returns a new Accessor.
+func NewAccessor(db *sqlx.DB) *Accessor {
+ return &Accessor{db: db}
+}
+
+// SetDB changes the underlying sql.DB object Accessor is manipulating.
+func (d *Accessor) SetDB(db *sqlx.DB) {
+ d.db = db
+}
+
+// InsertCertificate puts a certdb.CertificateRecord into db.
+func (d *Accessor) InsertCertificate(cr certdb.CertificateRecord) error {
+ err := d.checkDB()
+ if err != nil {
+ return err
+ }
+
+ res, err := d.db.NamedExec(insertSQL, &certdb.CertificateRecord{
+ Serial: cr.Serial,
+ AKI: cr.AKI,
+ CALabel: cr.CALabel,
+ Status: cr.Status,
+ Reason: cr.Reason,
+ Expiry: cr.Expiry.UTC(),
+ RevokedAt: cr.RevokedAt.UTC(),
+ PEM: cr.PEM,
+ })
+ if err != nil {
+ return wrapSQLError(err)
+ }
+
+ numRowsAffected, err := res.RowsAffected()
+
+ if numRowsAffected == 0 {
+ return cferr.Wrap(cferr.CertStoreError, cferr.InsertionFailed, fmt.Errorf("failed to insert the certificate record"))
+ }
+
+ if numRowsAffected != 1 {
+ return wrapSQLError(fmt.Errorf("%d rows are affected, should be 1 row", numRowsAffected))
+ }
+
+ return err
+}
+
+// GetCertificate gets a certdb.CertificateRecord indexed by serial.
+func (d *Accessor) GetCertificate(serial, aki string) (crs []certdb.CertificateRecord, err error) {
+ err = d.checkDB()
+ if err != nil {
+ return nil, err
+ }
+
+ err = d.db.Select(&crs, fmt.Sprintf(d.db.Rebind(selectSQL), sqlstruct.Columns(certdb.CertificateRecord{})), serial, aki)
+ if err != nil {
+ return nil, wrapSQLError(err)
+ }
+
+ return crs, nil
+}
+
+// GetUnexpiredCertificates gets all unexpired certificates from db.
+func (d *Accessor) GetUnexpiredCertificates() (crs []certdb.CertificateRecord, err error) {
+ err = d.checkDB()
+ if err != nil {
+ return nil, err
+ }
+
+ err = d.db.Select(&crs, fmt.Sprintf(d.db.Rebind(selectAllUnexpiredSQL), sqlstruct.Columns(certdb.CertificateRecord{})))
+ if err != nil {
+ return nil, wrapSQLError(err)
+ }
+
+ return crs, nil
+}
+
+// RevokeCertificate updates a certificate with a given serial number and marks it revoked.
+func (d *Accessor) RevokeCertificate(serial, aki string, reasonCode int) error {
+ err := d.checkDB()
+ if err != nil {
+ return err
+ }
+
+ result, err := d.db.NamedExec(updateRevokeSQL, &certdb.CertificateRecord{
+ AKI: aki,
+ Reason: reasonCode,
+ Serial: serial,
+ })
+ if err != nil {
+ return wrapSQLError(err)
+ }
+
+ numRowsAffected, err := result.RowsAffected()
+ if err != nil {
+ return wrapSQLError(err)
+ }
+
+ if numRowsAffected == 0 {
+ return cferr.Wrap(cferr.CertStoreError, cferr.RecordNotFound, fmt.Errorf("failed to revoke the certificate: certificate not found"))
+ }
+
+ if numRowsAffected != 1 {
+ return wrapSQLError(fmt.Errorf("%d rows are affected, should be 1 row", numRowsAffected))
+ }
+
+ return nil
+}
+
+// InsertOCSP puts a new certdb.OCSPRecord into the db.
+func (d *Accessor) InsertOCSP(rr certdb.OCSPRecord) error {
+ err := d.checkDB()
+ if err != nil {
+ return err
+ }
+
+ result, err := d.db.NamedExec(insertOCSPSQL, &certdb.OCSPRecord{
+ AKI: rr.AKI,
+ Body: rr.Body,
+ Expiry: rr.Expiry.UTC(),
+ Serial: rr.Serial,
+ })
+ if err != nil {
+ return wrapSQLError(err)
+ }
+
+ numRowsAffected, err := result.RowsAffected()
+ if err != nil {
+ return wrapSQLError(err)
+ }
+
+ if numRowsAffected == 0 {
+ return cferr.Wrap(cferr.CertStoreError, cferr.InsertionFailed, fmt.Errorf("failed to insert the OCSP record"))
+ }
+
+ if numRowsAffected != 1 {
+ return wrapSQLError(fmt.Errorf("%d rows are affected, should be 1 row", numRowsAffected))
+ }
+
+ return nil
+}
+
+// GetOCSP retrieves a certdb.OCSPRecord from db by serial.
+func (d *Accessor) GetOCSP(serial, aki string) (ors []certdb.OCSPRecord, err error) {
+ err = d.checkDB()
+ if err != nil {
+ return nil, err
+ }
+
+ err = d.db.Select(&ors, fmt.Sprintf(d.db.Rebind(selectOCSPSQL), sqlstruct.Columns(certdb.OCSPRecord{})), serial, aki)
+ if err != nil {
+ return nil, wrapSQLError(err)
+ }
+
+ return ors, nil
+}
+
+// GetUnexpiredOCSPs retrieves all unexpired certdb.OCSPRecord from db.
+func (d *Accessor) GetUnexpiredOCSPs() (ors []certdb.OCSPRecord, err error) {
+ err = d.checkDB()
+ if err != nil {
+ return nil, err
+ }
+
+ err = d.db.Select(&ors, fmt.Sprintf(d.db.Rebind(selectAllUnexpiredOCSPSQL), sqlstruct.Columns(certdb.OCSPRecord{})))
+ if err != nil {
+ return nil, wrapSQLError(err)
+ }
+
+ return ors, nil
+}
+
+// UpdateOCSP updates an OCSP response record with a given serial number.
+func (d *Accessor) UpdateOCSP(serial, aki, body string, expiry time.Time) error {
+ err := d.checkDB()
+ if err != nil {
+ return err
+ }
+
+ result, err := d.db.NamedExec(updateOCSPSQL, &certdb.OCSPRecord{
+ AKI: aki,
+ Body: body,
+ Expiry: expiry.UTC(),
+ Serial: serial,
+ })
+ if err != nil {
+ return wrapSQLError(err)
+ }
+
+ numRowsAffected, err := result.RowsAffected()
+ if err != nil {
+ return wrapSQLError(err)
+ }
+
+ if numRowsAffected == 0 {
+ return cferr.Wrap(cferr.CertStoreError, cferr.RecordNotFound, fmt.Errorf("failed to update the OCSP record"))
+ }
+
+ if numRowsAffected != 1 {
+ return wrapSQLError(fmt.Errorf("%d rows are affected, should be 1 row", numRowsAffected))
+ }
+
+ return nil
+}
+
+// UpsertOCSP updates an OCSP response record with a given serial number,
+// or inserts the record if it doesn't yet exist in the db.
+// Implementation note:
+// We didn't implement 'upsert' with a single SQL statement, so we lose the
+// race-condition prevention provided by the underlying DBMS.
+// Reasoning:
+// 1. It's difficult to support multiple DBMS backends at the same time; the
+// SQL syntax differs from one to another.
+// 2. We don't need strict simultaneous consistency between OCSP and certificate
+// status. It's OK if an OCSP response still shows 'good' when the
+// corresponding certificate was revoked seconds ago, as long as the OCSP
+// response catches up to be eventually consistent (within hours to days).
+// A write race between OCSP writers on the OCSP table is not a problem,
+// since we don't have write races on the Certificate table, and OCSP
+// writers should periodically use the Certificate table to update the OCSP
+// table to catch up.
+func (d *Accessor) UpsertOCSP(serial, aki, body string, expiry time.Time) error {
+ err := d.checkDB()
+ if err != nil {
+ return err
+ }
+
+ result, err := d.db.NamedExec(updateOCSPSQL, &certdb.OCSPRecord{
+ AKI: aki,
+ Body: body,
+ Expiry: expiry.UTC(),
+ Serial: serial,
+ })
+
+ if err != nil {
+ return wrapSQLError(err)
+ }
+
+ numRowsAffected, err := result.RowsAffected()
+ if err != nil {
+ return wrapSQLError(err)
+ }
+
+ if numRowsAffected == 0 {
+ return d.InsertOCSP(certdb.OCSPRecord{Serial: serial, AKI: aki, Body: body, Expiry: expiry})
+ }
+
+ if numRowsAffected != 1 {
+ return wrapSQLError(fmt.Errorf("%d rows are affected, should be 1 row", numRowsAffected))
+ }
+
+ return nil
+}
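+
+// Usage sketch (illustrative only; the driver name, DSN, and record values
+// below are placeholder assumptions, not part of this package):
+//
+//	db, err := sqlx.Open("postgres", "dbname=certdb sslmode=disable")
+//	if err != nil {
+//		// handle the connection error
+//	}
+//	accessor := NewAccessor(db)
+//	err = accessor.UpsertOCSP("1234", "aki-hex", ocspBody, time.Now().Add(96*time.Hour))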
diff --git a/vendor/github.com/cloudflare/cfssl/config/config.go b/vendor/github.com/cloudflare/cfssl/config/config.go
new file mode 100644
index 0000000000..d5eb7c3760
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/config/config.go
@@ -0,0 +1,659 @@
+// Package config contains the configuration logic for CFSSL.
+package config
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/cloudflare/cfssl/auth"
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/log"
+ ocspConfig "github.com/cloudflare/cfssl/ocsp/config"
+)
+
+// A CSRWhitelist stores booleans for fields in the CSR. If a CSRWhitelist is
+// not present in a SigningProfile, all of these fields may be copied from the
+// CSR into the signed certificate. If a CSRWhitelist *is* present in a
+// SigningProfile, only those fields with a `true` value in the CSRWhitelist may
+// be copied from the CSR to the signed certificate. Note that some of these
+// fields, like Subject, can be provided or partially provided through the API.
+// Since API clients are expected to be trusted, but CSRs are not, fields
+// provided through the API are not subject to whitelisting through this
+// mechanism.
+type CSRWhitelist struct {
+ Subject, PublicKeyAlgorithm, PublicKey, SignatureAlgorithm bool
+ DNSNames, IPAddresses, EmailAddresses bool
+}
+
+// OID is our own version of asn1's ObjectIdentifier, so we can define a custom
+// JSON marshal / unmarshal.
+type OID asn1.ObjectIdentifier
+
+// CertificatePolicy represents the ASN.1 PolicyInformation structure from
+// https://tools.ietf.org/html/rfc3280.html#page-106.
+// Valid values of Type are "id-qt-unotice" and "id-qt-cps"
+type CertificatePolicy struct {
+ ID OID
+ Qualifiers []CertificatePolicyQualifier
+}
+
+// CertificatePolicyQualifier represents a single qualifier from an ASN.1
+// PolicyInformation structure.
+type CertificatePolicyQualifier struct {
+ Type string
+ Value string
+}
+
+// AuthRemote is an authenticated remote signer.
+type AuthRemote struct {
+ RemoteName string `json:"remote"`
+ AuthKeyName string `json:"auth_key"`
+}
+
+// CAConstraint specifies various CA constraints on the signed certificate.
+// CAConstraint would verify against (and override) the CA
+// extensions in the given CSR.
+type CAConstraint struct {
+ IsCA bool `json:"is_ca"`
+ MaxPathLen int `json:"max_path_len"`
+ MaxPathLenZero bool `json:"max_path_len_zero"`
+}
+
+// A SigningProfile stores the information that the CA needs to enforce
+// signature policy.
+type SigningProfile struct {
+ Usage []string `json:"usages"`
+ IssuerURL []string `json:"issuer_urls"`
+ OCSP string `json:"ocsp_url"`
+ CRL string `json:"crl_url"`
+ CAConstraint CAConstraint `json:"ca_constraint"`
+ OCSPNoCheck bool `json:"ocsp_no_check"`
+ ExpiryString string `json:"expiry"`
+ BackdateString string `json:"backdate"`
+ AuthKeyName string `json:"auth_key"`
+ RemoteName string `json:"remote"`
+ NotBefore time.Time `json:"not_before"`
+ NotAfter time.Time `json:"not_after"`
+ NameWhitelistString string `json:"name_whitelist"`
+ AuthRemote AuthRemote `json:"auth_remote"`
+ CTLogServers []string `json:"ct_log_servers"`
+ AllowedExtensions []OID `json:"allowed_extensions"`
+ CertStore string `json:"cert_store"`
+
+ Policies []CertificatePolicy
+ Expiry time.Duration
+ Backdate time.Duration
+ Provider auth.Provider
+ RemoteProvider auth.Provider
+ RemoteServer string
+ RemoteCAs *x509.CertPool
+ ClientCert *tls.Certificate
+ CSRWhitelist *CSRWhitelist
+ NameWhitelist *regexp.Regexp
+ ExtensionWhitelist map[string]bool
+ ClientProvidesSerialNumbers bool
+}
+
+// UnmarshalJSON unmarshals a JSON string into an OID.
+func (oid *OID) UnmarshalJSON(data []byte) (err error) {
+ if data[0] != '"' || data[len(data)-1] != '"' {
+ return errors.New("OID JSON string not wrapped in quotes." + string(data))
+ }
+ data = data[1 : len(data)-1]
+ parsedOid, err := parseObjectIdentifier(string(data))
+ if err != nil {
+ return err
+ }
+ *oid = OID(parsedOid)
+ return
+}
+
+// MarshalJSON marshals an oid into a JSON string.
+func (oid OID) MarshalJSON() ([]byte, error) {
+ return []byte(fmt.Sprintf(`"%v"`, asn1.ObjectIdentifier(oid))), nil
+}
+
+func parseObjectIdentifier(oidString string) (oid asn1.ObjectIdentifier, err error) {
+ validOID, err := regexp.MatchString("^\\d+(\\.\\d+)*$", oidString)
+ if err != nil {
+ return
+ }
+ if !validOID {
+ err = errors.New("Invalid OID")
+ return
+ }
+
+ segments := strings.Split(oidString, ".")
+ oid = make(asn1.ObjectIdentifier, len(segments))
+ for i, intString := range segments {
+ oid[i], err = strconv.Atoi(intString)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
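+
+// For example (illustrative): parseObjectIdentifier("2.5.29.19") yields
+// asn1.ObjectIdentifier{2, 5, 29, 19}, and marshaling OID{2, 5, 29, 19}
+// renders it back as the JSON string "2.5.29.19".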
+
+const timeFormat = "2006-01-02T15:04:05"
+
+// populate is used to fill in the fields that are not in JSON
+//
+// First, the ExpiryString parameter is needed to parse
+// expiration timestamps from JSON. The JSON decoder is not able to
+// decode a string time duration to a time.Duration, so this is called
+// when loading the configuration to properly parse and fill out the
+// Expiry parameter.
+// This function is also used to create references to the auth key
+// and default remote for the profile.
+// It returns nil if ExpiryString is a valid representation of a
+// time.Duration and the AuthKeyName and RemoteName point to
+// valid objects. It returns an error otherwise.
+func (p *SigningProfile) populate(cfg *Config) error {
+ if p == nil {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("can't parse nil profile"))
+ }
+
+ var err error
+ if p.RemoteName == "" && p.AuthRemote.RemoteName == "" {
+ log.Debugf("parse expiry in profile")
+ if p.ExpiryString == "" {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("empty expiry string"))
+ }
+
+ dur, err := time.ParseDuration(p.ExpiryString)
+ if err != nil {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+ }
+
+ log.Debugf("expiry is valid")
+ p.Expiry = dur
+
+ if p.BackdateString != "" {
+ dur, err = time.ParseDuration(p.BackdateString)
+ if err != nil {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+ }
+
+ p.Backdate = dur
+ }
+
+ if !p.NotBefore.IsZero() && !p.NotAfter.IsZero() && p.NotAfter.Before(p.NotBefore) {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("not_after is before not_before"))
+ }
+
+ if len(p.Policies) > 0 {
+ for _, policy := range p.Policies {
+ for _, qualifier := range policy.Qualifiers {
+ if qualifier.Type != "" && qualifier.Type != "id-qt-unotice" && qualifier.Type != "id-qt-cps" {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("invalid policy qualifier type"))
+ }
+ }
+ }
+ }
+ } else if p.RemoteName != "" {
+ log.Debug("match remote in profile to remotes section")
+ if p.AuthRemote.RemoteName != "" {
+ log.Error("profile has both a remote and an auth remote specified")
+ return cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+ }
+ if remote := cfg.Remotes[p.RemoteName]; remote != "" {
+ if err := p.updateRemote(remote); err != nil {
+ return err
+ }
+ } else {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to find remote in remotes section"))
+ }
+ } else {
+ log.Debug("match auth remote in profile to remotes section")
+ if remote := cfg.Remotes[p.AuthRemote.RemoteName]; remote != "" {
+ if err := p.updateRemote(remote); err != nil {
+ return err
+ }
+ } else {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to find remote in remotes section"))
+ }
+ }
+
+ if p.AuthKeyName != "" {
+ log.Debug("match auth key in profile to auth_keys section")
+ if key, ok := cfg.AuthKeys[p.AuthKeyName]; ok {
+ if key.Type == "standard" {
+ p.Provider, err = auth.New(key.Key, nil)
+ if err != nil {
+ log.Debugf("failed to create new standard auth provider: %v", err)
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to create new standard auth provider"))
+ }
+ } else {
+ log.Debugf("unknown authentication type %v", key.Type)
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("unknown authentication type"))
+ }
+ } else {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to find auth_key in auth_keys section"))
+ }
+ }
+
+ if p.AuthRemote.AuthKeyName != "" {
+ log.Debug("match auth remote key in profile to auth_keys section")
+ if key, ok := cfg.AuthKeys[p.AuthRemote.AuthKeyName]; ok {
+ if key.Type == "standard" {
+ p.RemoteProvider, err = auth.New(key.Key, nil)
+ if err != nil {
+ log.Debugf("failed to create new standard auth provider: %v", err)
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to create new standard auth provider"))
+ }
+ } else {
+ log.Debugf("unknown authentication type %v", key.Type)
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("unknown authentication type"))
+ }
+ } else {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to find auth_remote's auth_key in auth_keys section"))
+ }
+ }
+
+ if p.NameWhitelistString != "" {
+ log.Debug("compiling whitelist regular expression")
+ rule, err := regexp.Compile(p.NameWhitelistString)
+ if err != nil {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to compile name whitelist section"))
+ }
+ p.NameWhitelist = rule
+ }
+
+ p.ExtensionWhitelist = map[string]bool{}
+ for _, oid := range p.AllowedExtensions {
+ p.ExtensionWhitelist[asn1.ObjectIdentifier(oid).String()] = true
+ }
+
+ return nil
+}
+
+// updateRemote takes a signing profile and initializes the remote server object
+// to the hostname:port combination sent by remote.
+func (p *SigningProfile) updateRemote(remote string) error {
+ if remote != "" {
+ p.RemoteServer = remote
+ }
+ return nil
+}
+
+// OverrideRemotes takes a signing configuration and updates the remote server object
+// to the hostname:port combination sent by remote
+func (p *Signing) OverrideRemotes(remote string) error {
+ if remote != "" {
+ var err error
+ for _, profile := range p.Profiles {
+ err = profile.updateRemote(remote)
+ if err != nil {
+ return err
+ }
+ }
+ err = p.Default.updateRemote(remote)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SetClientCertKeyPairFromFile updates the properties to set client certificates for mutually
+// authenticated TLS remote requests.
+func (p *Signing) SetClientCertKeyPairFromFile(certFile string, keyFile string) error {
+ if certFile != "" && keyFile != "" {
+ cert, err := helpers.LoadClientCertificate(certFile, keyFile)
+ if err != nil {
+ return err
+ }
+ for _, profile := range p.Profiles {
+ profile.ClientCert = cert
+ }
+ p.Default.ClientCert = cert
+ }
+ return nil
+}
+
+// SetRemoteCAsFromFile reads root CAs from file and updates the properties to set remote CAs for TLS
+// remote requests
+func (p *Signing) SetRemoteCAsFromFile(caFile string) error {
+ if caFile != "" {
+ remoteCAs, err := helpers.LoadPEMCertPool(caFile)
+ if err != nil {
+ return err
+ }
+ p.SetRemoteCAs(remoteCAs)
+ }
+ return nil
+}
+
+// SetRemoteCAs updates the properties to set remote CAs for TLS
+// remote requests
+func (p *Signing) SetRemoteCAs(remoteCAs *x509.CertPool) {
+ for _, profile := range p.Profiles {
+ profile.RemoteCAs = remoteCAs
+ }
+ p.Default.RemoteCAs = remoteCAs
+}
+
+// NeedsRemoteSigner returns true if one of the profiles has a remote set
+func (p *Signing) NeedsRemoteSigner() bool {
+ for _, profile := range p.Profiles {
+ if profile.RemoteServer != "" {
+ return true
+ }
+ }
+
+ if p.Default.RemoteServer != "" {
+ return true
+ }
+
+ return false
+}
+
+// NeedsLocalSigner returns true if one of the profiles does not have a remote set
+func (p *Signing) NeedsLocalSigner() bool {
+ for _, profile := range p.Profiles {
+ if profile.RemoteServer == "" {
+ return true
+ }
+ }
+
+ if p.Default.RemoteServer == "" {
+ return true
+ }
+
+ return false
+}
+
+// Usages parses the list of key uses in the profile, translating them
+// to a list of X.509 key usages and extended key usages. The unknown
+// uses are collected into a slice that is also returned.
+func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
+ for _, keyUse := range p.Usage {
+ if kuse, ok := KeyUsage[keyUse]; ok {
+ ku |= kuse
+ } else if ekuse, ok := ExtKeyUsage[keyUse]; ok {
+ eku = append(eku, ekuse)
+ } else {
+ unk = append(unk, keyUse)
+ }
+ }
+ return
+}
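+
+// For example (illustrative): a profile whose Usage list is
+// {"digital signature", "server auth", "bogus"} yields
+// ku == x509.KeyUsageDigitalSignature, eku == []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+// and unk == []string{"bogus"}.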
+
+// A valid profile must be a valid local profile or a valid remote profile.
+// A valid local profile has defined at least key usages to be used, and a
+// valid local default profile has defined at least a default expiration.
+// A valid remote profile (default or not) has a remote signer initialized.
+// In addition, a remote profile must have a valid auth provider if an
+// auth key is defined.
+func (p *SigningProfile) validProfile(isDefault bool) bool {
+ if p == nil {
+ return false
+ }
+
+ if p.AuthRemote.RemoteName == "" && p.AuthRemote.AuthKeyName != "" {
+ log.Debugf("invalid auth remote profile: no remote signer specified")
+ return false
+ }
+
+ if p.RemoteName != "" {
+ log.Debugf("validate remote profile")
+
+ if p.RemoteServer == "" {
+ log.Debugf("invalid remote profile: no remote signer specified")
+ return false
+ }
+
+ if p.AuthKeyName != "" && p.Provider == nil {
+ log.Debugf("invalid remote profile: auth key name is defined but no auth provider is set")
+ return false
+ }
+
+ if p.AuthRemote.RemoteName != "" {
+ log.Debugf("invalid remote profile: auth remote is also specified")
+ return false
+ }
+ } else if p.AuthRemote.RemoteName != "" {
+ log.Debugf("validate auth remote profile")
+ if p.RemoteServer == "" {
+ log.Debugf("invalid auth remote profile: no remote signer specified")
+ return false
+ }
+
+ if p.AuthRemote.AuthKeyName == "" || p.RemoteProvider == nil {
+ log.Debugf("invalid auth remote profile: no auth key is defined")
+ return false
+ }
+ } else {
+ log.Debugf("validate local profile")
+ if !isDefault {
+ if len(p.Usage) == 0 {
+ log.Debugf("invalid local profile: no usages specified")
+ return false
+ } else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) {
+ log.Debugf("invalid local profile: no valid usages")
+ return false
+ }
+ } else {
+ if p.Expiry == 0 {
+ log.Debugf("invalid local profile: no expiry set")
+ return false
+ }
+ }
+ }
+
+ log.Debugf("profile is valid")
+ return true
+}
+
+// hasLocalConfig checks whether the SigningProfile contains configuration
+// that is only effective with a local signer, which has access to the CA
+// private key.
+func (p *SigningProfile) hasLocalConfig() bool {
+ if p.Usage != nil ||
+ p.IssuerURL != nil ||
+ p.OCSP != "" ||
+ p.ExpiryString != "" ||
+ p.BackdateString != "" ||
+ p.CAConstraint.IsCA ||
+ !p.NotBefore.IsZero() ||
+ !p.NotAfter.IsZero() ||
+ p.NameWhitelistString != "" ||
+ len(p.CTLogServers) != 0 {
+ return true
+ }
+ return false
+}
+
+// warnSkippedSettings prints a log warning message about skipped settings
+// in a SigningProfile, usually due to remote signer.
+func (p *Signing) warnSkippedSettings() {
+ const warningMessage = `The configuration values of "usages", "issuer_urls", "ocsp_url", "crl_url", "ca_constraint", "expiry", "backdate", "not_before", "not_after", "cert_store" and "ct_log_servers" are skipped`
+ if p == nil {
+ return
+ }
+
+ if (p.Default.RemoteName != "" || p.Default.AuthRemote.RemoteName != "") && p.Default.hasLocalConfig() {
+ log.Warning("default profile points to a remote signer: ", warningMessage)
+ }
+
+ for name, profile := range p.Profiles {
+ if (profile.RemoteName != "" || profile.AuthRemote.RemoteName != "") && profile.hasLocalConfig() {
+ log.Warningf("Profiles[%s] points to a remote signer: %s", name, warningMessage)
+ }
+ }
+}
+
+// Signing codifies the signature configuration policy for a CA.
+type Signing struct {
+ Profiles map[string]*SigningProfile `json:"profiles"`
+ Default *SigningProfile `json:"default"`
+}
+
+// Config stores configuration information for the CA.
+type Config struct {
+ Signing *Signing `json:"signing"`
+ OCSP *ocspConfig.Config `json:"ocsp"`
+ AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"`
+ Remotes map[string]string `json:"remotes,omitempty"`
+}
+
+// Valid ensures that Config is a valid configuration. It should be
+// called immediately after parsing a configuration file.
+func (c *Config) Valid() bool {
+ return c.Signing.Valid()
+}
+
+// Valid checks the signature policies, ensuring they are valid
+// policies. A policy is valid if it has defined at least key usages
+// to be used, and a valid default profile has defined at least a
+// default expiration.
+func (p *Signing) Valid() bool {
+ if p == nil {
+ return false
+ }
+
+ log.Debugf("validating configuration")
+ if !p.Default.validProfile(true) {
+ log.Debugf("default profile is invalid")
+ return false
+ }
+
+ for _, sp := range p.Profiles {
+ if !sp.validProfile(false) {
+ log.Debugf("invalid profile")
+ return false
+ }
+ }
+
+ p.warnSkippedSettings()
+
+ return true
+}
+
+// KeyUsage contains a mapping of string names to key usages.
+var KeyUsage = map[string]x509.KeyUsage{
+ "signing": x509.KeyUsageDigitalSignature,
+ "digital signature": x509.KeyUsageDigitalSignature,
+ "content committment": x509.KeyUsageContentCommitment,
+ "key encipherment": x509.KeyUsageKeyEncipherment,
+ "key agreement": x509.KeyUsageKeyAgreement,
+ "data encipherment": x509.KeyUsageDataEncipherment,
+ "cert sign": x509.KeyUsageCertSign,
+ "crl sign": x509.KeyUsageCRLSign,
+ "encipher only": x509.KeyUsageEncipherOnly,
+ "decipher only": x509.KeyUsageDecipherOnly,
+}
+
+// ExtKeyUsage contains a mapping of string names to extended key
+// usages.
+var ExtKeyUsage = map[string]x509.ExtKeyUsage{
+ "any": x509.ExtKeyUsageAny,
+ "server auth": x509.ExtKeyUsageServerAuth,
+ "client auth": x509.ExtKeyUsageClientAuth,
+ "code signing": x509.ExtKeyUsageCodeSigning,
+ "email protection": x509.ExtKeyUsageEmailProtection,
+ "s/mime": x509.ExtKeyUsageEmailProtection,
+ "ipsec end system": x509.ExtKeyUsageIPSECEndSystem,
+ "ipsec tunnel": x509.ExtKeyUsageIPSECTunnel,
+ "ipsec user": x509.ExtKeyUsageIPSECUser,
+ "timestamping": x509.ExtKeyUsageTimeStamping,
+ "ocsp signing": x509.ExtKeyUsageOCSPSigning,
+ "microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto,
+ "netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto,
+}
+
+// An AuthKey contains an entry for a key used for authentication.
+type AuthKey struct {
+ // Type contains information needed to select the appropriate
+ // constructor. For example, "standard" for HMAC-SHA-256,
+ // "standard-ip" for HMAC-SHA-256 incorporating the client's
+ // IP.
+ Type string `json:"type"`
+ // Key contains the key information, such as a hex-encoded
+ // HMAC key.
+ Key string `json:"key"`
+}
+
+// DefaultConfig returns a default configuration specifying basic key
+// usage and a 1 year expiration time. The key usages chosen are
+// signing, key encipherment, client auth and server auth.
+func DefaultConfig() *SigningProfile {
+ d := helpers.OneYear
+ return &SigningProfile{
+ Usage: []string{"signing", "key encipherment", "server auth", "client auth"},
+ Expiry: d,
+ ExpiryString: "8760h",
+ }
+}
+
+// LoadFile attempts to load the configuration file stored at the path
+// and returns the configuration. On error, it returns nil.
+func LoadFile(path string) (*Config, error) {
+ log.Debugf("loading configuration file from %s", path)
+ if path == "" {
+ return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid path"))
+ }
+
+ body, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("could not read configuration file"))
+ }
+
+ return LoadConfig(body)
+}
+
+// LoadConfig attempts to load the configuration from a byte slice.
+// On error, it returns nil.
+func LoadConfig(config []byte) (*Config, error) {
+ var cfg = &Config{}
+ err := json.Unmarshal(config, &cfg)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+ errors.New("failed to unmarshal configuration: "+err.Error()))
+ }
+
+ if cfg.Signing == nil {
+ return nil, errors.New("No \"signing\" field present")
+ }
+
+ if cfg.Signing.Default == nil {
+ log.Debugf("no default given: using default config")
+ cfg.Signing.Default = DefaultConfig()
+ } else {
+ if err := cfg.Signing.Default.populate(cfg); err != nil {
+ return nil, err
+ }
+ }
+
+ for k := range cfg.Signing.Profiles {
+ if err := cfg.Signing.Profiles[k].populate(cfg); err != nil {
+ return nil, err
+ }
+ }
+
+ if !cfg.Valid() {
+ return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid configuration"))
+ }
+
+ log.Debugf("configuration ok")
+ return cfg, nil
+}
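+
+// A minimal configuration document accepted by LoadConfig looks like the
+// sketch below (illustrative; the usages and expiry value are assumptions,
+// and the field names follow the JSON tags defined above):
+//
+//	{
+//	  "signing": {
+//	    "default": {
+//	      "usages": ["signing", "key encipherment", "server auth"],
+//	      "expiry": "8760h"
+//	    }
+//	  }
+//	}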
diff --git a/vendor/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go b/vendor/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go
new file mode 100644
index 0000000000..8db547fce5
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go
@@ -0,0 +1,188 @@
+// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically
+// used to package certificates and CRLs. Using openssl, every certificate converted
+// to PKCS #7 format from another encoding such as PEM conforms to this implementation.
+// reference: https://www.openssl.org/docs/apps/crl2pkcs7.html
+//
+// PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315
+//
+// The full PKCS #7 cryptographic message syntax allows for cryptographic enhancements:
+// for example, data can be encrypted and signed and then packaged through PKCS #7 to be
+// sent over a network and then verified and decrypted. It is ASN.1, and the type of
+// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is:
+//
+// ContentInfo ::= SEQUENCE {
+// contentType ContentType,
+// content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL
+// }
+//
+// There are 6 possible ContentTypes: data, signedData, envelopedData,
+// signedAndEnvelopedData, digestedData, and encryptedData. Here signedData, Data, and
+// encryptedData are implemented, as the degenerate case of signedData without a signature is the
+// typical format for transferring certificates and CRLs, and Data and encryptedData are used in
+// PKCS #12 formats.
+// The ContentType signedData has the form:
+//
+//
+// signedData ::= SEQUENCE {
+// version Version,
+// digestAlgorithms DigestAlgorithmIdentifiers,
+// contentInfo ContentInfo,
+// certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL,
+// crls [1] IMPLICIT CertificateRevocationLists OPTIONAL,
+// signerInfos SignerInfos
+// }
+//
+// As of yet, signerInfos and digestAlgorithms are not parsed, as they are not relevant to
+// this system's use of PKCS #7 data. Version is an integer type. Note that PKCS #7 is
+// recursive; this second layer of ContentInfo is similarly ignored for our degenerate
+// usage. The ExtendedCertificatesAndCertificates type consists of a sequence of choices
+// between PKCS #6 extended certificates and x509 certificates. Any sequence consisting
+// of any number of extended certificates is not yet supported in this implementation.
+//
+// The ContentType Data is simply a raw octet string and is parsed directly into a Go []byte slice.
+//
+// The ContentType encryptedData is the most complicated and its form can be gathered by
+// the go type below. It essentially contains a raw octet string of encrypted data and an
+// algorithm identifier for use in decrypting this data.
+package pkcs7
+
+import (
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+
+ cferr "github.com/cloudflare/cfssl/errors"
+)
+
+// Types used for asn1 Unmarshaling.
+
+type signedData struct {
+ Version int
+ DigestAlgorithms asn1.RawValue
+ ContentInfo asn1.RawValue
+ Certificates asn1.RawValue `asn1:"optional,tag:0"`
+ Crls asn1.RawValue `asn1:"optional"`
+ SignerInfos asn1.RawValue
+}
+
+type initPKCS7 struct {
+ Raw asn1.RawContent
+ ContentType asn1.ObjectIdentifier
+ Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
+}
+
+// Object identifier strings of the three implemented PKCS7 types.
+const (
+ ObjIDData = "1.2.840.113549.1.7.1"
+ ObjIDSignedData = "1.2.840.113549.1.7.2"
+ ObjIDEncryptedData = "1.2.840.113549.1.7.6"
+)
+
+// PKCS7 represents the ASN.1 PKCS #7 Content type. It contains one of three
+// possible types of Content objects, as denoted by the object identifier in
+// the ContentInfo field, the other two being nil. SignedData
+// is the degenerate SignedData Content info without signature used
+// to hold certificates and CRLs. Data is raw bytes, and EncryptedData
+// is as defined in the PKCS #7 standard.
+type PKCS7 struct {
+ Raw asn1.RawContent
+ ContentInfo string
+ Content Content
+}
+
+// Content implements three of the six possible PKCS7 data types. Only one is non-nil.
+type Content struct {
+ Data []byte
+ SignedData SignedData
+ EncryptedData EncryptedData
+}
+
+// SignedData defines the typical carrier of certificates and CRLs.
+type SignedData struct {
+ Raw asn1.RawContent
+ Version int
+ Certificates []*x509.Certificate
+ Crl *pkix.CertificateList
+}
+
+// Data contains raw bytes. Used as a subtype in PKCS12.
+type Data struct {
+ Bytes []byte
+}
+
+// EncryptedData contains encrypted data. Used as a subtype in PKCS12.
+type EncryptedData struct {
+ Raw asn1.RawContent
+ Version int
+ EncryptedContentInfo EncryptedContentInfo
+}
+
+// EncryptedContentInfo is a subtype of PKCS7EncryptedData.
+type EncryptedContentInfo struct {
+ Raw asn1.RawContent
+ ContentType asn1.ObjectIdentifier
+ ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
+ EncryptedContent []byte `asn1:"tag:0,optional"`
+}
+
+// ParsePKCS7 attempts to parse the DER encoded bytes of a
+// PKCS7 structure.
+func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
+
+ var pkcs7 initPKCS7
+ _, err = asn1.Unmarshal(raw, &pkcs7)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+
+ msg = new(PKCS7)
+ msg.Raw = pkcs7.Raw
+ msg.ContentInfo = pkcs7.ContentType.String()
+ switch {
+ case msg.ContentInfo == ObjIDData:
+ msg.ContentInfo = "Data"
+ _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+ case msg.ContentInfo == ObjIDSignedData:
+ msg.ContentInfo = "SignedData"
+ var signedData signedData
+ _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+ if len(signedData.Certificates.Bytes) != 0 {
+ msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+ }
+ if len(signedData.Crls.Bytes) != 0 {
+ msg.Content.SignedData.Crl, err = x509.ParseDERCRL(signedData.Crls.Bytes)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+ }
+ msg.Content.SignedData.Version = signedData.Version
+ msg.Content.SignedData.Raw = pkcs7.Content.Bytes
+ case msg.ContentInfo == ObjIDEncryptedData:
+ msg.ContentInfo = "EncryptedData"
+ var encryptedData EncryptedData
+ _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+ if encryptedData.Version != 0 {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Only support for PKCS #7 encryptedData version 0"))
+ }
+ msg.Content.EncryptedData = encryptedData
+
+ default:
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Attempt to parse PKCS# 7 Content not of type data, signed data or encrypted data"))
+ }
+
+ return msg, nil
+
+}
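+
+// Usage sketch (illustrative; derBytes is a placeholder for DER-encoded
+// input):
+//
+//	msg, err := ParsePKCS7(derBytes)
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	if msg.ContentInfo == "SignedData" {
+//		certs := msg.Content.SignedData.Certificates // bundled certificates, if any
+//	}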
diff --git a/vendor/github.com/cloudflare/cfssl/csr/csr.go b/vendor/github.com/cloudflare/cfssl/csr/csr.go
new file mode 100644
index 0000000000..eef4cf3060
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/csr/csr.go
@@ -0,0 +1,431 @@
+// Package csr implements certificate requests for CFSSL.
+package csr
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/pem"
+ "errors"
+ "net"
+ "net/mail"
+ "strings"
+
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/log"
+)
+
+const (
+ curveP256 = 256
+ curveP384 = 384
+ curveP521 = 521
+)
+
+// A Name contains the SubjectInfo fields.
+type Name struct {
+ C string // Country
+ ST string // State
+ L string // Locality
+ O string // OrganisationName
+ OU string // OrganisationalUnitName
+ SerialNumber string
+}
+
+// A KeyRequest is a generic request for a new key.
+type KeyRequest interface {
+ Algo() string
+ Size() int
+ Generate() (crypto.PrivateKey, error)
+ SigAlgo() x509.SignatureAlgorithm
+}
+
+// A BasicKeyRequest contains the algorithm and key size for a new private key.
+type BasicKeyRequest struct {
+ A string `json:"algo"`
+ S int `json:"size"`
+}
+
+// NewBasicKeyRequest returns a default BasicKeyRequest.
+func NewBasicKeyRequest() *BasicKeyRequest {
+ return &BasicKeyRequest{"ecdsa", curveP256}
+}
+
+// Algo returns the requested key algorithm represented as a string.
+func (kr *BasicKeyRequest) Algo() string {
+ return kr.A
+}
+
+// Size returns the requested key size.
+func (kr *BasicKeyRequest) Size() int {
+ return kr.S
+}
+
+// Generate generates a key as specified in the request. Currently,
+// only ECDSA and RSA are supported.
+func (kr *BasicKeyRequest) Generate() (crypto.PrivateKey, error) {
+ log.Debugf("generate key from request: algo=%s, size=%d", kr.Algo(), kr.Size())
+ switch kr.Algo() {
+ case "rsa":
+ if kr.Size() < 2048 {
+ return nil, errors.New("RSA key is too weak")
+ }
+ if kr.Size() > 8192 {
+ return nil, errors.New("RSA key size too large")
+ }
+ return rsa.GenerateKey(rand.Reader, kr.Size())
+ case "ecdsa":
+ var curve elliptic.Curve
+ switch kr.Size() {
+ case curveP256:
+ curve = elliptic.P256()
+ case curveP384:
+ curve = elliptic.P384()
+ case curveP521:
+ curve = elliptic.P521()
+ default:
+ return nil, errors.New("invalid curve")
+ }
+ return ecdsa.GenerateKey(curve, rand.Reader)
+ default:
+ return nil, errors.New("invalid algorithm")
+ }
+}
+
+// SigAlgo returns an appropriate X.509 signature algorithm given the
+// key request's type and size.
+func (kr *BasicKeyRequest) SigAlgo() x509.SignatureAlgorithm {
+ switch kr.Algo() {
+ case "rsa":
+ switch {
+ case kr.Size() >= 4096:
+ return x509.SHA512WithRSA
+ case kr.Size() >= 3072:
+ return x509.SHA384WithRSA
+ case kr.Size() >= 2048:
+ return x509.SHA256WithRSA
+ default:
+ return x509.SHA1WithRSA
+ }
+ case "ecdsa":
+ switch kr.Size() {
+ case curveP521:
+ return x509.ECDSAWithSHA512
+ case curveP384:
+ return x509.ECDSAWithSHA384
+ case curveP256:
+ return x509.ECDSAWithSHA256
+ default:
+ return x509.ECDSAWithSHA1
+ }
+ default:
+ return x509.UnknownSignatureAlgorithm
+ }
+}
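+
+// For example (illustrative): the default NewBasicKeyRequest() describes an
+// ECDSA P-256 key, so Generate() returns an *ecdsa.PrivateKey on that curve
+// and SigAlgo() reports x509.ECDSAWithSHA256.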
+
+// CAConfig is a section used in requests to initialise a new CA.
+type CAConfig struct {
+ PathLength int `json:"pathlen"`
+ PathLenZero bool `json:"pathlenzero"`
+ Expiry string `json:"expiry"`
+}
+
+// A CertificateRequest encapsulates the API interface to the
+// certificate request functionality.
+type CertificateRequest struct {
+ CN string
+ Names []Name `json:"names"`
+ Hosts []string `json:"hosts"`
+ KeyRequest KeyRequest `json:"key,omitempty"`
+ CA *CAConfig `json:"ca,omitempty"`
+ SerialNumber string `json:"serialnumber,omitempty"`
+}
+
+// New returns a new, empty CertificateRequest with a
+// BasicKeyRequest.
+func New() *CertificateRequest {
+ return &CertificateRequest{
+ KeyRequest: NewBasicKeyRequest(),
+ }
+}
+
+// appendIf appends s to a if s is not an empty string.
+func appendIf(s string, a *[]string) {
+ if s != "" {
+ *a = append(*a, s)
+ }
+}
+
+// Name returns the PKIX name for the request.
+func (cr *CertificateRequest) Name() pkix.Name {
+ var name pkix.Name
+ name.CommonName = cr.CN
+
+ for _, n := range cr.Names {
+ appendIf(n.C, &name.Country)
+ appendIf(n.ST, &name.Province)
+ appendIf(n.L, &name.Locality)
+ appendIf(n.O, &name.Organization)
+ appendIf(n.OU, &name.OrganizationalUnit)
+ }
+ name.SerialNumber = cr.SerialNumber
+ return name
+}
+
+// BasicConstraints CSR information, RFC 5280, 4.2.1.9.
+type BasicConstraints struct {
+ IsCA bool `asn1:"optional"`
+ MaxPathLen int `asn1:"optional,default:-1"`
+}
+
+// ParseRequest takes a certificate request and generates a key and
+// CSR from it. It does no validation -- caveat emptor. It will,
+// however, fail if the key request is not valid (i.e., an unsupported
+// curve or RSA key size). The lack of validation was specifically
+// chosen to allow the end user to define a policy and validate the
+// request appropriately before calling this function.
+func ParseRequest(req *CertificateRequest) (csr, key []byte, err error) {
+ log.Info("received CSR")
+ if req.KeyRequest == nil {
+ req.KeyRequest = NewBasicKeyRequest()
+ }
+
+ log.Infof("generating key: %s-%d", req.KeyRequest.Algo(), req.KeyRequest.Size())
+ priv, err := req.KeyRequest.Generate()
+ if err != nil {
+ err = cferr.Wrap(cferr.PrivateKeyError, cferr.GenerationFailed, err)
+ return
+ }
+
+ switch priv := priv.(type) {
+ case *rsa.PrivateKey:
+ key = x509.MarshalPKCS1PrivateKey(priv)
+ block := pem.Block{
+ Type: "RSA PRIVATE KEY",
+ Bytes: key,
+ }
+ key = pem.EncodeToMemory(&block)
+ case *ecdsa.PrivateKey:
+ key, err = x509.MarshalECPrivateKey(priv)
+ if err != nil {
+ err = cferr.Wrap(cferr.PrivateKeyError, cferr.Unknown, err)
+ return
+ }
+ block := pem.Block{
+ Type: "EC PRIVATE KEY",
+ Bytes: key,
+ }
+ key = pem.EncodeToMemory(&block)
+ default:
+ panic("Generate should have failed to produce a valid key.")
+ }
+
+ csr, err = Generate(priv.(crypto.Signer), req)
+ if err != nil {
+ log.Errorf("failed to generate a CSR: %v", err)
+ err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
+ }
+ return
+}
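+
+// Usage sketch (illustrative; the CN and hosts are placeholder values):
+//
+//	req := &CertificateRequest{
+//		CN:         "example.com",
+//		Hosts:      []string{"example.com", "192.0.2.1"},
+//		KeyRequest: NewBasicKeyRequest(),
+//	}
+//	csrPEM, keyPEM, err := ParseRequest(req)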
+
+// ExtractCertificateRequest extracts a CertificateRequest from
+// x509.Certificate. It is aimed to be used for generating a new certificate
+// from an existing certificate. For a root certificate, the CA expiry
+// length is calculated as the duration between cert.NotAfter and cert.NotBefore.
+func ExtractCertificateRequest(cert *x509.Certificate) *CertificateRequest {
+ req := New()
+ req.CN = cert.Subject.CommonName
+ req.Names = getNames(cert.Subject)
+ req.Hosts = getHosts(cert)
+ req.SerialNumber = cert.Subject.SerialNumber
+
+ if cert.IsCA {
+ req.CA = new(CAConfig)
+ // CA expiry length is calculated based on the input cert
+ // issue date and expiry date.
+ req.CA.Expiry = cert.NotAfter.Sub(cert.NotBefore).String()
+ req.CA.PathLength = cert.MaxPathLen
+ req.CA.PathLenZero = cert.MaxPathLenZero
+ }
+
+ return req
+}
+
+func getHosts(cert *x509.Certificate) []string {
+ var hosts []string
+ for _, ip := range cert.IPAddresses {
+ hosts = append(hosts, ip.String())
+ }
+ for _, dns := range cert.DNSNames {
+ hosts = append(hosts, dns)
+ }
+ for _, email := range cert.EmailAddresses {
+ hosts = append(hosts, email)
+ }
+
+ return hosts
+}
+
+// getNames returns an array of Names from the certificate.
+// It only cares about Country, Organization, OrganizationalUnit, Locality, Province.
+func getNames(sub pkix.Name) []Name {
+ // anonymous func for finding the max of a list of integers
+ max := func(v1 int, vn ...int) (max int) {
+ max = v1
+ for i := 0; i < len(vn); i++ {
+ if vn[i] > max {
+ max = vn[i]
+ }
+ }
+ return max
+ }
+
+ nc := len(sub.Country)
+ norg := len(sub.Organization)
+ nou := len(sub.OrganizationalUnit)
+ nl := len(sub.Locality)
+ np := len(sub.Province)
+
+ n := max(nc, norg, nou, nl, np)
+
+ names := make([]Name, n)
+ for i := range names {
+ if i < nc {
+ names[i].C = sub.Country[i]
+ }
+ if i < norg {
+ names[i].O = sub.Organization[i]
+ }
+ if i < nou {
+ names[i].OU = sub.OrganizationalUnit[i]
+ }
+ if i < nl {
+ names[i].L = sub.Locality[i]
+ }
+ if i < np {
+ names[i].ST = sub.Province[i]
+ }
+ }
+ return names
+}
+
+// A Generator is responsible for validating certificate requests.
+type Generator struct {
+ Validator func(*CertificateRequest) error
+}
+
+// ProcessRequest validates and processes the incoming request. It is
+// a wrapper around a validator and the ParseRequest function.
+func (g *Generator) ProcessRequest(req *CertificateRequest) (csr, key []byte, err error) {
+
+ log.Info("generate received request")
+ err = g.Validator(req)
+ if err != nil {
+ log.Warningf("invalid request: %v", err)
+ return nil, nil, err
+ }
+
+ csr, key, err = ParseRequest(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ return
+}
+
+// IsNameEmpty returns true if the name has no identifying information in it.
+func IsNameEmpty(n Name) bool {
+ empty := func(s string) bool { return strings.TrimSpace(s) == "" }
+
+ if empty(n.C) && empty(n.ST) && empty(n.L) && empty(n.O) && empty(n.OU) {
+ return true
+ }
+ return false
+}
+
+// Regenerate uses the provided CSR as a template for signing a new
+// CSR using priv.
+func Regenerate(priv crypto.Signer, csr []byte) ([]byte, error) {
+ req, extra, err := helpers.ParseCSR(csr)
+ if err != nil {
+ return nil, err
+ } else if len(extra) > 0 {
+ return nil, errors.New("csr: trailing data in certificate request")
+ }
+
+ return x509.CreateCertificateRequest(rand.Reader, req, priv)
+}
+
+// Generate creates a new CSR from a CertificateRequest structure and
+// an existing key. The KeyRequest field is ignored.
+func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) {
+ sigAlgo := helpers.SignerAlgo(priv)
+ if sigAlgo == x509.UnknownSignatureAlgorithm {
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable)
+ }
+
+ var tpl = x509.CertificateRequest{
+ Subject: req.Name(),
+ SignatureAlgorithm: sigAlgo,
+ }
+
+ for i := range req.Hosts {
+ if ip := net.ParseIP(req.Hosts[i]); ip != nil {
+ tpl.IPAddresses = append(tpl.IPAddresses, ip)
+ } else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
+ tpl.EmailAddresses = append(tpl.EmailAddresses, email.Address)
+ } else {
+ tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
+ }
+ }
+
+ if req.CA != nil {
+ err = appendCAInfoToCSR(req.CA, &tpl)
+ if err != nil {
+ err = cferr.Wrap(cferr.CSRError, cferr.GenerationFailed, err)
+ return
+ }
+ }
+
+ csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
+ if err != nil {
+ log.Errorf("failed to generate a CSR: %v", err)
+ err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
+ return
+ }
+ block := pem.Block{
+ Type: "CERTIFICATE REQUEST",
+ Bytes: csr,
+ }
+
+ log.Info("encoded CSR")
+ csr = pem.EncodeToMemory(&block)
+ return
+}
+
+// appendCAInfoToCSR appends the CAConfig BasicConstraints extension to a CSR.
+func appendCAInfoToCSR(reqConf *CAConfig, csr *x509.CertificateRequest) error {
+ pathlen := reqConf.PathLength
+ if pathlen == 0 && !reqConf.PathLenZero {
+ pathlen = -1
+ }
+ val, err := asn1.Marshal(BasicConstraints{true, pathlen})
+
+ if err != nil {
+ return err
+ }
+
+ csr.ExtraExtensions = []pkix.Extension{
+ {
+ Id: asn1.ObjectIdentifier{2, 5, 29, 19},
+ Value: val,
+ Critical: true,
+ },
+ }
+
+ return nil
+}
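+
+// For example (illustrative): CAConfig{PathLength: 0, PathLenZero: false}
+// marshals BasicConstraints{true, -1}, i.e. no path length constraint, while
+// CAConfig{PathLength: 0, PathLenZero: true} marshals BasicConstraints{true, 0},
+// an explicit zero path length.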
diff --git a/vendor/github.com/cloudflare/cfssl/errors/doc.go b/vendor/github.com/cloudflare/cfssl/errors/doc.go
new file mode 100644
index 0000000000..1910e2662f
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/errors/doc.go
@@ -0,0 +1,46 @@
+/*
+Package errors provides error types returned in CF SSL.
+
+1. Type Error is intended for errors produced by CF SSL packages.
+It formats to a JSON object that consists of an error message and a 4-digit code for error reasoning.
+
+Example: {"code":1002, "message": "Failed to decode certificate"}
+
+The index of codes is listed below:
+ 1XXX: CertificateError
+ 1000: Unknown
+ 1001: ReadFailed
+ 1002: DecodeFailed
+ 1003: ParseFailed
+ 1100: SelfSigned
+ 12XX: VerifyFailed
+ 121X: CertificateInvalid
+ 1210: NotAuthorizedToSign
+ 1211: Expired
+ 1212: CANotAuthorizedForThisName
+ 1213: TooManyIntermediates
+ 1214: IncompatibleUsage
+ 1220: UnknownAuthority
+ 2XXX: PrivateKeyError
+ 2000: Unknown
+ 2001: ReadFailed
+ 2002: DecodeFailed
+ 2003: ParseFailed
+ 2100: Encrypted
+ 2200: NotRSAOrECC
+ 2300: KeyMismatch
+ 2400: GenerationFailed
+ 2500: Unavailable
+ 3XXX: IntermediatesError
+ 4XXX: RootError
+ 5XXX: PolicyError
+ 5100: NoKeyUsages
+ 5200: InvalidPolicy
+ 5300: InvalidRequest
+ 5400: UnknownProfile
+ 6XXX: DialError
+
+2. Type HTTPError is intended for the CF SSL API to consume. It contains an HTTP status code that will be read and
+returned by the API server.
+*/
+package errors
diff --git a/vendor/github.com/cloudflare/cfssl/errors/error.go b/vendor/github.com/cloudflare/cfssl/errors/error.go
new file mode 100644
index 0000000000..9913a84e10
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/errors/error.go
@@ -0,0 +1,424 @@
+package errors
+
+import (
+ "crypto/x509"
+ "encoding/json"
+ "fmt"
+)
+
+// Error is the error type usually returned by functions in CF SSL package.
+// It contains a 4-digit error code where the most significant digit
+// describes the category where the error occurred and the remaining 3 digits
+// describe the specific error reason.
+type Error struct {
+ ErrorCode int `json:"code"`
+ Message string `json:"message"`
+}
+
+// Category is the most significant digit of the error code.
+type Category int
+
+// Reason is the last 3 digits of the error code.
+type Reason int
+
+const (
+ // Success indicates no error occurred.
+ Success Category = 1000 * iota // 0XXX
+
+ // CertificateError indicates a fault in a certificate.
+ CertificateError // 1XXX
+
+ // PrivateKeyError indicates a fault in a private key.
+ PrivateKeyError // 2XXX
+
+ // IntermediatesError indicates a fault in an intermediate.
+ IntermediatesError // 3XXX
+
+ // RootError indicates a fault in a root.
+ RootError // 4XXX
+
+ // PolicyError indicates an error arising from a malformed or
+ // non-existent policy, or a breach of policy.
+ PolicyError // 5XXX
+
+ // DialError indicates a network fault.
+ DialError // 6XXX
+
+ // APIClientError indicates a problem with the API client.
+ APIClientError // 7XXX
+
+ // OCSPError indicates a problem with OCSP signing
+ OCSPError // 8XXX
+
+ // CSRError indicates a problem with CSR parsing
+ CSRError // 9XXX
+
+ // CTError indicates a problem with the certificate transparency process
+ CTError // 10XXX
+
+ // CertStoreError indicates a problem with the certificate store
+ CertStoreError // 11XXX
+)
+
+// None is a non-specified error.
+const (
+ None Reason = iota
+)
+
+// Warning code for a success
+const (
+ BundleExpiringBit int = 1 << iota // 0x01
+ BundleNotUbiquitousBit // 0x02
+)
+
+// Parsing errors
+const (
+ Unknown Reason = iota // X000
+ ReadFailed // X001
+ DecodeFailed // X002
+ ParseFailed // X003
+)
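+
+// For example (illustrative): an error of category CertificateError (1000)
+// with reason ParseFailed (3) carries the combined code 1003.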
+
+// The following represent certificate non-parsing errors, and must be
+// specified along with CertificateError.
+const (
+ // SelfSigned indicates that a certificate is self-signed and
+ // cannot be used in the manner being attempted.
+ SelfSigned Reason = 100 * (iota + 1) // Code 11XX
+
+ // VerifyFailed is an X.509 verification failure. The two least
+ // significant digits of 12XX are determined by examining the
+ // actual x509 error.
+ VerifyFailed // Code 12XX
+
+ // BadRequest indicates that the certificate request is invalid.
+ BadRequest // Code 13XX
+
+ // MissingSerial indicates that the profile specified
+ // 'ClientProvidesSerialNumbers', but the SignRequest did not include a serial
+ // number.
+ MissingSerial // Code 14XX
+)
+
+const (
+ certificateInvalid = 10 * (iota + 1) // 121X
+ unknownAuthority // 122X
+)
+
+// The following represent private-key non-parsing errors, and must be
+// specified with PrivateKeyError.
+const (
+ // Encrypted indicates that the private key is a PKCS #8 encrypted
+ // private key. At this time, CFSSL does not support decrypting
+ // these keys.
+ Encrypted Reason = 100 * (iota + 1) //21XX
+
+ // NotRSAOrECC indicates that the key is not an RSA or ECC
+ // private key; these are the only two private key types supported
+ // at this time by CFSSL.
+ NotRSAOrECC //22XX
+
+ // KeyMismatch indicates that the private key does not match
+ // the public key or certificate being presented with the key.
+ KeyMismatch //23XX
+
+ // GenerationFailed indicates that a private key could not
+ // be generated.
+ GenerationFailed //24XX
+
+ // Unavailable indicates that a private key mechanism (such as
+ // PKCS #11) was requested but support for that mechanism is
+ // not available.
+ Unavailable
+)
+
+// The following are policy-related non-parsing errors, and must be
+// specified along with PolicyError.
+const (
+ // NoKeyUsages indicates that the profile does not permit any
+ // key usages for the certificate.
+ NoKeyUsages Reason = 100 * (iota + 1) // 51XX
+
+ // InvalidPolicy indicates that policy being requested is not
+ // a valid policy or does not exist.
+ InvalidPolicy // 52XX
+
+ // InvalidRequest indicates a certificate request violated the
+ // constraints of the policy being applied to the request.
+ InvalidRequest // 53XX
+
+ // UnknownProfile indicates that the profile does not exist.
+ UnknownProfile // 54XX
+
+ UnmatchedWhitelist // 55xx
+)
+
+// The following are API client related errors, and should be
+// specified with APIClientError.
+const (
+ // AuthenticationFailure occurs when the client is unable
+ // to obtain an authentication token for the request.
+ AuthenticationFailure Reason = 100 * (iota + 1)
+
+ // JSONError wraps an encoding/json error.
+ JSONError
+
+ // IOError wraps an io/ioutil error.
+ IOError
+
+ // ClientHTTPError wraps a net/http error.
+ ClientHTTPError
+
+ // ServerRequestFailed covers any other failures from the API
+ // client.
+ ServerRequestFailed
+)
+
+// The following are OCSP related errors, and should be
+// specified with OCSPError
+const (
+ // IssuerMismatch occurs when the certificate in the OCSP signing
+ // request was not issued by the CA that this responder responds for.
+ IssuerMismatch Reason = 100 * (iota + 1) // 81XX
+
+ // InvalidStatus occurs when the OCSP signing request includes an
+ // invalid value for the certificate status.
+ InvalidStatus
+)
+
+// Certificate transparency related errors specified with CTError
+const (
+ // PrecertSubmissionFailed occurs when submitting a precertificate to
+ // a log server fails
+ PrecertSubmissionFailed = 100 * (iota + 1)
+)
+
+// Certificate persistence related errors specified with CertStoreError
+const (
+ // InsertionFailed occurs when a SQL insert query fails to complete.
+ InsertionFailed = 100 * (iota + 1)
+ // RecordNotFound occurs when a SQL query targeting one unique
+ // record fails to update the specified row in the table.
+ RecordNotFound
+)
+
+// The error interface implementation, which formats to a JSON object string.
+func (e *Error) Error() string {
+ marshaled, err := json.Marshal(e)
+ if err != nil {
+ panic(err)
+ }
+ return string(marshaled)
+
+}
+
+// New returns an error that contains an error code and message derived from
+// the given category and reason. Currently, to avoid confusion, it is not
+// allowed to create an error of category Success.
+func New(category Category, reason Reason) *Error {
+ errorCode := int(category) + int(reason)
+ var msg string
+ switch category {
+ case OCSPError:
+ switch reason {
+ case ReadFailed:
+ msg = "No certificate provided"
+ case IssuerMismatch:
+ msg = "Certificate not issued by this issuer"
+ case InvalidStatus:
+ msg = "Invalid revocation status"
+ }
+ case CertificateError:
+ switch reason {
+ case Unknown:
+ msg = "Unknown certificate error"
+ case ReadFailed:
+ msg = "Failed to read certificate"
+ case DecodeFailed:
+ msg = "Failed to decode certificate"
+ case ParseFailed:
+ msg = "Failed to parse certificate"
+ case SelfSigned:
+ msg = "Certificate is self signed"
+ case VerifyFailed:
+ msg = "Unable to verify certificate"
+ case BadRequest:
+ msg = "Invalid certificate request"
+ case MissingSerial:
+ msg = "Missing serial number in request"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category CertificateError.",
+ reason))
+
+ }
+ case PrivateKeyError:
+ switch reason {
+ case Unknown:
+ msg = "Unknown private key error"
+ case ReadFailed:
+ msg = "Failed to read private key"
+ case DecodeFailed:
+ msg = "Failed to decode private key"
+ case ParseFailed:
+ msg = "Failed to parse private key"
+ case Encrypted:
+ msg = "Private key is encrypted."
+ case NotRSAOrECC:
+ msg = "Private key algorithm is not RSA or ECC"
+ case KeyMismatch:
+ msg = "Private key does not match public key"
+ case GenerationFailed:
+ msg = "Failed to new private key"
+ case Unavailable:
+ msg = "Private key is unavailable"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PrivateKeyError.",
+ reason))
+ }
+ case IntermediatesError:
+ switch reason {
+ case Unknown:
+ msg = "Unknown intermediate certificate error"
+ case ReadFailed:
+ msg = "Failed to read intermediate certificate"
+ case DecodeFailed:
+ msg = "Failed to decode intermediate certificate"
+ case ParseFailed:
+ msg = "Failed to parse intermediate certificate"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category IntermediatesError.",
+ reason))
+ }
+ case RootError:
+ switch reason {
+ case Unknown:
+ msg = "Unknown root certificate error"
+ case ReadFailed:
+ msg = "Failed to read root certificate"
+ case DecodeFailed:
+ msg = "Failed to decode root certificate"
+ case ParseFailed:
+ msg = "Failed to parse root certificate"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category RootError.",
+ reason))
+ }
+ case PolicyError:
+ switch reason {
+ case Unknown:
+ msg = "Unknown policy error"
+ case NoKeyUsages:
+ msg = "Invalid policy: no key usage available"
+ case InvalidPolicy:
+ msg = "Invalid or unknown policy"
+ case InvalidRequest:
+ msg = "Policy violation request"
+ case UnknownProfile:
+ msg = "Unknown policy profile"
+ case UnmatchedWhitelist:
+ msg = "Request does not match policy whitelist"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PolicyError.",
+ reason))
+ }
+ case DialError:
+ switch reason {
+ case Unknown:
+ msg = "Failed to dial remote server"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category DialError.",
+ reason))
+ }
+ case APIClientError:
+ switch reason {
+ case AuthenticationFailure:
+ msg = "API client authentication failure"
+ case JSONError:
+ msg = "API client JSON config error"
+ case ClientHTTPError:
+ msg = "API client HTTP error"
+ case IOError:
+ msg = "API client IO error"
+ case ServerRequestFailed:
+ msg = "API client error: Server request failed"
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category APIClientError.",
+ reason))
+ }
+ case CSRError:
+ switch reason {
+ case Unknown:
+ msg = "CSR parsing failed due to unknown error"
+ case ReadFailed:
+ msg = "CSR file read failed"
+ case ParseFailed:
+ msg = "CSR Parsing failed"
+ case DecodeFailed:
+ msg = "CSR Decode failed"
+ case BadRequest:
+ msg = "CSR Bad request"
+ default:
+ panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category APIClientError.", reason))
+ }
+ case CTError:
+ switch reason {
+ case Unknown:
+ msg = "Certificate transparency parsing failed due to unknown error"
+ case PrecertSubmissionFailed:
+ msg = "Certificate transparency precertificate submission failed"
+ default:
+ panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CTError.", reason))
+ }
+ case CertStoreError:
+ switch reason {
+ case Unknown:
+ msg = "Certificate store action failed due to unknown error"
+ default:
+ panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CertStoreError.", reason))
+ }
+
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error type: %d.",
+ category))
+ }
+ return &Error{ErrorCode: errorCode, Message: msg}
+}
+
+// Wrap returns an error that contains the given error and an error code derived from
+// the given category, reason and the error. Currently, to avoid confusion, it is not
+// allowed to create an error of category Success.
+func Wrap(category Category, reason Reason, err error) *Error {
+ errorCode := int(category) + int(reason)
+ if err == nil {
+ panic("Wrap needs a supplied error to initialize.")
+ }
+
+ // do not double wrap an error
+ switch err.(type) {
+ case *Error:
+ panic("Unable to wrap a wrapped error.")
+ }
+
+ switch category {
+ case CertificateError:
+ // given VerifyFailed, report the status with a more detailed status code
+ // for certain certificate errors we care about.
+ if reason == VerifyFailed {
+ switch errorType := err.(type) {
+ case x509.CertificateInvalidError:
+ errorCode += certificateInvalid + int(errorType.Reason)
+ case x509.UnknownAuthorityError:
+ errorCode += unknownAuthority
+ }
+ }
+ case PrivateKeyError, IntermediatesError, RootError, PolicyError, DialError,
+ APIClientError, CSRError, CTError, CertStoreError:
+ // no-op, just use the error
+ default:
+ panic(fmt.Sprintf("Unsupported CFSSL error type: %d.",
+ category))
+ }
+
+ return &Error{ErrorCode: errorCode, Message: err.Error()}
+
+}
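+
+// A hypothetical usage sketch: callers normally construct errors through New
+// or Wrap rather than building Error values directly; here der is an assumed
+// variable holding DER bytes:
+//
+//	if _, err := x509.ParseCertificate(der); err != nil {
+//		return Wrap(CertificateError, ParseFailed, err)
+//	}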
diff --git a/vendor/github.com/cloudflare/cfssl/errors/http.go b/vendor/github.com/cloudflare/cfssl/errors/http.go
new file mode 100644
index 0000000000..c9c0a39c70
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/errors/http.go
@@ -0,0 +1,47 @@
+package errors
+
+import (
+ "errors"
+ "net/http"
+)
+
+// HTTPError is an augmented error with an HTTP status code.
+type HTTPError struct {
+ StatusCode int
+ error
+}
+
+// Error implements the error interface.
+func (e *HTTPError) Error() string {
+ return e.error.Error()
+}
+
+// NewMethodNotAllowed returns an appropriate error in the case that
+// an HTTP client uses an invalid method (i.e. a GET in place of a POST)
+// on an API endpoint.
+func NewMethodNotAllowed(method string) *HTTPError {
+ return &HTTPError{http.StatusMethodNotAllowed, errors.New(`Method is not allowed:"` + method + `"`)}
+}
+
+// NewBadRequest creates an HTTPError with the given error and HTTP status code 400.
+func NewBadRequest(err error) *HTTPError {
+ return &HTTPError{http.StatusBadRequest, err}
+}
+
+// NewBadRequestString returns an HTTPError with the supplied message
+// and HTTP status code 400.
+func NewBadRequestString(s string) *HTTPError {
+ return NewBadRequest(errors.New(s))
+}
+
+// NewBadRequestMissingParameter returns a 400 HTTPError indicating that a
+// required parameter is missing from the HTTP request.
+func NewBadRequestMissingParameter(s string) *HTTPError {
+ return NewBadRequestString(`Missing parameter "` + s + `"`)
+}
+
+// NewBadRequestUnwantedParameter returns a 400 HTTPError indicating that an
+// unnecessary parameter is present in the HTTP request.
+func NewBadRequestUnwantedParameter(s string) *HTTPError {
+ return NewBadRequestString(`Unwanted parameter "` + s + `"`)
+}
diff --git a/vendor/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go b/vendor/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go
new file mode 100644
index 0000000000..bcc7418508
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go
@@ -0,0 +1,42 @@
+// Package derhelpers implements common functionality
+// on DER encoded data
+package derhelpers
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+
+ cferr "github.com/cloudflare/cfssl/errors"
+)
+
+// ParsePrivateKeyDER parses a PKCS #1, PKCS #8, or elliptic curve
+// DER-encoded private key. The key must not be in PEM format.
+func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
+ generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
+ if err != nil {
+ generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
+ if err != nil {
+ generalKey, err = x509.ParseECPrivateKey(keyDER)
+ if err != nil {
+ // We don't include the actual error in the
+ // final error to avoid leaking any
+ // information about the private key.
+ return nil, cferr.New(cferr.PrivateKeyError,
+ cferr.ParseFailed)
+ }
+ }
+ }
+
+ switch k := generalKey.(type) {
+ case *rsa.PrivateKey:
+ return k, nil
+ case *ecdsa.PrivateKey:
+ return k, nil
+ }
+
+ // should never reach here
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.ParseFailed)
+}
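+
+// A hypothetical usage sketch (the key.der path is an assumption):
+//
+//	keyDER, _ := ioutil.ReadFile("key.der")
+//	signerKey, err := ParsePrivateKeyDER(keyDER)
+//	if err != nil {
+//		// a cferr.PrivateKeyError/ParseFailed, with no key details leaked
+//	}
+//	_ = signerKey.Public()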
diff --git a/vendor/github.com/cloudflare/cfssl/helpers/helpers.go b/vendor/github.com/cloudflare/cfssl/helpers/helpers.go
new file mode 100644
index 0000000000..48d096a987
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/helpers/helpers.go
@@ -0,0 +1,518 @@
+// Package helpers implements utility functionality common to many
+// CFSSL packages.
+package helpers
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/pem"
+ "errors"
+ "io/ioutil"
+ "math/big"
+
+ "strings"
+ "time"
+
+ "github.com/cloudflare/cfssl/crypto/pkcs7"
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers/derhelpers"
+ "github.com/cloudflare/cfssl/log"
+ "golang.org/x/crypto/pkcs12"
+)
+
+// OneYear is a time.Duration representing a year's worth of seconds.
+const OneYear = 8760 * time.Hour
+
+// OneDay is a time.Duration representing a day's worth of seconds.
+const OneDay = 24 * time.Hour
+
+// InclusiveDate returns the time.Time representation of a date - 1
+// nanosecond. This allows time.After to be used inclusively.
+func InclusiveDate(year int, month time.Month, day int) time.Time {
+ return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
+}
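+
+// For example, InclusiveDate(2015, time.April, 1) is
+// 2015-03-31 23:59:59.999999999 UTC, so issued.After(Apr2015) holds for any
+// certificate issued at or after 2015-04-01 00:00:00 UTC.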
+
+// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
+// issuing certificates valid for more than 5 years.
+var Jul2012 = InclusiveDate(2012, time.July, 01)
+
+// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
+// issuing certificates valid for more than 39 months.
+var Apr2015 = InclusiveDate(2015, time.April, 01)
+
+// KeyLength returns the bit size of an ECDSA or RSA public key.
+func KeyLength(key interface{}) int {
+ if key == nil {
+ return 0
+ }
+ if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok {
+ return ecdsaKey.Curve.Params().BitSize
+ } else if rsaKey, ok := key.(*rsa.PublicKey); ok {
+ return rsaKey.N.BitLen()
+ }
+
+ return 0
+}
+
+// ExpiryTime returns the time at which the certificate chain expires,
+// i.e. the earliest NotAfter among its certificates.
+func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
+ if len(chain) == 0 {
+ return
+ }
+
+ notAfter = chain[0].NotAfter
+ for _, cert := range chain {
+ if notAfter.After(cert.NotAfter) {
+ notAfter = cert.NotAfter
+ }
+ }
+ return
+}
+
+// MonthsValid returns the number of months for which a certificate is valid.
+func MonthsValid(c *x509.Certificate) int {
+ issued := c.NotBefore
+ expiry := c.NotAfter
+ years := (expiry.Year() - issued.Year())
+ months := years*12 + int(expiry.Month()) - int(issued.Month())
+
+ // Round up if the validity period includes a partial month.
+ if expiry.Day() > issued.Day() {
+ months++
+ }
+ return months
+}
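+
+// For example, a certificate issued 2015-01-15 and expiring 2016-03-20 spans
+// 12 + (3 - 1) = 14 whole months plus a partial month (day 20 > day 15), so
+// MonthsValid reports 15.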
+
+// ValidExpiry determines if a certificate is valid for an acceptable
+// length of time per the CA/Browser Forum baseline requirements.
+// See https://cabforum.org/wp-content/uploads/CAB-Forum-BR-1.3.0.pdf
+func ValidExpiry(c *x509.Certificate) bool {
+ issued := c.NotBefore
+
+ var maxMonths int
+ switch {
+ case issued.After(Apr2015):
+ maxMonths = 39
+ case issued.After(Jul2012):
+ maxMonths = 60
+ case issued.Before(Jul2012):
+ maxMonths = 120
+ }
+
+ return MonthsValid(c) <= maxMonths
+}
+
+// SignatureString returns the TLS signature string corresponding to
+// an X509 signature algorithm.
+func SignatureString(alg x509.SignatureAlgorithm) string {
+ switch alg {
+ case x509.MD2WithRSA:
+ return "MD2WithRSA"
+ case x509.MD5WithRSA:
+ return "MD5WithRSA"
+ case x509.SHA1WithRSA:
+ return "SHA1WithRSA"
+ case x509.SHA256WithRSA:
+ return "SHA256WithRSA"
+ case x509.SHA384WithRSA:
+ return "SHA384WithRSA"
+ case x509.SHA512WithRSA:
+ return "SHA512WithRSA"
+ case x509.DSAWithSHA1:
+ return "DSAWithSHA1"
+ case x509.DSAWithSHA256:
+ return "DSAWithSHA256"
+ case x509.ECDSAWithSHA1:
+ return "ECDSAWithSHA1"
+ case x509.ECDSAWithSHA256:
+ return "ECDSAWithSHA256"
+ case x509.ECDSAWithSHA384:
+ return "ECDSAWithSHA384"
+ case x509.ECDSAWithSHA512:
+ return "ECDSAWithSHA512"
+ default:
+ return "Unknown Signature"
+ }
+}
+
+// HashAlgoString returns the name of the hash algorithm contained in the
+// signature method.
+func HashAlgoString(alg x509.SignatureAlgorithm) string {
+ switch alg {
+ case x509.MD2WithRSA:
+ return "MD2"
+ case x509.MD5WithRSA:
+ return "MD5"
+ case x509.SHA1WithRSA:
+ return "SHA1"
+ case x509.SHA256WithRSA:
+ return "SHA256"
+ case x509.SHA384WithRSA:
+ return "SHA384"
+ case x509.SHA512WithRSA:
+ return "SHA512"
+ case x509.DSAWithSHA1:
+ return "SHA1"
+ case x509.DSAWithSHA256:
+ return "SHA256"
+ case x509.ECDSAWithSHA1:
+ return "SHA1"
+ case x509.ECDSAWithSHA256:
+ return "SHA256"
+ case x509.ECDSAWithSHA384:
+ return "SHA384"
+ case x509.ECDSAWithSHA512:
+ return "SHA512"
+ default:
+ return "Unknown Hash Algorithm"
+ }
+}
+
+// EncodeCertificatesPEM encodes a number of x509 certificates to PEM.
+func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
+ var buffer bytes.Buffer
+ for _, cert := range certs {
+ pem.Encode(&buffer, &pem.Block{
+ Type: "CERTIFICATE",
+ Bytes: cert.Raw,
+ })
+ }
+
+ return buffer.Bytes()
+}
+
+// EncodeCertificatePEM encodes a single x509 certificate to PEM.
+func EncodeCertificatePEM(cert *x509.Certificate) []byte {
+ return EncodeCertificatesPEM([]*x509.Certificate{cert})
+}
+
+// ParseCertificatesPEM parses a sequence of PEM-encoded certificates and
+// returns them; it can handle PEM-encoded PKCS #7 structures.
+func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
+ var certs []*x509.Certificate
+ var err error
+ certsPEM = bytes.TrimSpace(certsPEM)
+ for len(certsPEM) > 0 {
+ var cert []*x509.Certificate
+ cert, certsPEM, err = ParseOneCertificateFromPEM(certsPEM)
+ if err != nil {
+ return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
+ } else if cert == nil {
+ break
+ }
+
+ certs = append(certs, cert...)
+ }
+ if len(certsPEM) > 0 {
+ return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+ }
+ return certs, nil
+}
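+
+// A hypothetical usage sketch (the bundle.pem path is an assumption):
+//
+//	pemBytes, _ := ioutil.ReadFile("bundle.pem")
+//	certs, err := ParseCertificatesPEM(pemBytes)
+//	if err != nil {
+//		// a cferr.CertificateError with ParseFailed or DecodeFailed
+//	}
+//	log.Infof("parsed %d certificates", len(certs))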
+
+// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
+// either PKCS #7, PKCS #12, or raw x509.
+func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
+ certsDER = bytes.TrimSpace(certsDER)
+ pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
+ if err != nil {
+ var pkcs12data interface{}
+ certs = make([]*x509.Certificate, 1)
+ pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
+ if err != nil {
+ certs, err = x509.ParseCertificates(certsDER)
+ if err != nil {
+ return nil, nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+ }
+ } else {
+ key = pkcs12data.(crypto.Signer)
+ }
+ } else {
+ if pkcs7data.ContentInfo != "SignedData" {
+ return nil, nil, cferr.Wrap(cferr.CertificateError, cferr.DecodeFailed, errors.New("can only extract certificates from signed data content info"))
+ }
+ certs = pkcs7data.Content.SignedData.Certificates
+ }
+ if certs == nil {
+ return nil, key, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+ }
+ return certs, key, nil
+}
+
+// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and checks whether it is self-signed.
+func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
+ cert, err := ParseCertificatePEM(certPEM)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.VerifyFailed, err)
+ }
+ return cert, nil
+}
+
+// ParseCertificatePEM parses and returns a PEM-encoded certificate;
+// it can handle PEM-encoded PKCS #7 structures.
+func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
+ certPEM = bytes.TrimSpace(certPEM)
+ cert, rest, err := ParseOneCertificateFromPEM(certPEM)
+ if err != nil {
+ // Log the actual parsing error but throw a default parse error message.
+ log.Debugf("Certificate parsing error: %v", err)
+ return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
+ } else if cert == nil {
+ return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+ } else if len(rest) > 0 {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PEM file should contain only one object"))
+ } else if len(cert) > 1 {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
+ }
+ return cert[0], nil
+}
+
+// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
+// either a raw x509 certificate or a PKCS #7 structure possibly containing
+// multiple certificates, from the top of certsPEM, which itself may
+// contain multiple PEM encoded certificate objects.
+func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
+ block, rest := pem.Decode(certsPEM)
+ if block == nil {
+ return nil, rest, nil
+ }
+
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
+ if err != nil {
+ return nil, rest, err
+ }
+ if pkcs7data.ContentInfo != "SignedData" {
+ return nil, rest, errors.New("only PKCS #7 Signed Data Content Info supported for certificate parsing")
+ }
+ certs := pkcs7data.Content.SignedData.Certificates
+ if certs == nil {
+ return nil, rest, errors.New("PKCS #7 structure contains no certificates")
+ }
+ return certs, rest, nil
+ }
+ var certs = []*x509.Certificate{cert}
+ return certs, rest, nil
+}
+
+// LoadPEMCertPool loads a pool of PEM certificates from file.
+func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
+ if certsFile == "" {
+ return nil, nil
+ }
+ pemCerts, err := ioutil.ReadFile(certsFile)
+ if err != nil {
+ return nil, err
+ }
+
+ return PEMToCertPool(pemCerts)
+}
+
+// PEMToCertPool converts PEM certificates to a CertPool.
+func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
+ if len(pemCerts) == 0 {
+ return nil, nil
+ }
+
+ certPool := x509.NewCertPool()
+ if !certPool.AppendCertsFromPEM(pemCerts) {
+ return nil, errors.New("failed to load cert pool")
+ }
+
+ return certPool, nil
+}
+
+// ParsePrivateKeyPEM parses and returns a PEM-encoded private
+// key. The private key may be either an unencrypted PKCS #8, PKCS #1,
+// or elliptic curve private key.
+func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
+ return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
+}
+
+// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
+// key. The private key may be a potentially encrypted PKCS #8, PKCS #1,
+// or elliptic curve private key.
+func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
+ keyDER, err := GetKeyDERFromPEM(keyPEM, password)
+ if err != nil {
+ return nil, err
+ }
+
+ return derhelpers.ParsePrivateKeyDER(keyDER)
+}
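+
+// A hypothetical usage sketch (the file name and password are assumptions):
+//
+//	keyPEM, _ := ioutil.ReadFile("ca-key.pem")
+//	signerKey, err := ParsePrivateKeyPEMWithPassword(keyPEM, []byte("secret"))
+//	if err != nil {
+//		// cferr.PrivateKeyError; Encrypted is returned when the key is
+//		// encrypted but no password was supplied
+//	}
+//	_ = signerKey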
+
+// GetKeyDERFromPEM parses a PEM-encoded private key and returns DER-format key bytes.
+func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
+ keyDER, _ := pem.Decode(in)
+ if keyDER != nil {
+ if procType, ok := keyDER.Headers["Proc-Type"]; ok {
+ if strings.Contains(procType, "ENCRYPTED") {
+ if password != nil {
+ return x509.DecryptPEMBlock(keyDER, password)
+ }
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.Encrypted)
+ }
+ }
+ return keyDER.Bytes, nil
+ }
+
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.DecodeFailed)
+}
+
+// CheckSignature verifies a signature made by the key on a CSR, such
+// as on the CSR itself.
+func CheckSignature(csr *x509.CertificateRequest, algo x509.SignatureAlgorithm, signed, signature []byte) error {
+ var hashType crypto.Hash
+
+ switch algo {
+ case x509.SHA1WithRSA, x509.ECDSAWithSHA1:
+ hashType = crypto.SHA1
+ case x509.SHA256WithRSA, x509.ECDSAWithSHA256:
+ hashType = crypto.SHA256
+ case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
+ hashType = crypto.SHA384
+ case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
+ hashType = crypto.SHA512
+ default:
+ return x509.ErrUnsupportedAlgorithm
+ }
+
+ if !hashType.Available() {
+ return x509.ErrUnsupportedAlgorithm
+ }
+ h := hashType.New()
+
+ h.Write(signed)
+ digest := h.Sum(nil)
+
+ switch pub := csr.PublicKey.(type) {
+ case *rsa.PublicKey:
+ return rsa.VerifyPKCS1v15(pub, hashType, digest, signature)
+ case *ecdsa.PublicKey:
+ ecdsaSig := new(struct{ R, S *big.Int })
+ if _, err := asn1.Unmarshal(signature, ecdsaSig); err != nil {
+ return err
+ }
+ if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
+ return errors.New("x509: ECDSA signature contained zero or negative values")
+ }
+ if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) {
+ return errors.New("x509: ECDSA verification failure")
+ }
+ return nil
+ }
+ return x509.ErrUnsupportedAlgorithm
+}
+
+// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
+func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
+ in = bytes.TrimSpace(in)
+ p, rest := pem.Decode(in)
+ if p != nil {
+ if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
+ return nil, rest, cferr.New(cferr.CSRError, cferr.BadRequest)
+ }
+
+ csr, err = x509.ParseCertificateRequest(p.Bytes)
+ } else {
+ csr, err = x509.ParseCertificateRequest(in)
+ }
+
+ if err != nil {
+ return nil, rest, err
+ }
+
+ err = CheckSignature(csr, csr.SignatureAlgorithm, csr.RawTBSCertificateRequest, csr.Signature)
+ if err != nil {
+ return nil, rest, err
+ }
+
+ return csr, rest, nil
+}
+
+// ParseCSRPEM parses a PEM-encoded certificate signing request.
+// It does not check the signature. This is useful for dumping data from a CSR
+// locally.
+func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
+ block, _ := pem.Decode(csrPEM)
+ if block == nil {
+ // guard against a nil dereference when the input is not valid PEM
+ return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed)
+ }
+
+ csrObject, err := x509.ParseCertificateRequest(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return csrObject, nil
+}
+
+// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
+func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
+ switch pub := priv.Public().(type) {
+ case *rsa.PublicKey:
+ bitLength := pub.N.BitLen()
+ switch {
+ case bitLength >= 4096:
+ return x509.SHA512WithRSA
+ case bitLength >= 3072:
+ return x509.SHA384WithRSA
+ case bitLength >= 2048:
+ return x509.SHA256WithRSA
+ default:
+ return x509.SHA1WithRSA
+ }
+ case *ecdsa.PublicKey:
+ switch pub.Curve {
+ case elliptic.P521():
+ return x509.ECDSAWithSHA512
+ case elliptic.P384():
+ return x509.ECDSAWithSHA384
+ case elliptic.P256():
+ return x509.ECDSAWithSHA256
+ default:
+ return x509.ECDSAWithSHA1
+ }
+ default:
+ return x509.UnknownSignatureAlgorithm
+ }
+}
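+
+// For example, a 2048-bit RSA key maps to x509.SHA256WithRSA, and a P-256
+// ECDSA key maps to x509.ECDSAWithSHA256.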
+
+// LoadClientCertificate loads a key/certificate pair from PEM files.
+func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, error) {
+ if certFile != "" && keyFile != "" {
+ cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ log.Critical("Unable to read client certificate from file: %s or key from file: %s", certFile, keyFile)
+ return nil, err
+ }
+ log.Debug("Client certificate loaded ")
+ return &cert, nil
+ }
+ return nil, nil
+}
+
+// CreateTLSConfig creates a tls.Config object from certs and roots
+func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Config {
+ var certs []tls.Certificate
+ if cert != nil {
+ certs = []tls.Certificate{*cert}
+ }
+ return &tls.Config{
+ Certificates: certs,
+ RootCAs: remoteCAs,
+ }
+}
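+
+// A hypothetical sketch tying the helpers together (file names are
+// assumptions):
+//
+//	pool, _ := LoadPEMCertPool("remote-ca.pem")
+//	cert, _ := LoadClientCertificate("client.pem", "client-key.pem")
+//	cfg := CreateTLSConfig(pool, cert)
+//	client := &http.Client{Transport: &http.Transport{TLSClientConfig: cfg}}
+//	_ = client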
diff --git a/vendor/github.com/cloudflare/cfssl/info/info.go b/vendor/github.com/cloudflare/cfssl/info/info.go
new file mode 100644
index 0000000000..926a411ffb
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/info/info.go
@@ -0,0 +1,15 @@
+// Package info contains the definitions for the info endpoint
+package info
+
+// Req is the request struct for an info API request.
+type Req struct {
+ Label string `json:"label"`
+ Profile string `json:"profile"`
+}
+
+// Resp is the response for an Info API request.
+type Resp struct {
+ Certificate string `json:"certificate"`
+ Usage []string `json:"usages"`
+ ExpiryString string `json:"expiry"`
+}
diff --git a/vendor/github.com/cloudflare/cfssl/initca/initca.go b/vendor/github.com/cloudflare/cfssl/initca/initca.go
new file mode 100644
index 0000000000..c9ab3bbc35
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/initca/initca.go
@@ -0,0 +1,224 @@
+// Package initca contains code to initialise a certificate authority,
+// generating a new root key and certificate.
+package initca
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "errors"
+ "io/ioutil"
+ "time"
+
+ "github.com/cloudflare/cfssl/config"
+ "github.com/cloudflare/cfssl/csr"
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/log"
+ "github.com/cloudflare/cfssl/signer"
+ "github.com/cloudflare/cfssl/signer/local"
+)
+
+// validator contains the default validation logic for certificate
+// authority certificates. The only requirement here is that the
+// certificate have a non-empty subject field.
+func validator(req *csr.CertificateRequest) error {
+ if req.CN != "" {
+ return nil
+ }
+
+ if len(req.Names) == 0 {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information"))
+ }
+
+ for i := range req.Names {
+ if csr.IsNameEmpty(req.Names[i]) {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information"))
+ }
+ }
+
+ return nil
+}
+
+// New creates a new root certificate from the certificate request.
+func New(req *csr.CertificateRequest) (cert, csrPEM, key []byte, err error) {
+ policy := CAPolicy()
+ if req.CA != nil {
+ if req.CA.Expiry != "" {
+ policy.Default.ExpiryString = req.CA.Expiry
+ policy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
+ if err != nil {
+ return
+ }
+ }
+
+ policy.Default.CAConstraint.MaxPathLen = req.CA.PathLength
+ if req.CA.PathLength != 0 && req.CA.PathLenZero {
+ log.Infof("ignoring invalid 'pathlenzero' value")
+ } else {
+ policy.Default.CAConstraint.MaxPathLenZero = req.CA.PathLenZero
+ }
+ }
+
+ g := &csr.Generator{Validator: validator}
+ csrPEM, key, err = g.ProcessRequest(req)
+ if err != nil {
+ log.Errorf("failed to process request: %v", err)
+ key = nil
+ return
+ }
+
+ priv, err := helpers.ParsePrivateKeyPEM(key)
+ if err != nil {
+ log.Errorf("failed to parse private key: %v", err)
+ return
+ }
+
+ s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), policy)
+ if err != nil {
+ log.Errorf("failed to create signer: %v", err)
+ return
+ }
+
+ signReq := signer.SignRequest{Hosts: req.Hosts, Request: string(csrPEM)}
+ cert, err = s.Sign(signReq)
+
+ return
+
+}
+
+// NewFromPEM creates a new root certificate from the key file passed in.
+func NewFromPEM(req *csr.CertificateRequest, keyFile string) (cert, csrPEM []byte, err error) {
+ privData, err := ioutil.ReadFile(keyFile)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ priv, err := helpers.ParsePrivateKeyPEM(privData)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return NewFromSigner(req, priv)
+}
+
+// RenewFromPEM re-creates a root certificate from the CA cert and key
+// files. The resulting root certificate uses the input CA certificate
+// as the template and has the same expiry length. E.g. if the existing CA
+// is valid for a year from Jan 01 2015 to Jan 01 2016, the renewed certificate
+// will be valid from now and will likewise expire in one year.
+func RenewFromPEM(caFile, keyFile string) ([]byte, error) {
+ caBytes, err := ioutil.ReadFile(caFile)
+ if err != nil {
+ return nil, err
+ }
+
+ ca, err := helpers.ParseCertificatePEM(caBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ keyBytes, err := ioutil.ReadFile(keyFile)
+ if err != nil {
+ return nil, err
+ }
+
+ key, err := helpers.ParsePrivateKeyPEM(keyBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return RenewFromSigner(ca, key)
+
+}
+
+// NewFromSigner creates a new root certificate from a crypto.Signer.
+func NewFromSigner(req *csr.CertificateRequest, priv crypto.Signer) (cert, csrPEM []byte, err error) {
+ policy := CAPolicy()
+ if req.CA != nil {
+ if req.CA.Expiry != "" {
+ policy.Default.ExpiryString = req.CA.Expiry
+ policy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ policy.Default.CAConstraint.MaxPathLen = req.CA.PathLength
+ if req.CA.PathLength != 0 && req.CA.PathLenZero {
+ log.Infof("ignoring invalid 'pathlenzero' value")
+ } else {
+ policy.Default.CAConstraint.MaxPathLenZero = req.CA.PathLenZero
+ }
+ }
+
+ csrPEM, err = csr.Generate(priv, req)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), policy)
+ if err != nil {
+ log.Errorf("failed to create signer: %v", err)
+ return
+ }
+
+ signReq := signer.SignRequest{Request: string(csrPEM)}
+ cert, err = s.Sign(signReq)
+ return
+}
+
+// RenewFromSigner re-creates a root certificate from the CA cert and crypto.Signer.
+// The resulting root certificate uses the input CA certificate
+// as the template and has the same expiry length. E.g. if the existing CA
+// is valid for a year from Jan 01 2015 to Jan 01 2016, the renewed certificate
+// will be valid from now and will likewise expire in one year.
+func RenewFromSigner(ca *x509.Certificate, priv crypto.Signer) ([]byte, error) {
+ if !ca.IsCA {
+ return nil, errors.New("input certificate is not a CA cert")
+ }
+
+ // match the certificate's public key against the private key
+ switch ca.PublicKeyAlgorithm {
+ case x509.RSA:
+ var rsaPublicKey *rsa.PublicKey
+ var ok bool
+ if rsaPublicKey, ok = priv.Public().(*rsa.PublicKey); !ok {
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+ }
+ if ca.PublicKey.(*rsa.PublicKey).N.Cmp(rsaPublicKey.N) != 0 {
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+ }
+ case x509.ECDSA:
+ var ecdsaPublicKey *ecdsa.PublicKey
+ var ok bool
+ if ecdsaPublicKey, ok = priv.Public().(*ecdsa.PublicKey); !ok {
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+ }
+ if ca.PublicKey.(*ecdsa.PublicKey).X.Cmp(ecdsaPublicKey.X) != 0 {
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+ }
+ default:
+ return nil, cferr.New(cferr.PrivateKeyError, cferr.NotRSAOrECC)
+ }
+
+ req := csr.ExtractCertificateRequest(ca)
+
+ cert, _, err := NewFromSigner(req, priv)
+ return cert, err
+
+}
+
+// CAPolicy contains the CA issuing policy as default policy.
+var CAPolicy = func() *config.Signing {
+ return &config.Signing{
+ Default: &config.SigningProfile{
+ Usage: []string{"cert sign", "crl sign"},
+ ExpiryString: "43800h",
+ Expiry: 5 * helpers.OneYear,
+ CAConstraint: config.CAConstraint{IsCA: true},
+ },
+ }
+}
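+
+// A hypothetical usage sketch (the CN is an assumption, and a nil KeyRequest
+// is assumed to fall back to the generator's default key request):
+//
+//	req := &csr.CertificateRequest{CN: "Example Root CA"}
+//	certPEM, csrPEM, keyPEM, err := New(req)
+//	if err != nil {
+//		log.Fatalf("CA init failed: %v", err)
+//	}
+//	_, _, _ = certPEM, csrPEM, keyPEM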
diff --git a/vendor/github.com/cloudflare/cfssl/log/log.go b/vendor/github.com/cloudflare/cfssl/log/log.go
new file mode 100644
index 0000000000..956c9d4604
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/log/log.go
@@ -0,0 +1,162 @@
+// Package log implements a wrapper around the Go standard library's
+// logging package. Clients should set the current log level; only
+// messages below that level will actually be logged. For example, if
+// Level is set to LevelWarning, only log messages at the Warning,
+// Error, and Critical levels will be logged.
+package log
+
+import (
+ "fmt"
+ "log"
+ "os"
+)
+
+// The following constants represent logging levels in increasing order of severity.
+const (
+ // LevelDebug is the log level for Debug statements.
+ LevelDebug = iota
+ // LevelInfo is the log level for Info statements.
+ LevelInfo
+ // LevelWarning is the log level for Warning statements.
+ LevelWarning
+ // LevelError is the log level for Error statements.
+ LevelError
+ // LevelCritical is the log level for Critical statements.
+ LevelCritical
+ // LevelFatal is the log level for Fatal statements.
+ LevelFatal
+)
+
+var levelPrefix = [...]string{
+ LevelDebug: "DEBUG",
+ LevelInfo: "INFO",
+ LevelWarning: "WARNING",
+ LevelError: "ERROR",
+ LevelCritical: "CRITICAL",
+ LevelFatal: "FATAL",
+}
+
+// Level stores the current logging level.
+var Level = LevelInfo
+
+// SyslogWriter specifies the necessary methods for an alternate output
+// destination passed in via SetLogger.
+//
+// SyslogWriter is satisfied by *syslog.Writer.
+type SyslogWriter interface {
+ Debug(string)
+ Info(string)
+ Warning(string)
+ Err(string)
+ Crit(string)
+ Emerg(string)
+}
+
+// syslogWriter stores the SetLogger() parameter.
+var syslogWriter SyslogWriter
+
+// SetLogger sets the logger used for output by this package.
+// A *syslog.Writer is a good choice for the logger parameter.
+// Call with a nil parameter to revert to the default behavior.
+func SetLogger(logger SyslogWriter) {
+ syslogWriter = logger
+}
+
+func print(l int, msg string) {
+ if l >= Level {
+ if syslogWriter != nil {
+ switch l {
+ case LevelDebug:
+ syslogWriter.Debug(msg)
+ case LevelInfo:
+ syslogWriter.Info(msg)
+ case LevelWarning:
+ syslogWriter.Warning(msg)
+ case LevelError:
+ syslogWriter.Err(msg)
+ case LevelCritical:
+ syslogWriter.Crit(msg)
+ case LevelFatal:
+ syslogWriter.Emerg(msg)
+ }
+ } else {
+ log.Printf("[%s] %s", levelPrefix[l], msg)
+ }
+ }
+}
+
+func outputf(l int, format string, v []interface{}) {
+ print(l, fmt.Sprintf(format, v...))
+}
+
+func output(l int, v []interface{}) {
+ print(l, fmt.Sprint(v...))
+}
+
+// Fatalf logs a formatted message at the "fatal" level and then exits. The
+// arguments are handled in the same manner as fmt.Printf.
+func Fatalf(format string, v ...interface{}) {
+ outputf(LevelFatal, format, v)
+ os.Exit(1)
+}
+
+// Fatal logs its arguments at the "fatal" level and then exits.
+func Fatal(v ...interface{}) {
+ output(LevelFatal, v)
+ os.Exit(1)
+}
+
+// Criticalf logs a formatted message at the "critical" level. The
+// arguments are handled in the same manner as fmt.Printf.
+func Criticalf(format string, v ...interface{}) {
+ outputf(LevelCritical, format, v)
+}
+
+// Critical logs its arguments at the "critical" level.
+func Critical(v ...interface{}) {
+ output(LevelCritical, v)
+}
+
+// Errorf logs a formatted message at the "error" level. The arguments
+// are handled in the same manner as fmt.Printf.
+func Errorf(format string, v ...interface{}) {
+ outputf(LevelError, format, v)
+}
+
+// Error logs its arguments at the "error" level.
+func Error(v ...interface{}) {
+ output(LevelError, v)
+}
+
+// Warningf logs a formatted message at the "warning" level. The
+// arguments are handled in the same manner as fmt.Printf.
+func Warningf(format string, v ...interface{}) {
+ outputf(LevelWarning, format, v)
+}
+
+// Warning logs its arguments at the "warning" level.
+func Warning(v ...interface{}) {
+ output(LevelWarning, v)
+}
+
+// Infof logs a formatted message at the "info" level. The arguments
+// are handled in the same manner as fmt.Printf.
+func Infof(format string, v ...interface{}) {
+ outputf(LevelInfo, format, v)
+}
+
+// Info logs its arguments at the "info" level.
+func Info(v ...interface{}) {
+ output(LevelInfo, v)
+}
+
+// Debugf logs a formatted message at the "debug" level. The arguments
+// are handled in the same manner as fmt.Printf.
+func Debugf(format string, v ...interface{}) {
+ outputf(LevelDebug, format, v)
+}
+
+// Debug logs its arguments at the "debug" level.
+func Debug(v ...interface{}) {
+ output(LevelDebug, v)
+}
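+
+// A hypothetical usage sketch:
+//
+//	log.Level = log.LevelDebug
+//	log.Debugf("loaded %d certs", n) // printed: Debug >= the current Level
+//	log.Level = log.LevelWarning
+//	log.Info("noisy detail") // suppressed: Info is below Warning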
diff --git a/vendor/github.com/cloudflare/cfssl/ocsp/config/config.go b/vendor/github.com/cloudflare/cfssl/ocsp/config/config.go
new file mode 100644
index 0000000000..a19b113d4e
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/ocsp/config/config.go
@@ -0,0 +1,13 @@
+// Package config in the ocsp directory provides configuration data for an OCSP
+// signer.
+package config
+
+import "time"
+
+// Config contains configuration information required to set up an OCSP signer.
+type Config struct {
+ CACertFile string
+ ResponderCertFile string
+ KeyFile string
+ Interval time.Duration
+}
diff --git a/vendor/github.com/cloudflare/cfssl/revoke/revoke.go b/vendor/github.com/cloudflare/cfssl/revoke/revoke.go
new file mode 100644
index 0000000000..af5d5a894c
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/revoke/revoke.go
@@ -0,0 +1,302 @@
+// Package revoke provides functionality for checking the validity of
+// a cert. Specifically, the temporal validity of the certificate is
+// checked first, then any CRL and OCSP url in the cert is checked.
+package revoke
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/base64"
+ "encoding/pem"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ neturl "net/url"
+ "time"
+
+ "golang.org/x/crypto/ocsp"
+
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/log"
+)
+
+// HardFail determines whether the failure to check the revocation
+// status of a certificate (i.e. due to network failure) causes
+// verification to fail (a hard failure).
+var HardFail = false
+
+// CRLSet associates a PKIX certificate list with the URL the CRL is
+// fetched from.
+var CRLSet = map[string]*pkix.CertificateList{}
+
+// ldapURL checks whether the URL string points to an LDAP resource;
+// we can't handle LDAP CRLs, so such URLs are ignored.
+func ldapURL(url string) bool {
+ u, err := neturl.Parse(url)
+ if err != nil {
+ log.Warningf("error parsing url %s: %v", url, err)
+ return false
+ }
+ if u.Scheme == "ldap" {
+ return true
+ }
+ return false
+}
+
+// revCheck should check the certificate for any revocations. It
+// returns a pair of booleans: the first indicates whether the certificate
+// is revoked, the second indicates whether the revocations were
+// successfully checked. This leads to the following combinations:
+//
+// false, false: an error was encountered while checking revocations.
+//
+// false, true: the certificate was checked successfully and
+// it is not revoked.
+//
+// true, true: the certificate was checked successfully and
+// it is revoked.
+//
+// true, false: failure to check revocation status causes
+// verification to fail
+func revCheck(cert *x509.Certificate) (revoked, ok bool) {
+ for _, url := range cert.CRLDistributionPoints {
+ if ldapURL(url) {
+ log.Infof("skipping LDAP CRL: %s", url)
+ continue
+ }
+
+ if revoked, ok := certIsRevokedCRL(cert, url); !ok {
+ log.Warning("error checking revocation via CRL")
+ if HardFail {
+ return true, false
+ }
+ return false, false
+ } else if revoked {
+ log.Info("certificate is revoked via CRL")
+ return true, true
+ }
+
+ if revoked, ok := certIsRevokedOCSP(cert, HardFail); !ok {
+ log.Warning("error checking revocation via OCSP")
+ if HardFail {
+ return true, false
+ }
+ return false, false
+ } else if revoked {
+ log.Info("certificate is revoked via OCSP")
+ return true, true
+ }
+ }
+
+ return false, true
+}
+
+// fetchCRL fetches and parses a CRL.
+func fetchCRL(url string) (*pkix.CertificateList, error) {
+ resp, err := http.Get(url)
+ if err != nil {
+ return nil, err
+ } else if resp.StatusCode >= 300 {
+ return nil, errors.New("failed to retrieve CRL")
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ return x509.ParseCRL(body)
+}
+
+func getIssuer(cert *x509.Certificate) *x509.Certificate {
+ var issuer *x509.Certificate
+ var err error
+ for _, issuingCert := range cert.IssuingCertificateURL {
+ issuer, err = fetchRemote(issuingCert)
+ if err != nil {
+ continue
+ }
+ break
+ }
+
+ return issuer
+
+}
+
+// check a cert against a specific CRL. Returns the same bool pair
+// as revCheck.
+func certIsRevokedCRL(cert *x509.Certificate, url string) (revoked, ok bool) {
+ crl, ok := CRLSet[url]
+ if ok && crl == nil {
+ ok = false
+ delete(CRLSet, url)
+ }
+
+ var shouldFetchCRL = true
+ if ok {
+ if !crl.HasExpired(time.Now()) {
+ shouldFetchCRL = false
+ }
+ }
+
+ issuer := getIssuer(cert)
+
+ if shouldFetchCRL {
+ var err error
+ crl, err = fetchCRL(url)
+ if err != nil {
+ log.Warningf("failed to fetch CRL: %v", err)
+ return false, false
+ }
+
+ // check CRL signature
+ if issuer != nil {
+ err = issuer.CheckCRLSignature(crl)
+ if err != nil {
+ log.Warningf("failed to verify CRL: %v", err)
+ return false, false
+ }
+ }
+
+ CRLSet[url] = crl
+ }
+
+ for _, revoked := range crl.TBSCertList.RevokedCertificates {
+ if cert.SerialNumber.Cmp(revoked.SerialNumber) == 0 {
+ log.Info("Serial number match: intermediate is revoked.")
+ return true, true
+ }
+ }
+
+ return false, true
+}
+
+// VerifyCertificate ensures that the certificate passed in hasn't
+// expired and checks the CRL for the server.
+func VerifyCertificate(cert *x509.Certificate) (revoked, ok bool) {
+ if !time.Now().Before(cert.NotAfter) {
+ log.Infof("Certificate expired %s\n", cert.NotAfter)
+ return true, true
+ } else if !time.Now().After(cert.NotBefore) {
+ log.Infof("Certificate isn't valid until %s\n", cert.NotBefore)
+ return true, true
+ }
+
+ return revCheck(cert)
+}
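+
+// A hypothetical usage sketch (cert is an assumed *x509.Certificate):
+//
+//	if revoked, ok := VerifyCertificate(cert); !ok {
+//		log.Warning("revocation status could not be checked")
+//	} else if revoked {
+//		log.Warning("certificate is revoked or outside its validity window")
+//	}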
+
+func fetchRemote(url string) (*x509.Certificate, error) {
+ resp, err := http.Get(url)
+ if err != nil {
+ return nil, err
+ }
+
+ in, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ p, _ := pem.Decode(in)
+ if p != nil {
+ return helpers.ParseCertificatePEM(in)
+ }
+
+ return x509.ParseCertificate(in)
+}
+
+var ocspOpts = ocsp.RequestOptions{
+ Hash: crypto.SHA1,
+}
+
+func certIsRevokedOCSP(leaf *x509.Certificate, strict bool) (revoked, ok bool) {
+ var err error
+
+ ocspURLs := leaf.OCSPServer
+ if len(ocspURLs) == 0 {
+ // OCSP not enabled for this certificate.
+ return false, true
+ }
+
+ issuer := getIssuer(leaf)
+
+ if issuer == nil {
+ return false, false
+ }
+
+ ocspRequest, err := ocsp.CreateRequest(leaf, issuer, &ocspOpts)
+ if err != nil {
+ return
+ }
+
+ for _, server := range ocspURLs {
+ resp, err := sendOCSPRequest(server, ocspRequest, issuer)
+ if err != nil {
+ if strict {
+ return
+ }
+ continue
+ }
+ if err = resp.CheckSignatureFrom(issuer); err != nil {
+ return false, false
+ }
+
+ // There wasn't an error fetching the OCSP status.
+ ok = true
+
+ if resp.Status != ocsp.Good {
+ // The certificate was revoked.
+ revoked = true
+ }
+
+ return
+ }
+ return
+}
+
+// sendOCSPRequest attempts to request an OCSP response from the
+// server. The error only indicates a failure to *fetch* the OCSP
+// response, and *does not* mean the certificate is valid.
+func sendOCSPRequest(server string, req []byte, issuer *x509.Certificate) (*ocsp.Response, error) {
+ var resp *http.Response
+ var err error
+ if len(req) > 256 {
+ buf := bytes.NewBuffer(req)
+ resp, err = http.Post(server, "application/ocsp-request", buf)
+ } else {
+ reqURL := server + "/" + base64.StdEncoding.EncodeToString(req)
+ resp, err = http.Get(reqURL)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, errors.New("failed to retrieve OSCP")
+ }
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ switch {
+ case bytes.Equal(body, ocsp.UnauthorizedErrorResponse):
+ return nil, errors.New("OCSP unauthorized")
+ case bytes.Equal(body, ocsp.MalformedRequestErrorResponse):
+ return nil, errors.New("OCSP malformed")
+ case bytes.Equal(body, ocsp.InternalErrorErrorResponse):
+ return nil, errors.New("OCSP internal error")
+ case bytes.Equal(body, ocsp.TryLaterErrorResponse):
+ return nil, errors.New("OCSP try later")
+ case bytes.Equal(body, ocsp.SigRequredErrorResponse):
+ return nil, errors.New("OCSP signature required")
+ }
+
+ return ocsp.ParseResponse(body, issuer)
+}
diff --git a/vendor/github.com/cloudflare/cfssl/signer/local/local.go b/vendor/github.com/cloudflare/cfssl/signer/local/local.go
new file mode 100644
index 0000000000..7e79e68f5e
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/signer/local/local.go
@@ -0,0 +1,475 @@
+// Package local implements certificate signature functionality for CFSSL.
+package local
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/big"
+ "net"
+ "net/http"
+ "net/mail"
+ "os"
+
+ "github.com/cloudflare/cfssl/certdb"
+ "github.com/cloudflare/cfssl/config"
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/info"
+ "github.com/cloudflare/cfssl/log"
+ "github.com/cloudflare/cfssl/signer"
+ "github.com/google/certificate-transparency/go"
+ "github.com/google/certificate-transparency/go/client"
+)
+
+// Signer contains a signer that uses the standard library to
+// support both ECDSA and RSA CA keys.
+type Signer struct {
+ ca *x509.Certificate
+ priv crypto.Signer
+ policy *config.Signing
+ sigAlgo x509.SignatureAlgorithm
+ dbAccessor certdb.Accessor
+}
+
+// NewSigner creates a new Signer directly from a
+// private key and certificate, with optional policy.
+func NewSigner(priv crypto.Signer, cert *x509.Certificate, sigAlgo x509.SignatureAlgorithm, policy *config.Signing) (*Signer, error) {
+ if policy == nil {
+ policy = &config.Signing{
+ Profiles: map[string]*config.SigningProfile{},
+ Default: config.DefaultConfig()}
+ }
+
+ if !policy.Valid() {
+ return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+ }
+
+ return &Signer{
+ ca: cert,
+ priv: priv,
+ sigAlgo: sigAlgo,
+ policy: policy,
+ }, nil
+}
+
+// NewSignerFromFile generates a new local signer from a caFile
+// and a caKey file, both PEM encoded.
+func NewSignerFromFile(caFile, caKeyFile string, policy *config.Signing) (*Signer, error) {
+ log.Debug("Loading CA: ", caFile)
+ ca, err := ioutil.ReadFile(caFile)
+ if err != nil {
+ return nil, err
+ }
+ log.Debug("Loading CA key: ", caKeyFile)
+ cakey, err := ioutil.ReadFile(caKeyFile)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ReadFailed, err)
+ }
+
+ parsedCa, err := helpers.ParseCertificatePEM(ca)
+ if err != nil {
+ return nil, err
+ }
+
+ strPassword := os.Getenv("CFSSL_CA_PK_PASSWORD")
+ password := []byte(strPassword)
+ if strPassword == "" {
+ password = nil
+ }
+
+ priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
+ if err != nil {
+ log.Debug("Malformed private key %v", err)
+ return nil, err
+ }
+
+ return NewSigner(priv, parsedCa, signer.DefaultSigAlgo(priv), policy)
+}
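+
+// A hypothetical usage sketch (file names and csrPEM are assumptions):
+//
+//	s, err := NewSignerFromFile("ca.pem", "ca-key.pem", nil)
+//	if err != nil {
+//		log.Fatalf("signer: %v", err)
+//	}
+//	certPEM, err := s.Sign(signer.SignRequest{Request: string(csrPEM)})
+//	_ = certPEM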
+
+func (s *Signer) sign(template *x509.Certificate, profile *config.SigningProfile) (cert []byte, err error) {
+ var distPoints = template.CRLDistributionPoints
+ err = signer.FillTemplate(template, s.policy.Default, profile)
+ if distPoints != nil && len(distPoints) > 0 {
+ template.CRLDistributionPoints = distPoints
+ }
+ if err != nil {
+ return
+ }
+
+ var initRoot bool
+ if s.ca == nil {
+ if !template.IsCA {
+ err = cferr.New(cferr.PolicyError, cferr.InvalidRequest)
+ return
+ }
+ template.DNSNames = nil
+ template.EmailAddresses = nil
+ s.ca = template
+ initRoot = true
+ }
+
+ derBytes, err := x509.CreateCertificate(rand.Reader, template, s.ca, template.PublicKey, s.priv)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err)
+ }
+ if initRoot {
+ s.ca, err = x509.ParseCertificate(derBytes)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
+ }
+ }
+
+ cert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+ log.Infof("signed certificate with serial number %d", template.SerialNumber)
+ return
+}
+
+// replaceSliceIfEmpty replaces the contents of replaced with newContents if
+// the slice referenced by replaced is empty
+func replaceSliceIfEmpty(replaced, newContents *[]string) {
+ if len(*replaced) == 0 {
+ *replaced = *newContents
+ }
+}
+
+// PopulateSubjectFromCSR has functionality similar to Name, except
+// it fills any empty fields of the resulting pkix.Name with the
+// corresponding fields from req.
+func PopulateSubjectFromCSR(s *signer.Subject, req pkix.Name) pkix.Name {
+ // if no subject, use req
+ if s == nil {
+ return req
+ }
+
+ name := s.Name()
+
+ if name.CommonName == "" {
+ name.CommonName = req.CommonName
+ }
+
+ replaceSliceIfEmpty(&name.Country, &req.Country)
+ replaceSliceIfEmpty(&name.Province, &req.Province)
+ replaceSliceIfEmpty(&name.Locality, &req.Locality)
+ replaceSliceIfEmpty(&name.Organization, &req.Organization)
+ replaceSliceIfEmpty(&name.OrganizationalUnit, &req.OrganizationalUnit)
+ if name.SerialNumber == "" {
+ name.SerialNumber = req.SerialNumber
+ }
+ return name
+}
+
+// OverrideHosts fills template's IPAddresses, EmailAddresses, and DNSNames with the
+// content of hosts, if it is not nil.
+func OverrideHosts(template *x509.Certificate, hosts []string) {
+ if hosts != nil {
+ template.IPAddresses = []net.IP{}
+ template.EmailAddresses = []string{}
+ template.DNSNames = []string{}
+ }
+
+ for i := range hosts {
+ if ip := net.ParseIP(hosts[i]); ip != nil {
+ template.IPAddresses = append(template.IPAddresses, ip)
+ } else if email, err := mail.ParseAddress(hosts[i]); err == nil && email != nil {
+ template.EmailAddresses = append(template.EmailAddresses, email.Address)
+ } else {
+ template.DNSNames = append(template.DNSNames, hosts[i])
+ }
+ }
+
+}
+
+// Sign signs a new certificate based on the PEM-encoded client
+// certificate or certificate request with the signing profile,
+// specified by profileName.
+func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) {
+ profile, err := signer.Profile(s, req.Profile)
+ if err != nil {
+ return
+ }
+
+ block, _ := pem.Decode([]byte(req.Request))
+ if block == nil {
+ return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed)
+ }
+
+ if block.Type != "NEW CERTIFICATE REQUEST" && block.Type != "CERTIFICATE REQUEST" {
+ return nil, cferr.Wrap(cferr.CSRError,
+ cferr.BadRequest, errors.New("not a certificate or csr"))
+ }
+
+ csrTemplate, err := signer.ParseCertificateRequest(s, block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ // Copy out only the fields from the CSR authorized by policy.
+ safeTemplate := x509.Certificate{}
+ // If the profile contains no explicit whitelist, assume that all fields
+ // should be copied from the CSR.
+ if profile.CSRWhitelist == nil {
+ safeTemplate = *csrTemplate
+ } else {
+ if profile.CSRWhitelist.Subject {
+ safeTemplate.Subject = csrTemplate.Subject
+ }
+ if profile.CSRWhitelist.PublicKeyAlgorithm {
+ safeTemplate.PublicKeyAlgorithm = csrTemplate.PublicKeyAlgorithm
+ }
+ if profile.CSRWhitelist.PublicKey {
+ safeTemplate.PublicKey = csrTemplate.PublicKey
+ }
+ if profile.CSRWhitelist.SignatureAlgorithm {
+ safeTemplate.SignatureAlgorithm = csrTemplate.SignatureAlgorithm
+ }
+ if profile.CSRWhitelist.DNSNames {
+ safeTemplate.DNSNames = csrTemplate.DNSNames
+ }
+ if profile.CSRWhitelist.IPAddresses {
+ safeTemplate.IPAddresses = csrTemplate.IPAddresses
+ }
+ if profile.CSRWhitelist.EmailAddresses {
+ safeTemplate.EmailAddresses = csrTemplate.EmailAddresses
+ }
+ }
+
+ if req.CRLOverride != "" {
+ safeTemplate.CRLDistributionPoints = []string{req.CRLOverride}
+ }
+
+ if safeTemplate.IsCA {
+ if !profile.CAConstraint.IsCA {
+ log.Error("local signer policy disallows issuing CA certificate")
+ return nil, cferr.New(cferr.PolicyError, cferr.InvalidRequest)
+ }
+
+ if s.ca != nil && s.ca.MaxPathLen > 0 {
+ if safeTemplate.MaxPathLen >= s.ca.MaxPathLen {
+ log.Error("local signer certificate disallows CA MaxPathLen extending")
+ // do not sign a cert with pathlen > current
+ return nil, cferr.New(cferr.PolicyError, cferr.InvalidRequest)
+ }
+ } else if s.ca != nil && s.ca.MaxPathLen == 0 && s.ca.MaxPathLenZero {
+ log.Error("local signer certificate disallows issuing CA certificate")
+ // signer has pathlen of 0, do not sign more intermediate CAs
+ return nil, cferr.New(cferr.PolicyError, cferr.InvalidRequest)
+ }
+ }
+
+ OverrideHosts(&safeTemplate, req.Hosts)
+ safeTemplate.Subject = PopulateSubjectFromCSR(req.Subject, safeTemplate.Subject)
+
+ // If there is a whitelist, ensure that both the Common Name and SAN DNSNames match
+ if profile.NameWhitelist != nil {
+ if safeTemplate.Subject.CommonName != "" {
+ if profile.NameWhitelist.Find([]byte(safeTemplate.Subject.CommonName)) == nil {
+ return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist)
+ }
+ }
+ for _, name := range safeTemplate.DNSNames {
+ if profile.NameWhitelist.Find([]byte(name)) == nil {
+ return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist)
+ }
+ }
+ for _, name := range safeTemplate.EmailAddresses {
+ if profile.NameWhitelist.Find([]byte(name)) == nil {
+ return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist)
+ }
+ }
+ }
+
+ if profile.ClientProvidesSerialNumbers {
+ if req.Serial == nil {
+ return nil, cferr.New(cferr.CertificateError, cferr.MissingSerial)
+ }
+ safeTemplate.SerialNumber = req.Serial
+ } else {
+ // RFC 5280 4.1.2.2:
+ // Certificate users MUST be able to handle serialNumber
+ // values up to 20 octets. Conforming CAs MUST NOT use
+ // serialNumber values longer than 20 octets.
+ //
+ // If CFSSL is providing the serial numbers, it makes
+ // sense to use the max supported size.
+ serialNumber := make([]byte, 20)
+ _, err = io.ReadFull(rand.Reader, serialNumber)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err)
+ }
+
+ // SetBytes interprets buf as the bytes of a big-endian
+ // unsigned integer. The leading byte should be masked
+ // off to ensure it isn't negative.
+ serialNumber[0] &= 0x7F
+
+ safeTemplate.SerialNumber = new(big.Int).SetBytes(serialNumber)
+ }
+
+ if len(req.Extensions) > 0 {
+ for _, ext := range req.Extensions {
+ oid := asn1.ObjectIdentifier(ext.ID)
+ if !profile.ExtensionWhitelist[oid.String()] {
+ return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest)
+ }
+
+ rawValue, err := hex.DecodeString(ext.Value)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CertificateError, cferr.InvalidRequest, err)
+ }
+
+ safeTemplate.ExtraExtensions = append(safeTemplate.ExtraExtensions, pkix.Extension{
+ Id: oid,
+ Critical: ext.Critical,
+ Value: rawValue,
+ })
+ }
+ }
+
+ var certTBS = safeTemplate
+
+ if len(profile.CTLogServers) > 0 {
+ // Add a poison extension which prevents validation
+ var poisonExtension = pkix.Extension{Id: signer.CTPoisonOID, Critical: true, Value: []byte{0x05, 0x00}}
+ var poisonedPreCert = certTBS
+ poisonedPreCert.ExtraExtensions = append(safeTemplate.ExtraExtensions, poisonExtension)
+ cert, err = s.sign(&poisonedPreCert, profile)
+ if err != nil {
+ return
+ }
+
+ derCert, _ := pem.Decode(cert)
+ prechain := []ct.ASN1Cert{derCert.Bytes, s.ca.Raw}
+ var sctList []ct.SignedCertificateTimestamp
+
+ for _, server := range profile.CTLogServers {
+ log.Infof("submitting poisoned precertificate to %s", server)
+ var ctclient = client.New(server, nil)
+ var resp *ct.SignedCertificateTimestamp
+ resp, err = ctclient.AddPreChain(prechain)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CTError, cferr.PrecertSubmissionFailed, err)
+ }
+ sctList = append(sctList, *resp)
+ }
+
+ var serializedSCTList []byte
+ serializedSCTList, err = serializeSCTList(sctList)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
+ }
+
+ // Serialize again as an octet string before embedding
+ serializedSCTList, err = asn1.Marshal(serializedSCTList)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
+ }
+
+ var SCTListExtension = pkix.Extension{Id: signer.SCTListOID, Critical: false, Value: serializedSCTList}
+ certTBS.ExtraExtensions = append(certTBS.ExtraExtensions, SCTListExtension)
+ }
+ var signedCert []byte
+ signedCert, err = s.sign(&certTBS, profile)
+ if err != nil {
+ return nil, err
+ }
+
+ if s.dbAccessor != nil {
+ var certRecord = certdb.CertificateRecord{
+ Serial: certTBS.SerialNumber.String(),
+ // this relies on the specific behavior of x509.CreateCertificate
+ // which updates certTBS AuthorityKeyId from the signer's SubjectKeyId
+ AKI: hex.EncodeToString(certTBS.AuthorityKeyId),
+ CALabel: req.Label,
+ Status: "good",
+ Expiry: certTBS.NotAfter,
+ PEM: string(signedCert),
+ }
+
+ err = s.dbAccessor.InsertCertificate(certRecord)
+ if err != nil {
+ return nil, err
+ }
+ log.Debug("saved certificate with serial number ", certTBS.SerialNumber)
+ }
+
+ return signedCert, nil
+}
+
+func serializeSCTList(sctList []ct.SignedCertificateTimestamp) ([]byte, error) {
+ var buf bytes.Buffer
+ for _, sct := range sctList {
+ sct, err := ct.SerializeSCT(sct)
+ if err != nil {
+ return nil, err
+ }
+ binary.Write(&buf, binary.BigEndian, uint16(len(sct)))
+ buf.Write(sct)
+ }
+
+ var sctListLengthField = make([]byte, 2)
+ binary.BigEndian.PutUint16(sctListLengthField, uint16(buf.Len()))
+ return bytes.Join([][]byte{sctListLengthField, buf.Bytes()}, nil), nil
+}
+
+// Info returns a populated info.Resp struct or an error.
+func (s *Signer) Info(req info.Req) (resp *info.Resp, err error) {
+ cert, err := s.Certificate(req.Label, req.Profile)
+ if err != nil {
+ return
+ }
+
+ profile, err := signer.Profile(s, req.Profile)
+ if err != nil {
+ return
+ }
+
+ resp = new(info.Resp)
+ if cert.Raw != nil {
+ resp.Certificate = string(bytes.TrimSpace(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})))
+ }
+ resp.Usage = profile.Usage
+ resp.ExpiryString = profile.ExpiryString
+
+ return
+}
+
+// SigAlgo returns the signer's signature algorithm.
+func (s *Signer) SigAlgo() x509.SignatureAlgorithm {
+ return s.sigAlgo
+}
+
+// Certificate returns the signer's certificate.
+func (s *Signer) Certificate(label, profile string) (*x509.Certificate, error) {
+ cert := *s.ca
+ return &cert, nil
+}
+
+// SetPolicy sets the signer's signature policy.
+func (s *Signer) SetPolicy(policy *config.Signing) {
+ s.policy = policy
+}
+
+// SetDBAccessor sets the signer's cert db accessor.
+func (s *Signer) SetDBAccessor(dba certdb.Accessor) {
+ s.dbAccessor = dba
+}
+
+// SetReqModifier does nothing for local
+func (s *Signer) SetReqModifier(func(*http.Request, []byte)) {
+ // noop
+}
+
+// Policy returns the signer's policy.
+func (s *Signer) Policy() *config.Signing {
+ return s.policy
+}
diff --git a/vendor/github.com/cloudflare/cfssl/signer/remote/remote.go b/vendor/github.com/cloudflare/cfssl/signer/remote/remote.go
new file mode 100644
index 0000000000..fb980dea1d
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/signer/remote/remote.go
@@ -0,0 +1,128 @@
+package remote
+
+import (
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "net/http"
+
+ "github.com/cloudflare/cfssl/api/client"
+ "github.com/cloudflare/cfssl/certdb"
+ "github.com/cloudflare/cfssl/config"
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/info"
+ "github.com/cloudflare/cfssl/signer"
+)
+
+// A Signer represents a CFSSL instance running as a signing server;
+// it fulfills the signer.Signer interface.
+type Signer struct {
+ policy *config.Signing
+ reqModifier func(*http.Request, []byte)
+}
+
+// NewSigner creates a new remote Signer directly from a
+// signing policy.
+func NewSigner(policy *config.Signing) (*Signer, error) {
+ if policy != nil {
+ if !policy.Valid() {
+ return nil, cferr.New(cferr.PolicyError,
+ cferr.InvalidPolicy)
+ }
+ return &Signer{policy: policy}, nil
+ }
+
+ return nil, cferr.New(cferr.PolicyError,
+ cferr.InvalidPolicy)
+}
+
+// Sign sends a signature request to the remote CFSSL server,
+// receiving a signed certificate or an error in response. The hostname,
+// csr, and profileName are used as with a local signing operation, and
+// the label is used to select a signing root in a multi-root CA.
+func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) {
+ resp, err := s.remoteOp(req, req.Profile, "sign")
+ if err != nil {
+ return
+ }
+ if cert, ok := resp.([]byte); ok {
+ return cert, nil
+ }
+ return
+}
+
+// Info sends an info request to the remote CFSSL server, receiving an
+// info.Resp struct or an error in response.
+func (s *Signer) Info(req info.Req) (resp *info.Resp, err error) {
+ respInterface, err := s.remoteOp(req, req.Profile, "info")
+ if err != nil {
+ return
+ }
+ if resp, ok := respInterface.(*info.Resp); ok {
+ return resp, nil
+ }
+ return
+}
+
+// remoteOp performs a remote sign or info request.
+func (s *Signer) remoteOp(req interface{}, profile, target string) (resp interface{}, err error) {
+ jsonData, err := json.Marshal(req)
+ if err != nil {
+ return nil, cferr.Wrap(cferr.APIClientError, cferr.JSONError, err)
+ }
+
+ p, err := signer.Profile(s, profile)
+ if err != nil {
+ return
+ }
+
+ server := client.NewServerTLS(p.RemoteServer, helpers.CreateTLSConfig(p.RemoteCAs, p.ClientCert))
+ if server == nil {
+ return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest,
+ errors.New("failed to connect to remote"))
+ }
+
+ server.SetReqModifier(s.reqModifier)
+
+ // There's no auth provider for the "info" method
+ if target == "info" {
+ resp, err = server.Info(jsonData)
+ } else if p.RemoteProvider != nil {
+ resp, err = server.AuthSign(jsonData, nil, p.RemoteProvider)
+ } else {
+ resp, err = server.Sign(jsonData)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return
+}
+
+// SigAlgo returns the signer's signature algorithm.
+func (s *Signer) SigAlgo() x509.SignatureAlgorithm {
+ // TODO: implement this as a remote info call
+ return x509.UnknownSignatureAlgorithm
+}
+
+// SetPolicy sets the signer's signature policy.
+func (s *Signer) SetPolicy(policy *config.Signing) {
+ s.policy = policy
+}
+
+// SetDBAccessor is a no-op for the remote signer.
+func (s *Signer) SetDBAccessor(dba certdb.Accessor) {
+ // noop
+}
+
+// SetReqModifier sets the function to call to modify the HTTP request prior to sending it
+func (s *Signer) SetReqModifier(mod func(*http.Request, []byte)) {
+ s.reqModifier = mod
+}
+
+// Policy returns the signer's policy.
+func (s *Signer) Policy() *config.Signing {
+ return s.policy
+}
diff --git a/vendor/github.com/cloudflare/cfssl/signer/signer.go b/vendor/github.com/cloudflare/cfssl/signer/signer.go
new file mode 100644
index 0000000000..fa7b917825
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/signer/signer.go
@@ -0,0 +1,414 @@
+// Package signer implements certificate signature functionality for CFSSL.
+package signer
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "math/big"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/cloudflare/cfssl/certdb"
+ "github.com/cloudflare/cfssl/config"
+ "github.com/cloudflare/cfssl/csr"
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/helpers"
+ "github.com/cloudflare/cfssl/info"
+)
+
+// Subject contains the information that should be used to override the
+// subject information when signing a certificate.
+type Subject struct {
+ CN string
+ Names []csr.Name `json:"names"`
+ SerialNumber string
+}
+
+// Extension represents a raw extension to be included in the certificate. The
+// "value" field must be hex encoded.
+type Extension struct {
+ ID config.OID `json:"id"`
+ Critical bool `json:"critical"`
+ Value string `json:"value"`
+}
+
+// SignRequest stores a signature request, which contains the hostname,
+// the CSR, optional subject information, and the signature profile.
+//
+// Extensions provided in the SignRequest are copied into the certificate, as
+// long as they are in the ExtensionWhitelist for the signer's policy.
+// Extensions requested in the CSR are ignored, except for those processed by
+// ParseCertificateRequest (mainly subjectAltName).
+type SignRequest struct {
+ Hosts []string `json:"hosts"`
+ Request string `json:"certificate_request"`
+ Subject *Subject `json:"subject,omitempty"`
+ Profile string `json:"profile"`
+ CRLOverride string `json:"crl_override"`
+ Label string `json:"label"`
+ Serial *big.Int `json:"serial,omitempty"`
+ Extensions []Extension `json:"extensions,omitempty"`
+}
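+
+// Illustrative usage (editorial sketch, not upstream code): a minimal
+// SignRequest built from a PEM-encoded CSR; the file path and profile name
+// are hypothetical.
+//
+//	csrPEM, _ := ioutil.ReadFile("server.csr") // hypothetical path
+//	req := SignRequest{
+//		Hosts:   SplitHosts("example.com,www.example.com"),
+//		Request: string(csrPEM),
+//		Profile: "www", // hypothetical profile name
+//	}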
+
+// appendIf appends s to *a if s is not an empty string.
+func appendIf(s string, a *[]string) {
+ if s != "" {
+ *a = append(*a, s)
+ }
+}
+
+// Name returns the PKIX name for the subject.
+func (s *Subject) Name() pkix.Name {
+ var name pkix.Name
+ name.CommonName = s.CN
+
+ for _, n := range s.Names {
+ appendIf(n.C, &name.Country)
+ appendIf(n.ST, &name.Province)
+ appendIf(n.L, &name.Locality)
+ appendIf(n.O, &name.Organization)
+ appendIf(n.OU, &name.OrganizationalUnit)
+ }
+ name.SerialNumber = s.SerialNumber
+ return name
+}
+
+// SplitHosts takes a comma-separated list of hosts and returns a slice
+// of the individual hosts.
+func SplitHosts(hostList string) []string {
+ if hostList == "" {
+ return nil
+ }
+
+ return strings.Split(hostList, ",")
+}
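+
+// For example (editorial note, not upstream code):
+//
+//	SplitHosts("a.example.com,b.example.com") // []string{"a.example.com", "b.example.com"}
+//	SplitHosts("")                            // nil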
+
+// A Signer contains a CA's certificate and private key for signing
+// certificates, a Signing policy to refer to and a SignatureAlgorithm.
+type Signer interface {
+ Info(info.Req) (*info.Resp, error)
+ Policy() *config.Signing
+ SetDBAccessor(certdb.Accessor)
+ SetPolicy(*config.Signing)
+ SigAlgo() x509.SignatureAlgorithm
+ Sign(req SignRequest) (cert []byte, err error)
+ SetReqModifier(func(*http.Request, []byte))
+}
+
+// Profile gets the specified profile from the signer's policy.
+func Profile(s Signer, profile string) (*config.SigningProfile, error) {
+ var p *config.SigningProfile
+ policy := s.Policy()
+ if policy != nil && policy.Profiles != nil && profile != "" {
+ p = policy.Profiles[profile]
+ }
+
+ if p == nil && policy != nil {
+ p = policy.Default
+ }
+
+ if p == nil {
+ return nil, cferr.Wrap(cferr.APIClientError, cferr.ClientHTTPError, errors.New("profile must not be nil"))
+ }
+ return p, nil
+}
+
+// DefaultSigAlgo returns an appropriate X.509 signature algorithm given
+// the CA's private key.
+func DefaultSigAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
+ pub := priv.Public()
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ keySize := pub.N.BitLen()
+ switch {
+ case keySize >= 4096:
+ return x509.SHA512WithRSA
+ case keySize >= 3072:
+ return x509.SHA384WithRSA
+ case keySize >= 2048:
+ return x509.SHA256WithRSA
+ default:
+ return x509.SHA1WithRSA
+ }
+ case *ecdsa.PublicKey:
+ switch pub.Curve {
+ case elliptic.P256():
+ return x509.ECDSAWithSHA256
+ case elliptic.P384():
+ return x509.ECDSAWithSHA384
+ case elliptic.P521():
+ return x509.ECDSAWithSHA512
+ default:
+ return x509.ECDSAWithSHA1
+ }
+ default:
+ return x509.UnknownSignatureAlgorithm
+ }
+}
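+
+// For example (editorial sketch, not upstream code), a P-256 key maps to
+// ECDSA with SHA-256 (using crypto/ecdsa and crypto/rand):
+//
+//	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	DefaultSigAlgo(key) // x509.ECDSAWithSHA256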
+
+// ParseCertificateRequest takes an incoming certificate request and
+// builds a certificate template from it.
+func ParseCertificateRequest(s Signer, csrBytes []byte) (template *x509.Certificate, err error) {
+ csrv, err := x509.ParseCertificateRequest(csrBytes)
+ if err != nil {
+ err = cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err)
+ return
+ }
+
+ err = helpers.CheckSignature(csrv, csrv.SignatureAlgorithm, csrv.RawTBSCertificateRequest, csrv.Signature)
+ if err != nil {
+ err = cferr.Wrap(cferr.CSRError, cferr.KeyMismatch, err)
+ return
+ }
+
+ template = &x509.Certificate{
+ Subject: csrv.Subject,
+ PublicKeyAlgorithm: csrv.PublicKeyAlgorithm,
+ PublicKey: csrv.PublicKey,
+ SignatureAlgorithm: s.SigAlgo(),
+ DNSNames: csrv.DNSNames,
+ IPAddresses: csrv.IPAddresses,
+ EmailAddresses: csrv.EmailAddresses,
+ }
+
+ for _, val := range csrv.Extensions {
+ // Check the CSR for the X.509 BasicConstraints (RFC 5280, 4.2.1.9)
+ // extension and append to template if necessary
+ if val.Id.Equal(asn1.ObjectIdentifier{2, 5, 29, 19}) {
+ var constraints csr.BasicConstraints
+ var rest []byte
+
+ if rest, err = asn1.Unmarshal(val.Value, &constraints); err != nil {
+ return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err)
+ } else if len(rest) != 0 {
+ return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, errors.New("x509: trailing data after X.509 BasicConstraints"))
+ }
+
+ template.BasicConstraintsValid = true
+ template.IsCA = constraints.IsCA
+ template.MaxPathLen = constraints.MaxPathLen
+ template.MaxPathLenZero = template.MaxPathLen == 0
+ }
+ }
+
+ return
+}
+
+type subjectPublicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ SubjectPublicKey asn1.BitString
+}
+
+// ComputeSKI derives an SKI from the certificate's public key in a
+// standard manner. This is done by computing the SHA-1 digest of the
+// SubjectPublicKeyInfo component of the certificate.
+func ComputeSKI(template *x509.Certificate) ([]byte, error) {
+ pub := template.PublicKey
+ encodedPub, err := x509.MarshalPKIXPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+
+ var subPKI subjectPublicKeyInfo
+ _, err = asn1.Unmarshal(encodedPub, &subPKI)
+ if err != nil {
+ return nil, err
+ }
+
+ pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes)
+ return pubHash[:], nil
+}
+
+// FillTemplate is a utility function that tries to load as much of
+// the certificate template as possible from the profiles and current
+// template. It fills in the key uses, expiration, revocation URLs
+// and SKI.
+func FillTemplate(template *x509.Certificate, defaultProfile, profile *config.SigningProfile) error {
+	ski, err := ComputeSKI(template)
+	if err != nil {
+		return err
+	}
+
+ var (
+ eku []x509.ExtKeyUsage
+ ku x509.KeyUsage
+ backdate time.Duration
+ expiry time.Duration
+ notBefore time.Time
+ notAfter time.Time
+ crlURL, ocspURL string
+ issuerURL = profile.IssuerURL
+ )
+
+ // The third value returned from Usages is a list of unknown key usages.
+ // This should be used when validating the profile at load, and isn't used
+ // here.
+ ku, eku, _ = profile.Usages()
+ if profile.IssuerURL == nil {
+ issuerURL = defaultProfile.IssuerURL
+ }
+
+ if ku == 0 && len(eku) == 0 {
+ return cferr.New(cferr.PolicyError, cferr.NoKeyUsages)
+ }
+
+ if expiry = profile.Expiry; expiry == 0 {
+ expiry = defaultProfile.Expiry
+ }
+
+ if crlURL = profile.CRL; crlURL == "" {
+ crlURL = defaultProfile.CRL
+ }
+ if ocspURL = profile.OCSP; ocspURL == "" {
+ ocspURL = defaultProfile.OCSP
+ }
+ if backdate = profile.Backdate; backdate == 0 {
+ backdate = -5 * time.Minute
+ } else {
+ backdate = -1 * profile.Backdate
+ }
+
+ if !profile.NotBefore.IsZero() {
+ notBefore = profile.NotBefore.UTC()
+ } else {
+ notBefore = time.Now().Round(time.Minute).Add(backdate).UTC()
+ }
+
+ if !profile.NotAfter.IsZero() {
+ notAfter = profile.NotAfter.UTC()
+ } else {
+ notAfter = notBefore.Add(expiry).UTC()
+ }
+
+ template.NotBefore = notBefore
+ template.NotAfter = notAfter
+ template.KeyUsage = ku
+ template.ExtKeyUsage = eku
+ template.BasicConstraintsValid = true
+ template.IsCA = profile.CAConstraint.IsCA
+ if template.IsCA {
+ template.MaxPathLen = profile.CAConstraint.MaxPathLen
+ if template.MaxPathLen == 0 {
+ template.MaxPathLenZero = profile.CAConstraint.MaxPathLenZero
+ }
+ template.DNSNames = nil
+ template.EmailAddresses = nil
+ }
+ template.SubjectKeyId = ski
+
+ if ocspURL != "" {
+ template.OCSPServer = []string{ocspURL}
+ }
+ if crlURL != "" {
+ template.CRLDistributionPoints = []string{crlURL}
+ }
+
+ if len(issuerURL) != 0 {
+ template.IssuingCertificateURL = issuerURL
+ }
+ if len(profile.Policies) != 0 {
+ err = addPolicies(template, profile.Policies)
+ if err != nil {
+ return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+ }
+ }
+ if profile.OCSPNoCheck {
+ ocspNoCheckExtension := pkix.Extension{
+ Id: asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 5},
+ Critical: false,
+ Value: []byte{0x05, 0x00},
+ }
+ template.ExtraExtensions = append(template.ExtraExtensions, ocspNoCheckExtension)
+ }
+
+ return nil
+}
+
+type policyInformation struct {
+ PolicyIdentifier asn1.ObjectIdentifier
+ Qualifiers []interface{} `asn1:"tag:optional,omitempty"`
+}
+
+type cpsPolicyQualifier struct {
+ PolicyQualifierID asn1.ObjectIdentifier
+ Qualifier string `asn1:"tag:optional,ia5"`
+}
+
+type userNotice struct {
+ ExplicitText string `asn1:"tag:optional,utf8"`
+}
+type userNoticePolicyQualifier struct {
+ PolicyQualifierID asn1.ObjectIdentifier
+ Qualifier userNotice
+}
+
+var (
+ // Per https://tools.ietf.org/html/rfc3280.html#page-106, this represents:
+ // iso(1) identified-organization(3) dod(6) internet(1) security(5)
+ // mechanisms(5) pkix(7) id-qt(2) id-qt-cps(1)
+ iDQTCertificationPracticeStatement = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1}
+ // iso(1) identified-organization(3) dod(6) internet(1) security(5)
+ // mechanisms(5) pkix(7) id-qt(2) id-qt-unotice(2)
+ iDQTUserNotice = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2}
+
+ // CTPoisonOID is the object ID of the critical poison extension for precertificates
+ // https://tools.ietf.org/html/rfc6962#page-9
+ CTPoisonOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
+
+ // SCTListOID is the object ID for the Signed Certificate Timestamp certificate extension
+ // https://tools.ietf.org/html/rfc6962#page-14
+ SCTListOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
+)
+
+// addPolicies adds Certificate Policies and optional Policy Qualifiers to a
+// certificate, based on the input config. Go's x509 library allows setting
+// Certificate Policies easily, but does not support nested Policy Qualifiers
+// under those policies. So we need to construct the ASN.1 structure ourselves.
+func addPolicies(template *x509.Certificate, policies []config.CertificatePolicy) error {
+ asn1PolicyList := []policyInformation{}
+
+ for _, policy := range policies {
+ pi := policyInformation{
+ // The PolicyIdentifier is an OID assigned to a given issuer.
+ PolicyIdentifier: asn1.ObjectIdentifier(policy.ID),
+ }
+ for _, qualifier := range policy.Qualifiers {
+ switch qualifier.Type {
+ case "id-qt-unotice":
+ pi.Qualifiers = append(pi.Qualifiers,
+ userNoticePolicyQualifier{
+ PolicyQualifierID: iDQTUserNotice,
+ Qualifier: userNotice{
+ ExplicitText: qualifier.Value,
+ },
+ })
+ case "id-qt-cps":
+ pi.Qualifiers = append(pi.Qualifiers,
+ cpsPolicyQualifier{
+ PolicyQualifierID: iDQTCertificationPracticeStatement,
+ Qualifier: qualifier.Value,
+ })
+ default:
+ return errors.New("Invalid qualifier type in Policies " + qualifier.Type)
+ }
+ }
+ asn1PolicyList = append(asn1PolicyList, pi)
+ }
+
+ asn1Bytes, err := asn1.Marshal(asn1PolicyList)
+ if err != nil {
+ return err
+ }
+
+ template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{
+ Id: asn1.ObjectIdentifier{2, 5, 29, 32},
+ Critical: false,
+ Value: asn1Bytes,
+ })
+ return nil
+}
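+
+// Illustrative input (editorial sketch, not upstream code): one policy with a
+// CPS qualifier. The OID and URL are hypothetical, and the qualifier struct
+// name (config.CertificatePolicyQualifier) is assumed from the config package.
+//
+//	err := addPolicies(template, []config.CertificatePolicy{{
+//		ID: config.OID(asn1.ObjectIdentifier{2, 23, 140, 1, 2, 1}), // hypothetical OID
+//		Qualifiers: []config.CertificatePolicyQualifier{{ // assumed type name
+//			Type:  "id-qt-cps",
+//			Value: "https://example.com/cps", // hypothetical CPS URL
+//		}},
+//	}})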
diff --git a/vendor/github.com/cloudflare/cfssl/signer/universal/universal.go b/vendor/github.com/cloudflare/cfssl/signer/universal/universal.go
new file mode 100644
index 0000000000..0010200534
--- /dev/null
+++ b/vendor/github.com/cloudflare/cfssl/signer/universal/universal.go
@@ -0,0 +1,221 @@
+// Package universal implements a signer that can sign locally or remotely.
+package universal
+
+import (
+ "crypto/x509"
+ "net/http"
+
+ "github.com/cloudflare/cfssl/certdb"
+ "github.com/cloudflare/cfssl/config"
+ cferr "github.com/cloudflare/cfssl/errors"
+ "github.com/cloudflare/cfssl/info"
+ "github.com/cloudflare/cfssl/signer"
+ "github.com/cloudflare/cfssl/signer/local"
+ "github.com/cloudflare/cfssl/signer/remote"
+)
+
+// Signer represents a universal signer which is both local and remote
+// to fulfill the signer.Signer interface.
+type Signer struct {
+ local signer.Signer
+ remote signer.Signer
+ policy *config.Signing
+}
+
+// Root is used to define where the universal signer gets its public
+// certificate and private keys for signing.
+type Root struct {
+ Config map[string]string
+ ForceRemote bool
+}
+
+// A localSignerCheck looks at the Config keys in the Root and decides
+// whether it has enough information to produce a signer.
+type localSignerCheck func(root *Root, policy *config.Signing) (signer.Signer, bool, error)
+
+// fileBackedSigner determines whether a file-backed local signer is supported.
+func fileBackedSigner(root *Root, policy *config.Signing) (signer.Signer, bool, error) {
+ keyFile := root.Config["key-file"]
+ certFile := root.Config["cert-file"]
+
+ if keyFile == "" {
+ return nil, false, nil
+ }
+
+ signer, err := local.NewSignerFromFile(certFile, keyFile, policy)
+ return signer, true, err
+}
+
+var localSignerList = []localSignerCheck{
+ fileBackedSigner,
+}
+
+// PrependLocalSignerToList prepends a signer check to the local signer list.
+func PrependLocalSignerToList(signer localSignerCheck) {
+ localSignerList = append([]localSignerCheck{signer}, localSignerList...)
+}
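+
+// Illustrative usage (editorial sketch, not upstream code): registering a
+// custom check that consults a hypothetical "vault-addr" config key before
+// the built-in file-backed signer is tried; newVaultSigner is a hypothetical
+// constructor.
+//
+//	PrependLocalSignerToList(func(root *Root, policy *config.Signing) (signer.Signer, bool, error) {
+//		if root.Config["vault-addr"] == "" { // hypothetical key
+//			return nil, false, nil // not our root; keep looking
+//		}
+//		return newVaultSigner(root, policy) // hypothetical constructor
+//	})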
+
+func newLocalSigner(root Root, policy *config.Signing) (s signer.Signer, err error) {
+ // shouldProvide indicates whether the
+ // function *should* have produced a key. If
+ // it's true, we should use the signer and
+ // error returned. Otherwise, keep looking for
+ // signers.
+ var shouldProvide bool
+
+	// localSignerList is defined above and can be
+	// extended by build-tagged universal_signers*.go
+	// files, which activate and deactivate signers
+	// based on build flags; for example,
+	// universal_signers_pkcs11.go contains a list
+	// of valid signers when PKCS #11 is turned
+	// on.
+ for _, possibleSigner := range localSignerList {
+ s, shouldProvide, err = possibleSigner(&root, policy)
+ if shouldProvide {
+ break
+ }
+ }
+
+ if s == nil {
+ err = cferr.New(cferr.PrivateKeyError, cferr.Unknown)
+ }
+
+ return s, err
+}
+
+func newUniversalSigner(root Root, policy *config.Signing) (*Signer, error) {
+ ls, err := newLocalSigner(root, policy)
+ if err != nil {
+ return nil, err
+ }
+
+ rs, err := remote.NewSigner(policy)
+ if err != nil {
+ return nil, err
+ }
+
+ s := &Signer{
+ policy: policy,
+ local: ls,
+ remote: rs,
+ }
+ return s, err
+}
+
+// NewSigner generates a new certificate signer from a Root structure.
+// This is one of two standard signers: local or remote. If the root
+// structure specifies a force remote, then a remote signer is created,
+// otherwise either a remote or local signer is generated based on the
+// policy. For a local signer, the "cert-file" and "key-file" entries need
+// to be defined in Root.Config.
+func NewSigner(root Root, policy *config.Signing) (signer.Signer, error) {
+ if policy == nil {
+ policy = &config.Signing{
+ Profiles: map[string]*config.SigningProfile{},
+ Default: config.DefaultConfig(),
+ }
+ }
+
+ if !policy.Valid() {
+ return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+ }
+
+ var s signer.Signer
+ var err error
+ if root.ForceRemote {
+ s, err = remote.NewSigner(policy)
+ } else {
+ if policy.NeedsLocalSigner() && policy.NeedsRemoteSigner() {
+ s, err = newUniversalSigner(root, policy)
+ } else {
+ if policy.NeedsLocalSigner() {
+ s, err = newLocalSigner(root, policy)
+ }
+ if policy.NeedsRemoteSigner() {
+ s, err = remote.NewSigner(policy)
+ }
+ }
+ }
+
+ return s, err
+}
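+
+// Illustrative usage (editorial sketch, not upstream code): a file-backed
+// local signer selected via the "cert-file"/"key-file" config keys; the
+// file paths are hypothetical and policy is a valid *config.Signing.
+//
+//	root := Root{Config: map[string]string{
+//		"cert-file": "ca.pem",     // hypothetical path
+//		"key-file":  "ca-key.pem", // hypothetical path
+//	}}
+//	s, err := NewSigner(root, policy)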
+
+// getMatchingProfile returns the SigningProfile that matches the passed profile name.
+// If an empty profile string is passed, it returns the default profile.
+func (s *Signer) getMatchingProfile(profile string) (*config.SigningProfile, error) {
+ if profile == "" {
+ return s.policy.Default, nil
+ }
+
+ for p, signingProfile := range s.policy.Profiles {
+ if p == profile {
+ return signingProfile, nil
+ }
+ }
+
+ return nil, cferr.New(cferr.PolicyError, cferr.UnknownProfile)
+}
+
+// Sign sends a signature request to either the remote or local signer,
+// receiving a signed certificate or an error in response.
+func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) {
+ profile, err := s.getMatchingProfile(req.Profile)
+ if err != nil {
+ return cert, err
+ }
+
+ if profile.RemoteServer != "" {
+ return s.remote.Sign(req)
+ }
+ return s.local.Sign(req)
+
+}
+
+// Info sends an info request to the remote or local CFSSL server,
+// receiving an info.Resp struct or an error in response.
+func (s *Signer) Info(req info.Req) (resp *info.Resp, err error) {
+ profile, err := s.getMatchingProfile(req.Profile)
+ if err != nil {
+ return resp, err
+ }
+
+ if profile.RemoteServer != "" {
+ return s.remote.Info(req)
+ }
+ return s.local.Info(req)
+
+}
+
+// SetDBAccessor sets the signer's cert db accessor.
+func (s *Signer) SetDBAccessor(dba certdb.Accessor) {
+ s.local.SetDBAccessor(dba)
+ s.remote.SetDBAccessor(dba)
+}
+
+// SetReqModifier sets the function to call to modify the HTTP request prior to sending it
+func (s *Signer) SetReqModifier(mod func(*http.Request, []byte)) {
+ s.local.SetReqModifier(mod)
+ s.remote.SetReqModifier(mod)
+}
+
+// SigAlgo returns the signer's signature algorithm.
+func (s *Signer) SigAlgo() x509.SignatureAlgorithm {
+ if s.local != nil {
+ return s.local.SigAlgo()
+ }
+
+ // currently remote.SigAlgo just returns
+ // x509.UnknownSignatureAlgorithm.
+ return s.remote.SigAlgo()
+}
+
+// SetPolicy sets the signer's signature policy.
+func (s *Signer) SetPolicy(policy *config.Signing) {
+ s.policy = policy
+}
+
+// Policy returns the signer's policy.
+func (s *Signer) Policy() *config.Signing {
+ return s.policy
+}
diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE
new file mode 100644
index 0000000000..8f3fee627a
--- /dev/null
+++ b/vendor/github.com/docker/docker/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE
new file mode 100644
index 0000000000..8a37c1c7bc
--- /dev/null
+++ b/vendor/github.com/docker/docker/NOTICE
@@ -0,0 +1,19 @@
+Docker
+Copyright 2012-2016 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+This product contains software (https://github.com/kr/pty) developed
+by Keith Rarick, licensed under the MIT License.
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go
new file mode 100644
index 0000000000..266df1e537
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts.go
@@ -0,0 +1,151 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+var (
+ // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
+ // These are the IANA registered port numbers for use with Docker
+ // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
+ DefaultHTTPPort = 2375 // Default HTTP Port
+ // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
+ DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
+ // DefaultUnixSocket Path for the unix socket.
+ // Docker daemon by default always listens on the default unix socket
+ DefaultUnixSocket = "/var/run/docker.sock"
+	// DefaultTCPHost constant defines the default full TCP host string used by docker
+ DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
+ // DefaultTLSHost constant defines the default host string used by docker for TLS sockets
+ DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
+ // DefaultNamedPipe defines the default named pipe used by docker on Windows
+ DefaultNamedPipe = `//./pipe/docker_engine`
+)
+
+// ValidateHost validates that the specified string is a valid host and returns it.
+func ValidateHost(val string) (string, error) {
+ host := strings.TrimSpace(val)
+ // The empty string means default and is not handled by parseDockerDaemonHost
+ if host != "" {
+ _, err := parseDockerDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
+	// Note: unlike most flag validators, we don't return the mutated value here;
+	// we need to know what the user entered later (using ParseHost) to adjust for TLS
+ return val, nil
+}
+
+// ParseHost parses the specified address and sets defaults for a daemon host string.
+func ParseHost(defaultToTLS bool, val string) (string, error) {
+ host := strings.TrimSpace(val)
+ if host == "" {
+ if defaultToTLS {
+ host = DefaultTLSHost
+ } else {
+ host = DefaultHost
+ }
+ } else {
+ var err error
+ host, err = parseDockerDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
+ return host, nil
+}
+
+// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
+// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
+func parseDockerDaemonHost(addr string) (string, error) {
+ addrParts := strings.SplitN(addr, "://", 2)
+ if len(addrParts) == 1 && addrParts[0] != "" {
+ addrParts = []string{"tcp", addrParts[0]}
+ }
+
+ switch addrParts[0] {
+ case "tcp":
+ return ParseTCPAddr(addrParts[1], DefaultTCPHost)
+ case "unix":
+ return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
+ case "npipe":
+ return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
+ case "fd":
+ return addr, nil
+ default:
+ return "", fmt.Errorf("Invalid bind address format: %s", addr)
+ }
+}
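+
+// For example (editorial note, not upstream code):
+//
+//	parseDockerDaemonHost("tcp://10.0.0.1:2375") // "tcp://10.0.0.1:2375"
+//	parseDockerDaemonHost("10.0.0.1:2375")       // "tcp://10.0.0.1:2375" (scheme-less defaults to tcp)
+//	parseDockerDaemonHost("unix://")             // "unix:///var/run/docker.sock"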
+
+// parseSimpleProtoAddr parses and validates that the specified address is a valid
+// socket address for simple protocols like unix and npipe. It returns a formatted
+// socket address, either using the address parsed from addr, or the contents of
+// defaultAddr if addr is a blank string.
+func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
+ addr = strings.TrimPrefix(addr, proto+"://")
+ if strings.Contains(addr, "://") {
+ return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
+ }
+ if addr == "" {
+ addr = defaultAddr
+ }
+ return fmt.Sprintf("%s://%s", proto, addr), nil
+}
+
+// ParseTCPAddr parses and validates that the specified address is a valid TCP
+// address. It returns a formatted TCP address, either using the address parsed
+// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
+// tryAddr is expected to have already been Trim()'d
+// defaultAddr must be in the full `tcp://host:port` form
+func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
+ if tryAddr == "" || tryAddr == "tcp://" {
+ return defaultAddr, nil
+ }
+ addr := strings.TrimPrefix(tryAddr, "tcp://")
+ if strings.Contains(addr, "://") || addr == "" {
+ return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
+ }
+
+ defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
+ defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
+ if err != nil {
+ return "", err
+ }
+ // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
+ // not 1.4. See https://github.com/golang/go/issues/12200 and
+ // https://github.com/golang/go/issues/6530.
+ if strings.HasSuffix(addr, "]:") {
+ addr += defaultPort
+ }
+
+ u, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return "", err
+ }
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ // try port addition once
+ host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort))
+ }
+ if err != nil {
+ return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+ }
+
+ if host == "" {
+ host = defaultHost
+ }
+ if port == "" {
+ port = defaultPort
+ }
+ p, err := strconv.Atoi(port)
+ if err != nil && p == 0 {
+ return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+ }
+
+ return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
+}
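+
+// For example (editorial note, not upstream code):
+//
+//	ParseTCPAddr("", "tcp://127.0.0.1:2375")        // "tcp://127.0.0.1:2375"
+//	ParseTCPAddr("0.0.0.0", "tcp://127.0.0.1:2375") // "tcp://0.0.0.0:2375"
+//	ParseTCPAddr(":2376", "tcp://127.0.0.1:2375")   // "tcp://127.0.0.1:2376"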
diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go
new file mode 100644
index 0000000000..611407a9d9
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_unix.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package opts
+
+import "fmt"
+
+// DefaultHost constant defines the default host string used by docker on other hosts than Windows
+var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go
new file mode 100644
index 0000000000..7c239e00f1
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_windows.go
@@ -0,0 +1,6 @@
+// +build windows
+
+package opts
+
+// DefaultHost constant defines the default host string used by docker on Windows
+var DefaultHost = "npipe://" + DefaultNamedPipe
diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go
new file mode 100644
index 0000000000..c7b0dc9947
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/ip.go
@@ -0,0 +1,42 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+)
+
+// IPOpt holds an IP. It is used to store values from CLI flags.
+type IPOpt struct {
+ *net.IP
+}
+
+// NewIPOpt creates a new IPOpt from a reference net.IP and a
+// string representation of an IP. If the string is not a valid
+// IP, it falls back to the specified reference.
+func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
+ o := &IPOpt{
+ IP: ref,
+ }
+ o.Set(defaultVal)
+ return o
+}
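+
+// Illustrative usage (editorial sketch, not upstream code): IPOpt implements
+// flag.Value, so it can back a CLI flag; the flag set and flag name are
+// hypothetical.
+//
+//	var hostIP net.IP
+//	fs := flag.NewFlagSet("daemon", flag.ContinueOnError)
+//	fs.Var(NewIPOpt(&hostIP, "0.0.0.0"), "ip", "default IP when binding ports")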
+
+// Set sets an IPv4 or IPv6 address from a given string. If the given
+// string is not parseable as an IP address it returns an error.
+func (o *IPOpt) Set(val string) error {
+ ip := net.ParseIP(val)
+ if ip == nil {
+ return fmt.Errorf("%s is not an ip address", val)
+ }
+ *o.IP = ip
+ return nil
+}
+
+// String returns the IP address stored in the IPOpt. If the stored IP is
+// nil, it returns an empty string.
+func (o *IPOpt) String() string {
+ if *o.IP == nil {
+ return ""
+ }
+ return o.IP.String()
+}
diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go
new file mode 100644
index 0000000000..1b9d6b294a
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts.go
@@ -0,0 +1,321 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+ "regexp"
+ "strings"
+
+ "github.com/docker/engine-api/types/filters"
+)
+
+var (
+ alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
+ domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+)
+
+// ListOpts holds a list of values and a validation function.
+type ListOpts struct {
+ values *[]string
+ validator ValidatorFctType
+}
+
+// NewListOpts creates a new ListOpts with the specified validator.
+func NewListOpts(validator ValidatorFctType) ListOpts {
+ var values []string
+ return *NewListOptsRef(&values, validator)
+}
+
+// NewListOptsRef creates a new ListOpts with the specified values and validator.
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+ return &ListOpts{
+ values: values,
+ validator: validator,
+ }
+}
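+
+// Illustrative usage (editorial sketch, not upstream code): a list option
+// whose entries are validated as IP addresses.
+//
+//	dns := NewListOpts(ValidateIPAddress)
+//	_ = dns.Set("8.8.8.8")      // accepted
+//	err := dns.Set("not-an-ip") // returns a validation error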
+
+func (opts *ListOpts) String() string {
+ return fmt.Sprintf("%v", []string((*opts.values)))
+}
+
+// Set validates the input value with the configured validator, if any,
+// and appends it to the internal slice.
+func (opts *ListOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ (*opts.values) = append((*opts.values), value)
+ return nil
+}
+
+// Delete removes the specified element from the slice.
+func (opts *ListOpts) Delete(key string) {
+ for i, k := range *opts.values {
+ if k == key {
+ (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
+ return
+ }
+ }
+}
+
+// GetMap returns the values as a set (a map keyed by value), which
+// avoids duplicates.
+func (opts *ListOpts) GetMap() map[string]struct{} {
+ ret := make(map[string]struct{})
+ for _, k := range *opts.values {
+ ret[k] = struct{}{}
+ }
+ return ret
+}
+
+// GetAll returns the values of slice.
+func (opts *ListOpts) GetAll() []string {
+ return (*opts.values)
+}
+
+// GetAllOrEmpty returns the values of the slice
+// or an empty slice when there are no values.
+func (opts *ListOpts) GetAllOrEmpty() []string {
+ v := *opts.values
+ if v == nil {
+ return make([]string, 0)
+ }
+ return v
+}
+
+// Get reports whether the specified key is present.
+func (opts *ListOpts) Get(key string) bool {
+ for _, k := range *opts.values {
+ if k == key {
+ return true
+ }
+ }
+ return false
+}
+
+// Len returns the number of elements in the slice.
+func (opts *ListOpts) Len() int {
+ return len((*opts.values))
+}
+
+// Type returns a string name for this Option type
+func (opts *ListOpts) Type() string {
+ return "list"
+}
+
+// NamedOption is an interface that list and map options
+// with names implement.
+type NamedOption interface {
+ Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+ name string
+ ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+ return &NamedListOpts{
+ name: name,
+ ListOpts: *NewListOptsRef(values, validator),
+ }
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+ return o.name
+}
+
+// MapOpts holds a map of values and a validation function.
+type MapOpts struct {
+ values map[string]string
+ validator ValidatorFctType
+}
+
+// Set validates the input value with the configured validator, if any,
+// then splits it on the first '=' and adds it to the internal map.
+func (opts *MapOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ vals := strings.SplitN(value, "=", 2)
+ if len(vals) == 1 {
+ (opts.values)[vals[0]] = ""
+ } else {
+ (opts.values)[vals[0]] = vals[1]
+ }
+ return nil
+}
+
+// GetAll returns the values of MapOpts as a map.
+func (opts *MapOpts) GetAll() map[string]string {
+ return opts.values
+}
+
+func (opts *MapOpts) String() string {
+ return fmt.Sprintf("%v", map[string]string((opts.values)))
+}
+
+// Type returns a string name for this Option type
+func (opts *MapOpts) Type() string {
+ return "map"
+}
+
+// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
+ if values == nil {
+ values = make(map[string]string)
+ }
+ return &MapOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+ name string
+ MapOpts
+}
+
+var _ NamedOption = &NamedMapOpts{}
+
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+ return &NamedMapOpts{
+ name: name,
+ MapOpts: *NewMapOpts(values, validator),
+ }
+}
+
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+ return o.name
+}
+
+// ValidatorFctType defines a validator function that returns a validated string and/or an error.
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
+type ValidatorFctListType func(val string) ([]string, error)
+
+// ValidateIPAddress validates an IP address.
+func ValidateIPAddress(val string) (string, error) {
+ var ip = net.ParseIP(strings.TrimSpace(val))
+ if ip != nil {
+ return ip.String(), nil
+ }
+ return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// ValidateDNSSearch validates domain for resolvconf search configuration.
+// A zero length domain is represented by a dot (.).
+func ValidateDNSSearch(val string) (string, error) {
+ if val = strings.Trim(val, " "); val == "." {
+ return val, nil
+ }
+ return validateDomain(val)
+}
+
+func validateDomain(val string) (string, error) {
+ if alphaRegexp.FindString(val) == "" {
+ return "", fmt.Errorf("%s is not a valid domain", val)
+ }
+ ns := domainRegexp.FindSubmatch([]byte(val))
+ if len(ns) > 0 && len(ns[1]) < 255 {
+ return string(ns[1]), nil
+ }
+ return "", fmt.Errorf("%s is not a valid domain", val)
+}
+
+// ValidateLabel validates that the specified string is a valid label, and returns it.
+// Labels are in the form key=value.
+func ValidateLabel(val string) (string, error) {
+ if strings.Count(val, "=") < 1 {
+ return "", fmt.Errorf("bad attribute format: %s", val)
+ }
+ return val, nil
+}
+
+// ValidateSysctl validates a sysctl and returns it.
+func ValidateSysctl(val string) (string, error) {
+ validSysctlMap := map[string]bool{
+ "kernel.msgmax": true,
+ "kernel.msgmnb": true,
+ "kernel.msgmni": true,
+ "kernel.sem": true,
+ "kernel.shmall": true,
+ "kernel.shmmax": true,
+ "kernel.shmmni": true,
+ "kernel.shm_rmid_forced": true,
+ }
+ validSysctlPrefixes := []string{
+ "net.",
+ "fs.mqueue.",
+ }
+ arr := strings.Split(val, "=")
+ if len(arr) < 2 {
+ return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
+ }
+ if validSysctlMap[arr[0]] {
+ return val, nil
+ }
+
+ for _, vp := range validSysctlPrefixes {
+ if strings.HasPrefix(arr[0], vp) {
+ return val, nil
+ }
+ }
+ return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
+}
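+
+// For example (editorial note, not upstream code):
+//
+//	ValidateSysctl("net.ipv4.ip_forward=1") // ok: "net." prefix is whitelisted
+//	ValidateSysctl("kernel.shmmax=65536")   // ok: exact whitelist entry
+//	ValidateSysctl("vm.swappiness=10")      // error: not whitelisted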
+
+// FilterOpt is a flag type for validating filters
+type FilterOpt struct {
+ filter filters.Args
+}
+
+// NewFilterOpt returns a new FilterOpt
+func NewFilterOpt() FilterOpt {
+ return FilterOpt{filter: filters.NewArgs()}
+}
+
+func (o *FilterOpt) String() string {
+ repr, err := filters.ToParam(o.filter)
+ if err != nil {
+ return "invalid filters"
+ }
+ return repr
+}
+
+// Set sets the value of the opt by parsing the command line value
+func (o *FilterOpt) Set(value string) error {
+ var err error
+ o.filter, err = filters.ParseFlag(value, o.filter)
+ return err
+}
+
+// Type returns the option type
+func (o *FilterOpt) Type() string {
+ return "filter"
+}
+
+// Value returns the value of this option
+func (o *FilterOpt) Value() filters.Args {
+ return o.filter
+}
diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go
new file mode 100644
index 0000000000..f1ce844a8f
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts_unix.go
@@ -0,0 +1,6 @@
+// +build !windows
+
+package opts
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
+const DefaultHTTPHost = "localhost"
diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go
new file mode 100644
index 0000000000..ebe40c969c
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts_windows.go
@@ -0,0 +1,56 @@
+package opts
+
+// TODO Windows. Identify bug in Go 1.5.1+ and/or Windows Server 2016 TP5.
+// @jhowardmsft, @swernli.
+//
+// On Windows, this mitigates a problem with the default options of running
+// a docker client against a local docker daemon on TP5.
+//
+// What was found is that if the default host is "localhost", even if the client
+// (and daemon as this is local) is not physically on a network, and the DNS
+// cache is flushed (ipconfig /flushdns), then the client will pause for
+// exactly one second when connecting to the daemon for calls. For example
+// using docker run windowsservercore cmd, the CLI will send a create followed
+// by an attach. You see the delay between the attach finishing and the attach
+// being seen by the daemon.
+//
+// Here's some daemon debug logs with additional debug spew put in. The
+// AfterWriteJSON log is the very last thing the daemon does as part of the
+// create call. The POST /attach is the second CLI call. Notice the second
+// time gap.
+//
+// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
+// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
+// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
+// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
+// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
+// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
+// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
+// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
+// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
+// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
+// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
+// ... 1 second gap here....
+// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
+// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
+//
+// We suspect this is either a bug introduced in Go 1.5.1, or that a change
+// in Go 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
+// the Windows networking stack is supposed to resolve "localhost" internally,
+// without hitting DNS, or even reading the hosts file (which is why localhost
+// is commented out in the hosts file on Windows).
+//
+// We have validated that working around this using the actual IPv4 localhost
+// address does not cause the delay.
+//
+// This does not occur with the docker client built with 1.4.3 on the same
+// Windows build, regardless of whether the daemon is built using 1.5.1
+// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
+// on a cross-compiled Windows binary (from Linux).
+//
+// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
+// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
+// explicitly.
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
+const DefaultHTTPHost = "127.0.0.1"
diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md
new file mode 100644
index 0000000000..7307d9694f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/README.md
@@ -0,0 +1 @@
+This code provides helper functions for dealing with archive files.
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
new file mode 100644
index 0000000000..ad3d65b2fc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -0,0 +1,1147 @@
+package archive
+
+import (
+ "archive/tar"
+ "bufio"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/promise"
+ "github.com/docker/docker/pkg/system"
+)
+
+type (
+	// Archive is an io.ReadCloser, combining the io.Reader and io.Closer interfaces.
+	Archive io.ReadCloser
+	// Reader is an io.Reader.
+	Reader io.Reader
+	// Compression represents the compression algorithm, if any, applied to an archive.
+	Compression int
+	// WhiteoutFormat is the format of whiteouts unpacked.
+	WhiteoutFormat int
+ // TarChownOptions wraps the chown options UID and GID.
+ TarChownOptions struct {
+ UID, GID int
+ }
+ // TarOptions wraps the tar options.
+ TarOptions struct {
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ ChownOpts *TarChownOptions
+ IncludeSourceDir bool
+ // WhiteoutFormat is the expected on disk format for whiteout files.
+ // This format will be converted to the standard format on pack
+ // and from the standard format on unpack.
+ WhiteoutFormat WhiteoutFormat
+ // When unpacking, specifies whether overwriting a directory with a
+ // non-directory is allowed and vice versa.
+ NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
+ }
+
+ // Archiver allows the reuse of most utility functions of this package
+ // with a pluggable Untar function. Also, to facilitate the passing of
+ // specific id mappings for untar, an archiver can be created with maps
+ // which will then be passed to Untar operations
+ Archiver struct {
+ Untar func(io.Reader, string, *TarOptions) error
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+ }
+
+	// breakoutError is used to differentiate errors related to breaking out.
+ // When testing archive breakout in the unit tests, this error is expected
+ // in order for the test to pass.
+ breakoutError error
+)
+
+var (
+	// ErrNotImplemented is the error returned when a function is not implemented.
+ ErrNotImplemented = errors.New("Function not implemented")
+ defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
+)
+
+const (
+ // HeaderSize is the size in bytes of a tar header
+ HeaderSize = 512
+)
+
+const (
+	// Uncompressed represents the uncompressed state.
+	Uncompressed Compression = iota
+	// Bzip2 is the bzip2 compression algorithm.
+	Bzip2
+	// Gzip is the gzip compression algorithm.
+	Gzip
+	// Xz is the xz compression algorithm.
+	Xz
+)
+
+const (
+ // AUFSWhiteoutFormat is the default format for whiteouts
+ AUFSWhiteoutFormat WhiteoutFormat = iota
+ // OverlayWhiteoutFormat formats whiteout according to the overlay
+ // standard.
+ OverlayWhiteoutFormat
+)
+
+// IsArchive checks for the magic bytes of a tar or any supported compression
+// algorithm.
+func IsArchive(header []byte) bool {
+ compression := DetectCompression(header)
+ if compression != Uncompressed {
+ return true
+ }
+ r := tar.NewReader(bytes.NewBuffer(header))
+ _, err := r.Next()
+ return err == nil
+}
+
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+ file, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+ rdr, err := DecompressStream(file)
+ if err != nil {
+ return false
+ }
+ r := tar.NewReader(rdr)
+ _, err = r.Next()
+ return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+ for compression, m := range map[Compression][]byte{
+ Bzip2: {0x42, 0x5A, 0x68},
+ Gzip: {0x1F, 0x8B, 0x08},
+ Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
+ } {
+ if len(source) < len(m) {
+ logrus.Debug("Len too short")
+ continue
+ }
+		if bytes.Equal(m, source[:len(m)]) {
+ return compression
+ }
+ }
+ return Uncompressed
+}
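+
+// Illustrative usage (editorial sketch, not upstream code): sniffing the
+// compression of a file by peeking at its first few bytes; the path is
+// hypothetical.
+//
+//	f, _ := os.Open("layer.tar.gz") // hypothetical path
+//	header := make([]byte, 10)
+//	_, _ = io.ReadFull(f, header)
+//	DetectCompression(header) // Gzip (magic bytes 0x1F 0x8B 0x08)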
+
+func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+ args := []string{"xz", "-d", "-c", "-q"}
+
+ return cmdStream(exec.Command(args[0], args[1:]...), archive)
+}
+
+// DecompressStream decompresses the archive and returns an io.ReadCloser with the decompressed archive.
+func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
+ p := pools.BufioReader32KPool
+ buf := p.Get(archive)
+ bs, err := buf.Peek(10)
+ if err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some odd
+ // cases where the layer.tar file will be empty (zero bytes) and
+ // that results in an io.EOF from the Peek() call. So, in those
+ // cases we'll just treat it as a non-compressed stream and
+ // that means just create an empty layer.
+ // See Issue 18170
+ return nil, err
+ }
+
+ compression := DetectCompression(bs)
+ switch compression {
+ case Uncompressed:
+ readBufWrapper := p.NewReadCloserWrapper(buf, buf)
+ return readBufWrapper, nil
+ case Gzip:
+ gzReader, err := gzip.NewReader(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, gzReader)
+ return readBufWrapper, nil
+ case Bzip2:
+ bz2Reader := bzip2.NewReader(buf)
+ readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
+ return readBufWrapper, nil
+ case Xz:
+ xzReader, chdone, err := xzDecompress(buf)
+ if err != nil {
+ return nil, err
+ }
+ readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
+ return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
+ <-chdone
+ return readBufWrapper.Close()
+ }), nil
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// CompressStream compresses dest with the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
+ p := pools.BufioWriter32KPool
+ buf := p.Get(dest)
+ switch compression {
+ case Uncompressed:
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, buf)
+ return writeBufWrapper, nil
+ case Gzip:
+ gzWriter := gzip.NewWriter(dest)
+ writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter)
+ return writeBufWrapper, nil
+ case Bzip2, Xz:
+ // archive/bzip2 does not support writing, and there is no xz support at all
+ // However, this is not a problem as docker only currently generates gzipped tars
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ default:
+ return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
+ }
+}
+
+// Extension returns the extension of a file that uses the specified compression algorithm.
+func (compression *Compression) Extension() string {
+ switch *compression {
+ case Uncompressed:
+ return "tar"
+ case Bzip2:
+ return "tar.bz2"
+ case Gzip:
+ return "tar.gz"
+ case Xz:
+ return "tar.xz"
+ }
+ return ""
+}
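+
+// A round-trip sketch for CompressStream and DecompressStream, assuming the
+// vendored import path shown:
+//
+//    package main
+//
+//    import (
+//        "bytes"
+//        "fmt"
+//        "io/ioutil"
+//
+//        "github.com/docker/docker/pkg/archive"
+//    )
+//
+//    func main() {
+//        var buf bytes.Buffer
+//        w, err := archive.CompressStream(&buf, archive.Gzip)
+//        if err != nil {
+//            panic(err)
+//        }
+//        w.Write([]byte("hello"))
+//        w.Close() // flushes and finalizes the gzip stream
+//
+//        r, err := archive.DecompressStream(&buf)
+//        if err != nil {
+//            panic(err)
+//        }
+//        defer r.Close()
+//        out, _ := ioutil.ReadAll(r)
+//        fmt.Printf("%s\n", out) // hello
+//    }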
+
+type tarWhiteoutConverter interface {
+ ConvertWrite(*tar.Header, string, os.FileInfo) error
+ ConvertRead(*tar.Header, string) (bool, error)
+}
+
+type tarAppender struct {
+ TarWriter *tar.Writer
+ Buffer *bufio.Writer
+
+ // for hardlink mapping
+ SeenFiles map[uint64]string
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+
+ // For packing and unpacking whiteout files in the
+ // non standard format. The whiteout files defined
+ // by the AUFS standard are used as the tar whiteout
+ // standard.
+ WhiteoutConverter tarWhiteoutConverter
+}
+
+// canonicalTarName provides a platform-independent and consistent posix-style
+// path for files and directories to be archived, regardless of the platform.
+func canonicalTarName(name string, isDir bool) (string, error) {
+ name, err := CanonicalTarNameForPath(name)
+ if err != nil {
+ return "", err
+ }
+
+ // suffix with '/' for directories
+ if isDir && !strings.HasSuffix(name, "/") {
+ name += "/"
+ }
+ return name, nil
+}
+
+// addTarFile adds to the tar archive a file from `path` as `name`
+func (ta *tarAppender) addTarFile(path, name string) error {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+
+ link := ""
+ if fi.Mode()&os.ModeSymlink != 0 {
+ if link, err = os.Readlink(path); err != nil {
+ return err
+ }
+ }
+
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return err
+ }
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ name, err = canonicalTarName(name, fi.IsDir())
+ if err != nil {
+ return fmt.Errorf("tar: cannot canonicalize path: %v", err)
+ }
+ hdr.Name = name
+
+ inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
+ if err != nil {
+ return err
+ }
+
+ // if it's not a directory and has more than 1 link,
+ // it's hardlinked, so set the type flag accordingly
+ if !fi.IsDir() && hasHardlinks(fi) {
+ // a link should have a name that it links to
+ // and that linked name should be first in the tar archive
+ if oldpath, ok := ta.SeenFiles[inode]; ok {
+ hdr.Typeflag = tar.TypeLink
+ hdr.Linkname = oldpath
+ hdr.Size = 0 // This must be here for the writer math to add up!
+ } else {
+ ta.SeenFiles[inode] = name
+ }
+ }
+
+ capability, _ := system.Lgetxattr(path, "security.capability")
+ if capability != nil {
+ hdr.Xattrs = make(map[string]string)
+ hdr.Xattrs["security.capability"] = string(capability)
+ }
+
+ //handle re-mapping container ID mappings back to host ID mappings before
+ //writing tar headers/files. We skip whiteout files because they were written
+ //by the kernel and already have proper ownership relative to the host
+ if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) {
+ uid, gid, err := getFileUIDGID(fi.Sys())
+ if err != nil {
+ return err
+ }
+ xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
+ if err != nil {
+ return err
+ }
+ xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = xUID
+ hdr.Gid = xGID
+ }
+
+ if ta.WhiteoutConverter != nil {
+ if err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi); err != nil {
+ return err
+ }
+ }
+
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+
+ ta.Buffer.Reset(ta.TarWriter)
+ defer ta.Buffer.Reset(nil)
+ _, err = io.Copy(ta.Buffer, file)
+ file.Close()
+ if err != nil {
+ return err
+ }
+ err = ta.Buffer.Flush()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
+ // hdr.Mode is in linux format, which we can use for syscalls,
+ // but for os.Foo() calls we need the mode converted to os.FileMode,
+ // so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+ hdrInfo := hdr.FileInfo()
+
+ switch hdr.Typeflag {
+ case tar.TypeDir:
+ // Create directory unless it exists as a directory already.
+ // In that case we just want to merge the two
+ if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+ if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+
+ case tar.TypeReg, tar.TypeRegA:
+ // Source is regular file
+ file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
+ if err != nil {
+ return err
+ }
+ if _, err := io.Copy(file, reader); err != nil {
+ file.Close()
+ return err
+ }
+ file.Close()
+
+ case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
+ // Handle this in an OS-specific way
+ if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+ return err
+ }
+
+ case tar.TypeLink:
+ targetPath := filepath.Join(extractDir, hdr.Linkname)
+ // check for hardlink breakout
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
+ }
+ if err := os.Link(targetPath, path); err != nil {
+ return err
+ }
+
+ case tar.TypeSymlink:
+ // path -> hdr.Linkname = targetPath
+ // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
+ targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
+
+ // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
+ // that symlink would first have to be created, which would be caught earlier, at this very check:
+ if !strings.HasPrefix(targetPath, extractDir) {
+ return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
+ }
+ if err := os.Symlink(hdr.Linkname, path); err != nil {
+ return err
+ }
+
+ case tar.TypeXGlobalHeader:
+ logrus.Debug("PAX Global Extended Headers found and ignored")
+ return nil
+
+ default:
+ return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
+ }
+
+ // Lchown is not supported on Windows.
+ if Lchown && runtime.GOOS != "windows" {
+ if chownOpts == nil {
+ chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
+ }
+ if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
+ return err
+ }
+ }
+
+ var errors []string
+ for key, value := range hdr.Xattrs {
+ if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ if err == syscall.ENOTSUP {
+ // We ignore errors here because not all graphdrivers support
+ // xattrs *cough* old versions of AUFS *cough*. However only
+ // ENOTSUP should be emitted in that case, otherwise we still
+ // bail.
+ errors = append(errors, err.Error())
+ continue
+ }
+ return err
+ }
+
+ }
+
+ if len(errors) > 0 {
+ logrus.WithFields(logrus.Fields{
+ "errors": errors,
+ }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
+ }
+
+ // There is no LChmod, so ignore mode for symlink. Also, this
+ // must happen after chown, as that can modify the file mode
+ if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+ return err
+ }
+
+ aTime := hdr.AccessTime
+ if aTime.Before(hdr.ModTime) {
+ // Last access time should never be before last modified time.
+ aTime = hdr.ModTime
+ }
+
+ // system.Chtimes doesn't support a NOFOLLOW flag atm
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
+ return err
+ }
+ } else {
+ ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
+ if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ return err
+ }
+ }
+ return nil
+}
+
+// Tar creates an archive from the directory at `path`, and returns it as a
+// stream of bytes.
+func Tar(path string, compression Compression) (io.ReadCloser, error) {
+ return TarWithOptions(path, &TarOptions{Compression: compression})
+}
+
+// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
+// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
+func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+ // Fix the source path to work with long path names. This is a no-op
+ // on platforms other than Windows.
+ srcPath = fixVolumePathPrefix(srcPath)
+
+ patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
+
+ if err != nil {
+ return nil, err
+ }
+
+ pipeReader, pipeWriter := io.Pipe()
+
+ compressWriter, err := CompressStream(pipeWriter, options.Compression)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ ta := &tarAppender{
+ TarWriter: tar.NewWriter(compressWriter),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ SeenFiles: make(map[uint64]string),
+ UIDMaps: options.UIDMaps,
+ GIDMaps: options.GIDMaps,
+ WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat),
+ }
+
+ defer func() {
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Errorf("Can't close tar writer: %s", err)
+ }
+ if err := compressWriter.Close(); err != nil {
+ logrus.Errorf("Can't close compress writer: %s", err)
+ }
+ if err := pipeWriter.Close(); err != nil {
+ logrus.Errorf("Can't close pipe writer: %s", err)
+ }
+ }()
+
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+
+ stat, err := os.Lstat(srcPath)
+ if err != nil {
+ return
+ }
+
+ if !stat.IsDir() {
+ // We can't later join a non-dir with any includes because the
+ // 'walk' will error if "file/." is stat-ed and "file" is not a
+ // directory. So, we must split the source path and use the
+ // basename as the include.
+ if len(options.IncludeFiles) > 0 {
+ logrus.Warn("Tar: Can't archive a file with includes")
+ }
+
+ dir, base := SplitPathDirEntry(srcPath)
+ srcPath = dir
+ options.IncludeFiles = []string{base}
+ }
+
+ if len(options.IncludeFiles) == 0 {
+ options.IncludeFiles = []string{"."}
+ }
+
+ seen := make(map[string]bool)
+
+ for _, include := range options.IncludeFiles {
+ rebaseName := options.RebaseNames[include]
+
+ walkRoot := getWalkRoot(srcPath, include)
+ filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
+ if err != nil {
+ logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ return nil
+ }
+
+ relFilePath, err := filepath.Rel(srcPath, filePath)
+ if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
+ // Error getting relative path OR we are looking
+ // at the source directory path. Skip in both situations.
+ return nil
+ }
+
+ if options.IncludeSourceDir && include == "." && relFilePath != "." {
+ relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
+ }
+
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
+ if err != nil {
+ logrus.Errorf("Error matching %s: %v", relFilePath, err)
+ return err
+ }
+ }
+
+ if skip {
+ // If we want to skip this file and it's a directory
+ // then we should first check to see if there's an
+ // excludes pattern (e.g. !dir/file) that starts with this
+ // dir. If so then we can't skip this dir.
+
+ // If it's not a dir we can just return/skip.
+ if !f.IsDir() {
+ return nil
+ }
+
+ // No exceptions (!...) in patterns so just skip dir
+ if !exceptions {
+ return filepath.SkipDir
+ }
+
+ dirSlash := relFilePath + string(filepath.Separator)
+
+ for _, pat := range patterns {
+ if pat[0] != '!' {
+ continue
+ }
+ pat = pat[1:] + string(filepath.Separator)
+ if strings.HasPrefix(pat, dirSlash) {
+ // found a match - so can't skip this dir
+ return nil
+ }
+ }
+
+ // No matching exclusion dir so just skip dir
+ return filepath.SkipDir
+ }
+
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
+ // Rename the base resource.
+ if rebaseName != "" {
+ var replacement string
+ if rebaseName != string(filepath.Separator) {
+ // Special case the root directory to replace with an
+ // empty string instead so that we don't end up with
+ // double slashes in the paths.
+ replacement = rebaseName
+ }
+
+ relFilePath = strings.Replace(relFilePath, include, replacement, 1)
+ }
+
+ if err := ta.addTarFile(filePath, relFilePath); err != nil {
+ logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+ // if pipe is broken, stop writing tar stream to it
+ if err == io.ErrClosedPipe {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ }()
+
+ return pipeReader, nil
+}
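+
+// A usage sketch for TarWithOptions, assuming the vendored import path and
+// placeholder paths:
+//
+//    package main
+//
+//    import (
+//        "io"
+//        "os"
+//
+//        "github.com/docker/docker/pkg/archive"
+//    )
+//
+//    func main() {
+//        rc, err := archive.TarWithOptions("/tmp/src", &archive.TarOptions{
+//            Compression:     archive.Gzip,
+//            ExcludePatterns: []string{"*.log"},
+//        })
+//        if err != nil {
+//            panic(err)
+//        }
+//        defer rc.Close()
+//
+//        f, err := os.Create("/tmp/src.tar.gz")
+//        if err != nil {
+//            panic(err)
+//        }
+//        defer f.Close()
+//        if _, err := io.Copy(f, rc); err != nil {
+//            panic(err)
+//        }
+//    }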
+
+// Unpack unpacks the decompressedArchive to dest with options.
+func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
+ tr := tar.NewReader(decompressedArchive)
+ trBuf := pools.BufioReader32KPool.Get(nil)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+ if err != nil {
+ return err
+ }
+ whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
+
+ // Iterate through the files in the archive.
+loop:
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ // Normalize name, for safety and for a simple is-root check
+ // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
+ // This keeps "..\" as-is, but normalizes "\..\" to "\".
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ for _, exclude := range options.ExcludePatterns {
+ if strings.HasPrefix(hdr.Name, exclude) {
+ continue loop
+ }
+ }
+
+ // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
+ // the filepath format for the OS on which the daemon is running. Hence
+ // the check for a slash-suffix MUST be done in an OS-agnostic way.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return err
+ }
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+
+ // If path exists we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing directory with a non-directory from the archive.
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
+ }
+
+ if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
+ // If NoOverwriteDirNonDir is true then we cannot replace
+ // an existing non-directory with a directory from the archive.
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
+ }
+
+ if fi.IsDir() && hdr.Name == "." {
+ continue
+ }
+
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return err
+ }
+ }
+ }
+ trBuf.Reset(tr)
+
+ // if the options contain a uid & gid maps, convert header uid/gid
+ // entries using the maps such that lchown sets the proper mapped
+ // uid/gid after writing the file. We only perform this mapping if
+ // the file isn't already owned by the remapped root UID or GID, as
+ // that specific uid/gid has no mapping from container -> host, and
+ // those files already have the proper ownership for inside the
+ // container.
+ if hdr.Uid != remappedRootUID {
+ xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = xUID
+ }
+ if hdr.Gid != remappedRootGID {
+ xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Gid = xGID
+ }
+
+ if whiteoutConverter != nil {
+ writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+ if err != nil {
+ return err
+ }
+ if !writeFile {
+ continue
+ }
+ }
+
+ if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
+ return err
+ }
+
+ // Directory mtimes must be handled at the end so that file creation
+ // inside them doesn't further modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+// FIXME: specify behavior when target path exists vs. doesn't exist.
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error {
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ dest = filepath.Clean(dest)
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ r := tarArchive
+ if decompress {
+ decompressedArchive, err := DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return Unpack(r, dest, options)
+}
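+
+// A matching unpack sketch: Untar detects and strips compression itself, so
+// the file produced by the TarWithOptions example above can be fed straight
+// in (paths remain placeholders):
+//
+//    package main
+//
+//    import (
+//        "os"
+//
+//        "github.com/docker/docker/pkg/archive"
+//    )
+//
+//    func main() {
+//        f, err := os.Open("/tmp/src.tar.gz")
+//        if err != nil {
+//            panic(err)
+//        }
+//        defer f.Close()
+//
+//        opts := &archive.TarOptions{NoOverwriteDirNonDir: true}
+//        if err := archive.Untar(f, "/tmp/dst", opts); err != nil {
+//            panic(err)
+//        }
+//    }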
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+ logrus.Debugf("TarUntar(%s %s)", src, dst)
+ archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+
+ var options *TarOptions
+ if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
+ options = &TarOptions{
+ UIDMaps: archiver.UIDMaps,
+ GIDMaps: archiver.GIDMaps,
+ }
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func TarUntar(src, dst string) error {
+ return defaultArchiver.TarUntar(src, dst)
+}
+
+// UntarPath untars the tar file at `src` into the directory `dst`.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+ archive, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer archive.Close()
+ var options *TarOptions
+ if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
+ options = &TarOptions{
+ UIDMaps: archiver.UIDMaps,
+ GIDMaps: archiver.GIDMaps,
+ }
+ }
+ return archiver.Untar(archive, dst, options)
+}
+
+// UntarPath is a convenience function which looks for an archive
+// at filesystem path `src`, and unpacks it at `dst`.
+func UntarPath(src, dst string) error {
+ return defaultArchiver.UntarPath(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+ if !srcSt.IsDir() {
+ return archiver.CopyFileWithTar(src, dst)
+ }
+
+ // if this archiver is set up with ID mapping we need to create
+ // the new destination directory with the remapped root UID/GID pair
+ // as owner
+ rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
+ if err != nil {
+ return err
+ }
+ // Create dst, copy src's content into it
+ logrus.Debugf("Creating dest directory: %s", dst)
+ if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil {
+ return err
+ }
+ logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+ return archiver.TarUntar(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func CopyWithTar(src, dst string) error {
+ return defaultArchiver.CopyWithTar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+ logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+ srcSt, err := os.Stat(src)
+ if err != nil {
+ return err
+ }
+
+ if srcSt.IsDir() {
+ return fmt.Errorf("Can't copy a directory")
+ }
+
+ // Clean up the trailing slash. This must be done in an operating
+ // system specific manner.
+ if dst[len(dst)-1] == os.PathSeparator {
+ dst = filepath.Join(dst, filepath.Base(src))
+ }
+ // Create the holding directory if necessary
+ if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
+ return err
+ }
+
+ r, w := io.Pipe()
+ errC := promise.Go(func() error {
+ defer w.Close()
+
+ srcF, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer srcF.Close()
+
+ hdr, err := tar.FileInfoHeader(srcSt, "")
+ if err != nil {
+ return err
+ }
+ hdr.Name = filepath.Base(dst)
+ hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+ remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
+ if err != nil {
+ return err
+ }
+
+ // only perform mapping if the file being copied isn't already owned by the
+ // uid or gid of the remapped root in the container
+ if remappedRootUID != hdr.Uid {
+ xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = xUID
+ }
+ if remappedRootGID != hdr.Gid {
+ xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Gid = xGID
+ }
+
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+ if err := tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+ if _, err := io.Copy(tw, srcF); err != nil {
+ return err
+ }
+ return nil
+ })
+ defer func() {
+ if er := <-errC; er != nil {
+ err = er
+ }
+ }()
+
+ err = archiver.Untar(r, filepath.Dir(dst), nil)
+ if err != nil {
+ r.CloseWithError(err)
+ }
+ return err
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// Destination handling is done in an operating system specific manner,
+// depending on where the daemon is running. If `dst` ends with a trailing slash,
+// the final destination path will be `dst/base(src)` (Linux) or
+// `dst\base(src)` (Windows).
+func CopyFileWithTar(src, dst string) (err error) {
+ return defaultArchiver.CopyFileWithTar(src, dst)
+}
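+
+// A sketch of the package-level convenience wrappers (the paths are
+// placeholders):
+//
+//    package main
+//
+//    import "github.com/docker/docker/pkg/archive"
+//
+//    func main() {
+//        // Mirror a whole tree, metadata included.
+//        if err := archive.CopyWithTar("/tmp/src", "/tmp/dst"); err != nil {
+//            panic(err)
+//        }
+//        // Trailing separator: the file lands at /tmp/dst/config.json.
+//        if err := archive.CopyFileWithTar("/tmp/src/config.json", "/tmp/dst/"); err != nil {
+//            panic(err)
+//        }
+//    }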
+
+// cmdStream executes a command, and returns its stdout as a stream.
+// If the command fails to run or doesn't complete successfully, an error
+// will be returned, including anything written on stderr.
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+ chdone := make(chan struct{})
+ cmd.Stdin = input
+ pipeR, pipeW := io.Pipe()
+ cmd.Stdout = pipeW
+ var errBuf bytes.Buffer
+ cmd.Stderr = &errBuf
+
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, nil, err
+ }
+
+ // Copy stdout to the returned pipe
+ go func() {
+ if err := cmd.Wait(); err != nil {
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+ } else {
+ pipeW.Close()
+ }
+ close(chdone)
+ }()
+
+ return pipeR, chdone, nil
+}
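+
+// A standalone sketch of the same pipe-and-wait pattern, showing why the
+// io.Pipe matters: a failing command surfaces its stderr to readers via
+// CloseWithError ("echo" is a stand-in for the xz binary):
+//
+//    package main
+//
+//    import (
+//        "bytes"
+//        "fmt"
+//        "io"
+//        "io/ioutil"
+//        "os/exec"
+//    )
+//
+//    func stream(cmd *exec.Cmd) io.ReadCloser {
+//        pr, pw := io.Pipe()
+//        cmd.Stdout = pw
+//        var errBuf bytes.Buffer
+//        cmd.Stderr = &errBuf
+//        if err := cmd.Start(); err != nil {
+//            pw.CloseWithError(err)
+//            return pr
+//        }
+//        go func() {
+//            if err := cmd.Wait(); err != nil {
+//                pw.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
+//            } else {
+//                pw.Close()
+//            }
+//        }()
+//        return pr
+//    }
+//
+//    func main() {
+//        out, err := ioutil.ReadAll(stream(exec.Command("echo", "ok")))
+//        if err != nil {
+//            panic(err)
+//        }
+//        fmt.Print(string(out)) // ok
+//    }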
+
+// NewTempArchive reads the content of src into a temporary file, and returns the contents
+// of that file as an archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
+ f, err := ioutil.TempFile(dir, "")
+ if err != nil {
+ return nil, err
+ }
+ if _, err := io.Copy(f, src); err != nil {
+ return nil, err
+ }
+ if _, err := f.Seek(0, 0); err != nil {
+ return nil, err
+ }
+ st, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ size := st.Size()
+ return &TempArchive{File: f, Size: size}, nil
+}
+
+// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
+type TempArchive struct {
+ *os.File
+ Size int64 // Pre-computed from Stat().Size() as a convenience
+ read int64
+ closed bool
+}
+
+// Close closes the underlying file if it's still open, or does a no-op
+// to allow callers to try to close the TempArchive multiple times safely.
+func (archive *TempArchive) Close() error {
+ if archive.closed {
+ return nil
+ }
+
+ archive.closed = true
+
+ return archive.File.Close()
+}
+
+func (archive *TempArchive) Read(data []byte) (int, error) {
+ n, err := archive.File.Read(data)
+ archive.read += int64(n)
+ if err != nil || archive.read == archive.Size {
+ archive.Close()
+ os.Remove(archive.File.Name())
+ }
+ return n, err
+}
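+
+// A usage sketch for the self-deleting temp archive, assuming the vendored
+// import path and that Archive keeps its io.ReadCloser underlying type;
+// reading exactly Size bytes triggers the cleanup:
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//        "io"
+//        "io/ioutil"
+//        "strings"
+//
+//        "github.com/docker/docker/pkg/archive"
+//    )
+//
+//    func main() {
+//        src := ioutil.NopCloser(strings.NewReader("payload"))
+//        tmp, err := archive.NewTempArchive(src, "")
+//        if err != nil {
+//            panic(err)
+//        }
+//        // Reading the final byte closes and removes the backing file.
+//        buf := make([]byte, tmp.Size)
+//        if _, err := io.ReadFull(tmp, buf); err != nil {
+//            panic(err)
+//        }
+//        fmt.Printf("%d bytes: %s\n", tmp.Size, buf)
+//    }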
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
new file mode 100644
index 0000000000..277ff98885
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
@@ -0,0 +1,90 @@
+package archive
+
+import (
+ "archive/tar"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ if format == OverlayWhiteoutFormat {
+ return overlayWhiteoutConverter{}
+ }
+ return nil
+}
+
+type overlayWhiteoutConverter struct{}
+
+func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) error {
+ // convert whiteouts to AUFS format
+ if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
+ // we just rename the file and make it normal
+ hdr.Name = WhiteoutPrefix + hdr.Name
+ hdr.Mode = 0600
+ hdr.Typeflag = tar.TypeReg
+ hdr.Size = 0
+ }
+
+ if fi.Mode()&os.ModeDir != 0 {
+ // convert opaque dirs to AUFS format by writing an empty file with the prefix
+ opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
+ if err != nil {
+ return err
+ }
+ if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
+ // create a header for the whiteout file
+ // it should inherit some properties from the parent, but be a regular file
+ *hdr = tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: hdr.Mode & int64(os.ModePerm),
+ Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
+ Size: 0,
+ Uid: hdr.Uid,
+ Uname: hdr.Uname,
+ Gid: hdr.Gid,
+ Gname: hdr.Gname,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }
+ }
+ }
+
+ return nil
+}
+
+func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+ base := filepath.Base(path)
+ dir := filepath.Dir(path)
+
+ // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
+ if base == WhiteoutOpaqueDir {
+ if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil {
+ return false, err
+ }
+
+ // don't write the file itself
+ return false, nil
+ }
+
+ // if a file was deleted and we are using overlay, we need to create a character device
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+
+ if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil {
+ return false, err
+ }
+ if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+ return false, err
+ }
+
+ // don't write the file itself
+ return false, nil
+ }
+
+ return true, nil
+}
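+
+// A standalone sketch of the AUFS whiteout naming that both converters rely
+// on: a deleted file "a/b" is represented by a marker "a/.wh.b" (".wh." and
+// the helpers here are illustrative local copies):
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//        "path/filepath"
+//        "strings"
+//    )
+//
+//    const whPrefix = ".wh."
+//
+//    func toWhiteout(path string) string {
+//        dir, base := filepath.Split(path)
+//        return filepath.Join(dir, whPrefix+base)
+//    }
+//
+//    func fromWhiteout(path string) (string, bool) {
+//        dir, base := filepath.Split(path)
+//        if !strings.HasPrefix(base, whPrefix) {
+//            return "", false
+//        }
+//        return filepath.Join(dir, strings.TrimPrefix(base, whPrefix)), true
+//    }
+//
+//    func main() {
+//        w := toWhiteout("etc/passwd")
+//        orig, ok := fromWhiteout(w)
+//        fmt.Println(w, orig, ok) // etc/.wh.passwd etc/passwd true
+//    }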
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
new file mode 100644
index 0000000000..54acbf2856
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package archive
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
new file mode 100644
index 0000000000..fbc3bb8c4d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -0,0 +1,112 @@
+// +build !windows
+
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// fixVolumePathPrefix does platform-specific processing to ensure that the
+// path being passed in, if not already in a volume path format, is converted to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+ return srcPath + string(filepath.Separator) + include
+}
+
+// CanonicalTarNameForPath returns platform-specific filepath
+// to canonical posix-style path for tar archival. p is relative
+// path.
+func CanonicalTarNameForPath(p string) (string, error) {
+ return p, nil // already unix-style
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar header
+// based on the platform the archival is done on.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ return perm // noop for unix as golang APIs provide perm bits correctly
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ err = errors.New("cannot convert stat value to syscall.Stat_t")
+ return
+ }
+
+ inode = uint64(s.Ino)
+
+ // Currently Go does not fill in the major/minor numbers
+ if s.Mode&syscall.S_IFBLK != 0 ||
+ s.Mode&syscall.S_IFCHR != 0 {
+ hdr.Devmajor = int64(major(uint64(s.Rdev)))
+ hdr.Devminor = int64(minor(uint64(s.Rdev)))
+ }
+
+ return
+}
+
+func getFileUIDGID(stat interface{}) (int, int, error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
+ }
+ return int(s.Uid), int(s.Gid), nil
+}
+
+func major(device uint64) uint64 {
+ return (device >> 8) & 0xfff
+}
+
+func minor(device uint64) uint64 {
+ return (device & 0xff) | ((device >> 12) & 0xfff00)
+}
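+
+// A quick check of the device-number split above; /dev/sda1 is
+// conventionally major 8, minor 1, i.e. an rdev of 0x0801:
+//
+//    package main
+//
+//    import "fmt"
+//
+//    func major(device uint64) uint64 { return (device >> 8) & 0xfff }
+//    func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) }
+//
+//    func main() {
+//        rdev := uint64(0x0801)
+//        fmt.Println(major(rdev), minor(rdev)) // 8 1
+//    }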
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ mode := uint32(hdr.Mode & 07777)
+ switch hdr.Typeflag {
+ case tar.TypeBlock:
+ mode |= syscall.S_IFBLK
+ case tar.TypeChar:
+ mode |= syscall.S_IFCHR
+ case tar.TypeFifo:
+ mode |= syscall.S_IFIFO
+ }
+
+ if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
+ return err
+ }
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ if hdr.Typeflag == tar.TypeLink {
+ if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ } else if hdr.Typeflag != tar.TypeSymlink {
+ if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
new file mode 100644
index 0000000000..5c3a1be340
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -0,0 +1,70 @@
+// +build windows
+
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// fixVolumePathPrefix does platform-specific processing to ensure that the
+// path being passed in, if not already in a volume path format, is converted to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return longpath.AddPrefix(srcPath)
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+func getWalkRoot(srcPath string, include string) string {
+ return filepath.Join(srcPath, include)
+}
+
+// CanonicalTarNameForPath returns platform-specific filepath
+// to canonical posix-style path for tar archival. p is relative
+// path.
+func CanonicalTarNameForPath(p string) (string, error) {
+ // windows: convert windows style relative path with backslashes
+ // into forward slashes. Since windows does not allow '/' or '\'
+// in file names, it is mostly safe to replace; however, we must
+// check just in case.
+ if strings.Contains(p, "/") {
+ return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+ }
+ return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar header
+// based on the platform the archival is done on.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+ perm &= 0755
+ // Add the x bit: make everything +x from windows
+ perm |= 0111
+
+ return perm
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
+ // Do nothing; there is no notion of Rdev, Inode, or Nlink in stat on Windows.
+ return
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+ return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+ return nil
+}
+
+func getFileUIDGID(stat interface{}) (int, int, error) {
+ // no notion of file ownership mapping yet on Windows
+ return 0, 0, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
new file mode 100644
index 0000000000..4e2d8e54f7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -0,0 +1,446 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+)
+
+// ChangeType represents the change type.
+type ChangeType int
+
+const (
+ // ChangeModify represents the modify operation.
+ ChangeModify = iota
+ // ChangeAdd represents the add operation.
+ ChangeAdd
+ // ChangeDelete represents the delete operation.
+ ChangeDelete
+)
+
+func (c ChangeType) String() string {
+ switch c {
+ case ChangeModify:
+ return "C"
+ case ChangeAdd:
+ return "A"
+ case ChangeDelete:
+ return "D"
+ }
+ return ""
+}
+
+// Change represents a change, it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change could be modify, add, delete.
+// This is used for layer diff.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
+func (change *Change) String() string {
+ return fmt.Sprintf("%s %s", change.Kind, change.Path)
+}
+
+// for sort.Sort
+type changesByPath []Change
+
+func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
+func (c changesByPath) Len() int { return len(c) }
+func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
+
+// GNU tar and the Go tar writer don't have sub-second mtime
+// precision, which is problematic when we apply changes via tar
+// files. We handle this by comparing for exact times, *or* the same
+// second count and either a or b having exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+ return a == b ||
+ (a.Unix() == b.Unix() &&
+ (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
+
+func sameFsTimeSpec(a, b syscall.Timespec) bool {
+ return a.Sec == b.Sec &&
+ (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
+}
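+
+// An illustration of that tolerance (sameFsTime copied locally for the
+// sketch):
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//        "time"
+//    )
+//
+//    func sameFsTime(a, b time.Time) bool {
+//        return a == b ||
+//            (a.Unix() == b.Unix() &&
+//                (a.Nanosecond() == 0 || b.Nanosecond() == 0))
+//    }
+//
+//    func main() {
+//        a := time.Unix(1500000000, 123456789)
+//        b := time.Unix(1500000000, 0) // what a tar round-trip preserves
+//        fmt.Println(sameFsTime(a, b)) // true
+//    }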
+
+// Changes walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func Changes(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
+}
+
+func aufsMetadataSkip(path string) (skip bool, err error) {
+ skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+ if err != nil {
+ skip = true
+ }
+ return
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ f := filepath.Base(path)
+
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(f, WhiteoutPrefix) {
+ originalFile := f[len(WhiteoutPrefix):]
+ return filepath.Join(filepath.Dir(path), originalFile), nil
+ }
+
+ return "", nil
+}
+
+type skipChange func(string) (bool, error)
+type deleteChange func(string, string, os.FileInfo) (string, error)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
+ var (
+ changes []Change
+ changedDirs = make(map[string]struct{})
+ )
+
+ err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(rw, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ if sc != nil {
+ if skip, err := sc(path); skip {
+ return err
+ }
+ }
+
+ change := Change{
+ Path: path,
+ }
+
+ deletedFile, err := dc(rw, path, f)
+ if err != nil {
+ return err
+ }
+
+ // Find out what kind of modification happened
+ if deletedFile != "" {
+ change.Path = deletedFile
+ change.Kind = ChangeDelete
+ } else {
+ // Otherwise, the file was added
+ change.Kind = ChangeAdd
+
+ // ...Unless it already existed in a top layer, in which case, it's a modification
+ for _, layer := range layers {
+ stat, err := os.Stat(filepath.Join(layer, path))
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if err == nil {
+ // The file existed in the top layer, so that's a modification
+
+ // However, if it's a directory, maybe it wasn't actually modified.
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+ if stat.IsDir() && f.IsDir() {
+ if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+ // Both directories are the same, don't record the change
+ return nil
+ }
+ }
+ change.Kind = ChangeModify
+ break
+ }
+ }
+ }
+
+ // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+ // This block is here to ensure the change is recorded even if the
+ // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+ // Check https://github.com/docker/docker/pull/13590 for details.
+ if f.IsDir() {
+ changedDirs[path] = struct{}{}
+ }
+ if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
+ parent := filepath.Dir(path)
+ if _, ok := changedDirs[parent]; !ok && parent != "/" {
+ changes = append(changes, Change{Path: parent, Kind: ChangeModify})
+ changedDirs[parent] = struct{}{}
+ }
+ }
+
+ // Record change
+ changes = append(changes, change)
+ return nil
+ })
+ if err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ return changes, nil
+}
+
+// FileInfo describes the information of a file.
+type FileInfo struct {
+ parent *FileInfo
+ name string
+ stat *system.StatT
+ children map[string]*FileInfo
+ capability []byte
+ added bool
+}
+
+// LookUp looks up the file information for the given path within the tree.
+func (info *FileInfo) LookUp(path string) *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ parent := info
+ if path == string(os.PathSeparator) {
+ return info
+ }
+
+ pathElements := strings.Split(path, string(os.PathSeparator))
+ for _, elem := range pathElements {
+ if elem != "" {
+ child := parent.children[elem]
+ if child == nil {
+ return nil
+ }
+ parent = child
+ }
+ }
+ return parent
+}
+
+func (info *FileInfo) path() string {
+ if info.parent == nil {
+ // As this runs on the daemon side, file paths are OS specific.
+ return string(os.PathSeparator)
+ }
+ return filepath.Join(info.parent.path(), info.name)
+}
+
+func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
+
+ sizeAtEntry := len(*changes)
+
+ if oldInfo == nil {
+ // add
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeAdd,
+ }
+ *changes = append(*changes, change)
+ info.added = true
+ }
+
+ // We make a copy so we can modify it to detect additions.
+ // Also, we only recurse on the old dir if the new info is a directory;
+ // otherwise any previous delete/change is considered recursive.
+ oldChildren := make(map[string]*FileInfo)
+ if oldInfo != nil && info.isDir() {
+ for k, v := range oldInfo.children {
+ oldChildren[k] = v
+ }
+ }
+
+ for name, newChild := range info.children {
+ oldChild := oldChildren[name]
+ if oldChild != nil {
+ // change?
+ oldStat := oldChild.stat
+ newStat := newChild.stat
+ // Note: We can't compare inode or ctime or blocksize here, because these change
+ // when copying a file into a container. However, that is not generally a problem
+ // because any content change will change mtime, and any status change should
+ // be visible when actually comparing the stat fields. The only time this
+ // breaks down is if some code intentionally hides a change by setting
+ // back mtime
+ if statDifferent(oldStat, newStat) ||
+ !bytes.Equal(oldChild.capability, newChild.capability) {
+ change := Change{
+ Path: newChild.path(),
+ Kind: ChangeModify,
+ }
+ *changes = append(*changes, change)
+ newChild.added = true
+ }
+
+ // Remove from copy so we can detect deletions
+ delete(oldChildren, name)
+ }
+
+ newChild.addChanges(oldChild, changes)
+ }
+ for _, oldChild := range oldChildren {
+ // delete
+ change := Change{
+ Path: oldChild.path(),
+ Kind: ChangeDelete,
+ }
+ *changes = append(*changes, change)
+ }
+
+ // If there were changes inside this directory, we need to add it, even if the directory
+ // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
+ // As this runs on the daemon side, file paths are OS specific.
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
+ change := Change{
+ Path: info.path(),
+ Kind: ChangeModify,
+ }
+ // Let's insert the directory entry before the recently added entries located inside this dir
+ *changes = append(*changes, change) // just to resize the slice, will be overwritten
+ copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
+ (*changes)[sizeAtEntry] = change
+ }
+}
+
+// Changes compares the file information against oldInfo and returns the resulting changes.
+func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
+ var changes []Change
+
+ info.addChanges(oldInfo, &changes)
+
+ return changes
+}
+
+func newRootFileInfo() *FileInfo {
+ // As this runs on the daemon side, file paths are OS specific.
+ root := &FileInfo{
+ name: string(os.PathSeparator),
+ children: make(map[string]*FileInfo),
+ }
+ return root
+}
+
+// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
+// If oldDir is "", then all files in newDir will be Add-Changes.
+func ChangesDirs(newDir, oldDir string) ([]Change, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ )
+ if oldDir == "" {
+ emptyDir, err := ioutil.TempDir("", "empty")
+ if err != nil {
+ return nil, err
+ }
+ defer os.Remove(emptyDir)
+ oldDir = emptyDir
+ }
+ oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
+ if err != nil {
+ return nil, err
+ }
+
+ return newRoot.Changes(oldRoot), nil
+}
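+
+// A usage sketch for ChangesDirs, with placeholder directories and the
+// vendored import path:
+//
+//    package main
+//
+//    import (
+//        "fmt"
+//
+//        "github.com/docker/docker/pkg/archive"
+//    )
+//
+//    func main() {
+//        changes, err := archive.ChangesDirs("/tmp/new", "/tmp/old")
+//        if err != nil {
+//            panic(err)
+//        }
+//        for _, c := range changes {
+//            fmt.Println(c.String()) // e.g. "A /added.txt" or "D /removed.txt"
+//        }
+//    }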
+
+// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
+func ChangesSize(newDir string, changes []Change) int64 {
+ var (
+ size int64
+ sf = make(map[uint64]struct{})
+ )
+ for _, change := range changes {
+ if change.Kind == ChangeModify || change.Kind == ChangeAdd {
+ file := filepath.Join(newDir, change.Path)
+ fileInfo, err := os.Lstat(file)
+ if err != nil {
+ logrus.Errorf("Can not stat %q: %s", file, err)
+ continue
+ }
+
+ if fileInfo != nil && !fileInfo.IsDir() {
+ if hasHardlinks(fileInfo) {
+ inode := getIno(fileInfo)
+ if _, ok := sf[inode]; !ok {
+ size += fileInfo.Size()
+ sf[inode] = struct{}{}
+ }
+ } else {
+ size += fileInfo.Size()
+ }
+ }
+ }
+ }
+ return size
+}
+
+// ExportChanges produces an Archive from the provided changes, relative to dir.
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
+ reader, writer := io.Pipe()
+ go func() {
+ ta := &tarAppender{
+ TarWriter: tar.NewWriter(writer),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ SeenFiles: make(map[uint64]string),
+ UIDMaps: uidMaps,
+ GIDMaps: gidMaps,
+ }
+ // this buffer is needed for the duration of this piped stream
+ defer pools.BufioWriter32KPool.Put(ta.Buffer)
+
+ sort.Sort(changesByPath(changes))
+
+ // In general we log errors here but ignore them because
+ // during e.g. a diff operation the container can continue
+ // mutating the filesystem and we can see transient errors
+ // from this
+ for _, change := range changes {
+ if change.Kind == ChangeDelete {
+ whiteOutDir := filepath.Dir(change.Path)
+ whiteOutBase := filepath.Base(change.Path)
+ whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
+ timestamp := time.Now()
+ hdr := &tar.Header{
+ Name: whiteOut[1:],
+ Size: 0,
+ ModTime: timestamp,
+ AccessTime: timestamp,
+ ChangeTime: timestamp,
+ }
+ if err := ta.TarWriter.WriteHeader(hdr); err != nil {
+ logrus.Debugf("Can't write whiteout header: %s", err)
+ }
+ } else {
+ path := filepath.Join(dir, change.Path)
+ if err := ta.addTarFile(path, change.Path[1:]); err != nil {
+ logrus.Debugf("Can't add file %s to tar: %s", path, err)
+ }
+ }
+ }
+
+ // Make sure to check the error on Close.
+ if err := ta.TarWriter.Close(); err != nil {
+ logrus.Debugf("Can't close layer: %s", err)
+ }
+ if err := writer.Close(); err != nil {
+ logrus.Debugf("failed close Changes writer: %s", err)
+ }
+ }()
+ return reader, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
new file mode 100644
index 0000000000..a4cc0c65d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -0,0 +1,312 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "syscall"
+ "unsafe"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// walker is used to implement collectFileInfoForChanges on linux. Where this
+// method in general returns the entire contents of two directory trees, we
+// optimize some FS calls out on linux. In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save up to seconds on large
+// images.
+type walker struct {
+ dir1 string
+ dir2 string
+ root1 *FileInfo
+ root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// for generating a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+ w := &walker{
+ dir1: dir1,
+ dir2: dir2,
+ root1: newRootFileInfo(),
+ root2: newRootFileInfo(),
+ }
+
+ i1, err := os.Lstat(w.dir1)
+ if err != nil {
+ return nil, nil, err
+ }
+ i2, err := os.Lstat(w.dir2)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if err := w.walk("/", i1, i2); err != nil {
+ return nil, nil, err
+ }
+
+ return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
+ if fi == nil {
+ return nil
+ }
+ parent := root.LookUp(filepath.Dir(path))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
+ }
+ info := &FileInfo{
+ name: filepath.Base(path),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+ cpath := filepath.Join(dir, path)
+ stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
+ if err != nil {
+ return err
+ }
+ info.stat = stat
+ info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
+ parent.children[info.name] = info
+ return nil
+}
+
+// Walk a subtree rooted at the same path in both trees being iterated. For
+// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
+func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
+ // Register these nodes with the return trees, unless we're still at the
+ // (already-created) roots:
+ if path != "/" {
+ if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
+ return err
+ }
+ if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
+ return err
+ }
+ }
+
+ is1Dir := i1 != nil && i1.IsDir()
+ is2Dir := i2 != nil && i2.IsDir()
+
+ sameDevice := false
+ if i1 != nil && i2 != nil {
+ si1 := i1.Sys().(*syscall.Stat_t)
+ si2 := i2.Sys().(*syscall.Stat_t)
+ if si1.Dev == si2.Dev {
+ sameDevice = true
+ }
+ }
+
+ // If these files are both non-existent, or leaves (non-dirs), we are done.
+ if !is1Dir && !is2Dir {
+ return nil
+ }
+
+ // Fetch the names of all the files contained in both directories being walked:
+ var names1, names2 []nameIno
+ if is1Dir {
+ names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+ if is2Dir {
+ names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
+ if err != nil {
+ return err
+ }
+ }
+
+ // We have lists of the files contained in both parallel directories, sorted
+ // in the same order. Walk them in parallel, generating a unique merged list
+ // of all items present in either or both directories.
+ var names []string
+ ix1 := 0
+ ix2 := 0
+
+ for {
+ if ix1 >= len(names1) {
+ break
+ }
+ if ix2 >= len(names2) {
+ break
+ }
+
+ ni1 := names1[ix1]
+ ni2 := names2[ix2]
+
+ switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
+ case -1: // ni1 < ni2 -- advance ni1
+ // we will not encounter ni1 in names2
+ names = append(names, ni1.name)
+ ix1++
+ case 0: // ni1 == ni2
+ if ni1.ino != ni2.ino || !sameDevice {
+ names = append(names, ni1.name)
+ }
+ ix1++
+ ix2++
+ case 1: // ni1 > ni2 -- advance ni2
+ // we will not encounter ni2 in names1
+ names = append(names, ni2.name)
+ ix2++
+ }
+ }
+ for ix1 < len(names1) {
+ names = append(names, names1[ix1].name)
+ ix1++
+ }
+ for ix2 < len(names2) {
+ names = append(names, names2[ix2].name)
+ ix2++
+ }
+
+ // For each of the names present in either or both of the directories being
+ // iterated, stat the name under each root, and recurse the pair of them:
+ for _, name := range names {
+ fname := filepath.Join(path, name)
+ var cInfo1, cInfo2 os.FileInfo
+ if is1Dir {
+ cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if is2Dir {
+ cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ }
+ if err = w.walk(fname, cInfo1, cInfo2); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// {name,inode} pairs used to support the early-pruning logic of the walker type
+type nameIno struct {
+ name string
+ ino uint64
+}
+
+type nameInoSlice []nameIno
+
+func (s nameInoSlice) Len() int { return len(s) }
+func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
+
+// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
+// numbers further up the stack when reading directory contents. Unlike
+// os.Readdirnames, which returns a list of filenames, this function returns a
+// list of {filename,inode} pairs.
+func readdirnames(dirname string) (names []nameIno, err error) {
+ var (
+ size = 100
+ buf = make([]byte, 4096)
+ nbuf int
+ bufp int
+ nb int
+ )
+
+ f, err := os.Open(dirname)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ names = make([]nameIno, 0, size) // Empty with room to grow.
+ for {
+ // Refill the buffer if necessary
+ if bufp >= nbuf {
+ bufp = 0
+ nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
+ if nbuf < 0 {
+ nbuf = 0
+ }
+ if err != nil {
+ return nil, os.NewSyscallError("readdirent", err)
+ }
+ if nbuf <= 0 {
+ break // EOF
+ }
+ }
+
+ // Drain the buffer
+ nb, names = parseDirent(buf[bufp:nbuf], names)
+ bufp += nb
+ }
+
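+ // getdents returns entries in on-disk order; the walker's two-pointer
+ // merge requires both directory listings sorted by name.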
+ sl := nameInoSlice(names)
+ sort.Sort(sl)
+ return sl, nil
+}
+
+// parseDirent is a minor modification of syscall.ParseDirent (linux version)
+// which returns {name,inode} pairs instead of just names.
+func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
+ origlen := len(buf)
+ for len(buf) > 0 {
+ dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ nameBuf := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) // avoids shadowing the imported bytes package
+ name := string(nameBuf[0:clen(nameBuf[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ names = append(names, nameIno{name, dirent.Ino})
+ }
+ return origlen - len(buf), names
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
+
+// OverlayChanges walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func OverlayChanges(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, overlayDeletedFile, nil)
+}
+
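+// Example use of OverlayChanges (illustrative; paths are hypothetical):
+//
+//	lowers := []string{"/docker/overlay/1234/root"} // parent layers
+//	changes, err := OverlayChanges(lowers, "/docker/overlay/1234/upper")
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = changes // each Change pairs a path with an add/modify/delete kind
+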
+func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ if fi.Mode()&os.ModeCharDevice != 0 {
+ s := fi.Sys().(*syscall.Stat_t)
+ if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
+ return path, nil
+ }
+ }
+ if fi.Mode()&os.ModeDir != 0 {
+ opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
+ if err != nil {
+ return "", err
+ }
+ if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
+ return path, nil
+ }
+ }
+
+ return "", nil
+
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
new file mode 100644
index 0000000000..da70ed37c4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
@@ -0,0 +1,97 @@
+// +build !linux
+
+package archive
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
+ var (
+ oldRoot, newRoot *FileInfo
+ err1, err2 error
+ errs = make(chan error, 2)
+ )
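+ // errs is buffered so that neither goroutine blocks on send if the
+ // other has already returned an error.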
+ go func() {
+ oldRoot, err1 = collectFileInfo(oldDir)
+ errs <- err1
+ }()
+ go func() {
+ newRoot, err2 = collectFileInfo(newDir)
+ errs <- err2
+ }()
+
+ // block until both routines have returned
+ for i := 0; i < 2; i++ {
+ if err := <-errs; err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return oldRoot, newRoot, nil
+}
+
+func collectFileInfo(sourceDir string) (*FileInfo, error) {
+ root := newRootFileInfo()
+
+ err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(sourceDir, path)
+ if err != nil {
+ return err
+ }
+
+ // As this runs on the daemon side, file paths are OS specific.
+ relPath = filepath.Join(string(os.PathSeparator), relPath)
+
+ // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
+ // Temporary workaround. If the returned path starts with two backslashes,
+ // trim it down to a single backslash. Only relevant on Windows.
+ if runtime.GOOS == "windows" {
+ if strings.HasPrefix(relPath, `\\`) {
+ relPath = relPath[1:]
+ }
+ }
+
+ if relPath == string(os.PathSeparator) {
+ return nil
+ }
+
+ parent := root.LookUp(filepath.Dir(relPath))
+ if parent == nil {
+ return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
+ }
+
+ info := &FileInfo{
+ name: filepath.Base(relPath),
+ children: make(map[string]*FileInfo),
+ parent: parent,
+ }
+
+ s, err := system.Lstat(path)
+ if err != nil {
+ return err
+ }
+ info.stat = s
+
+ info.capability, _ = system.Lgetxattr(path, "security.capability")
+
+ parent.children[info.name] = info
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return root, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
new file mode 100644
index 0000000000..3778b732cf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -0,0 +1,36 @@
+// +build !windows
+
+package archive
+
+import (
+ "os"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+ if oldStat.Mode() != newStat.Mode() ||
+ oldStat.UID() != newStat.UID() ||
+ oldStat.GID() != newStat.GID() ||
+ oldStat.Rdev() != newStat.Rdev() ||
+ // Don't look at size for dirs; it's not a good measure of change
+ (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
+ (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
+}
+
+func getIno(fi os.FileInfo) uint64 {
+ return uint64(fi.Sys().(*syscall.Stat_t).Ino)
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
new file mode 100644
index 0000000000..af94243fc4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -0,0 +1,30 @@
+package archive
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+ // Don't look at size for dirs; it's not a good measure of change
+ if oldStat.ModTime() != newStat.ModTime() ||
+ oldStat.Mode() != newStat.Mode() ||
+ oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
+ return true
+ }
+ return false
+}
+
+func (info *FileInfo) isDir() bool {
+ return info.parent == nil || info.stat.IsDir()
+}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+ return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
new file mode 100644
index 0000000000..a60c948d0d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy.go
@@ -0,0 +1,458 @@
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/system"
+)
+
+// Errors used or returned by this file.
+var (
+ ErrNotDirectory = errors.New("not a directory")
+ ErrDirNotExists = errors.New("no such directory")
+ ErrCannotCopyDir = errors.New("cannot copy directory")
+ ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// clean path already ends in a path separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
+ // Ensure paths are in platform semantics
+ cleanedPath = normalizePath(cleanedPath)
+ originalPath = normalizePath(originalPath)
+
+ if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+ if !hasTrailingPathSeparator(cleanedPath) {
+ // Add a separator if it doesn't already end with one (a cleaned
+ // path would only end in a separator if it is the root).
+ cleanedPath += string(filepath.Separator)
+ }
+ cleanedPath += "."
+ }
+
+ if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
+ cleanedPath += string(filepath.Separator)
+ }
+
+ return cleanedPath
+}
+
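+// For example (illustrative; Unix separators shown):
+//
+//	PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar/")  // -> "/foo/bar/"
+//	PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar/.") // -> "/foo/bar/."
+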
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string) bool {
+ return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func hasTrailingPathSeparator(path string) bool {
+ return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path but preserves a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+ cleanedPath := filepath.Clean(normalizePath(path))
+
+ if specifiesCurrentDir(path) {
+ cleanedPath += string(filepath.Separator) + "."
+ }
+
+ return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
+
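+// For example (illustrative; Unix separators shown):
+//
+//	SplitPathDirEntry("/foo/bar")   // -> ("/foo", "bar")
+//	SplitPathDirEntry("/foo/bar/.") // -> ("/foo/bar", ".")
+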
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
+ return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
+ sourcePath = normalizePath(sourcePath)
+ if _, err = os.Lstat(sourcePath); err != nil {
+ // Catches the case where the source does not exist or is not a
+ // directory if asserted to be a directory, as this also causes an
+ // error.
+ return
+ }
+
+ // Separate the source path between its directory and
+ // the entry in that directory which we are archiving.
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+
+ filter := []string{sourceBase}
+
+ logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+
+ return TarWithOptions(sourceDir, &TarOptions{
+ Compression: Uncompressed,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ RebaseNames: map[string]string{
+ sourceBase: rebaseName,
+ },
+ })
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+ Path string
+ Exists bool
+ IsDir bool
+ RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+ // Normalize the file path and then evaluate the symlink.
+ // We will use the target file instead of the symlink itself if
+ // followLink is set.
+ path = normalizePath(path)
+
+ resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ stat, err := os.Lstat(resolvedPath)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ return CopyInfo{
+ Path: resolvedPath,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ RebaseName: rebaseName,
+ }, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+ maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+ path = normalizePath(path)
+ originalPath := path
+
+ stat, err := os.Lstat(path)
+
+ if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+ // The path exists and is not a symlink.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+ }
+
+ // While the path is a symlink.
+ for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+ if n > maxSymlinkIter {
+ // Don't follow symlinks more than this arbitrary number of times.
+ return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+ }
+
+ // The path is a symbolic link. We need to evaluate it so that the
+ // destination of the copy operation is the link target and not the
+ // link itself. This is notably different than CopyInfoSourcePath which
+ // only evaluates symlinks before the last appearing path separator.
+ // Also note that it is okay if the last path element is a broken
+ // symlink as the copy operation should create the target.
+ var linkTarget string
+
+ linkTarget, err = os.Readlink(path)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ if !system.IsAbs(linkTarget) {
+ // Join with the parent directory.
+ dstParent, _ := SplitPathDirEntry(path)
+ linkTarget = filepath.Join(dstParent, linkTarget)
+ }
+
+ path = linkTarget
+ stat, err = os.Lstat(path)
+ }
+
+ if err != nil {
+ // It's okay if the destination path doesn't exist. We can still
+ // continue the copy operation if the parent directory exists.
+ if !os.IsNotExist(err) {
+ return CopyInfo{}, err
+ }
+
+ // Ensure destination parent dir exists.
+ dstParent, _ := SplitPathDirEntry(path)
+
+ parentDirStat, err := os.Lstat(dstParent)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+ if !parentDirStat.IsDir() {
+ return CopyInfo{}, ErrNotDirectory
+ }
+
+ return CopyInfo{Path: path}, nil
+ }
+
+ // The path exists after resolving symlinks.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+}
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
+ // Ensure in platform semantics
+ srcInfo.Path = normalizePath(srcInfo.Path)
+ dstInfo.Path = normalizePath(dstInfo.Path)
+
+ // Separate the destination path between its directory and base
+ // components in case the source archive contents need to be rebased.
+ dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+ _, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+ switch {
+ case dstInfo.Exists && dstInfo.IsDir:
+ // The destination exists as a directory. No alteration
+ // to srcContent is needed as its contents can be
+ // simply extracted to the destination directory.
+ return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ case dstInfo.Exists && srcInfo.IsDir:
+ // The destination exists as some type of file and the source
+ // content is a directory. This is an error condition since
+ // you cannot copy a directory to an existing file location.
+ return "", nil, ErrCannotCopyDir
+ case dstInfo.Exists:
+ // The destination exists as some type of file and the source content
+ // is also a file. The source content entry will have to be renamed to
+ // have a basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case srcInfo.IsDir:
+ // The destination does not exist and the source content is an archive
+ // of a directory. The archive should be extracted to the parent of
+ // the destination path instead, and when it is, the directory that is
+ // created as a result should take the name of the destination path.
+ // The source content entries will have to be renamed to have a
+ // basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case assertsDirectory(dstInfo.Path):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+ // archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ }
+
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
+ if oldBase == string(os.PathSeparator) {
+ // If oldBase specifies the root directory, use an empty string as
+ // oldBase instead so that newBase doesn't replace the path separator
+ // that all paths will start with.
+ oldBase = ""
+ }
+
+ rebased, w := io.Pipe()
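+ // The pipe lets header rewriting happen lazily in the goroutine below as
+ // the consumer reads, so the archive is never fully buffered in memory.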
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Ensure in platform semantics
+ srcPath = normalizePath(srcPath)
+ dstPath = normalizePath(dstPath)
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+ if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
+
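+// For example, copying a file into an existing directory (illustrative;
+// hypothetical paths):
+//
+//	// The trailing separator asserts that /tmp/dst is a directory.
+//	if err := CopyResource("/tmp/src/data.txt", "/tmp/dst/", true); err != nil {
+//		// handle error
+//	}
+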
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
+ // The destination path need not exist, but CopyInfoDestinationPath will
+ // ensure that at least the parent directory exists.
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+ if err != nil {
+ return err
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
+
+// ResolveHostSourcePath decides which real path needs to be copied, based on
+// whether symbolic links should be followed. If followLink is true,
+// resolvedPath is the link target of any symlink file; otherwise only
+// symlinks in the parent directory are resolved, and a symlink file itself
+// is returned without being resolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+ if followLink {
+ resolvedPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return
+ }
+
+ resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+ } else {
+ dirPath, basePath := filepath.Split(path)
+
+ // If not following symlinks, resolve only the symlinks in the parent dir.
+ var resolvedDirPath string
+ resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+ if err != nil {
+ return
+ }
+ // resolvedDirPath will have been cleaned (no trailing path separators) so
+ // we can manually join it with the base path element.
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+ if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+ rebaseName = filepath.Base(path)
+ }
+ }
+ return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath, returning the
+// completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+ // resolvedPath will have been cleaned (no trailing path separator or "."),
+ // so we can manually re-append them to match the original path.
+ var rebaseName string
+ if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+ resolvedPath += string(filepath.Separator) + "."
+ }
+
+ if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+ resolvedPath += string(filepath.Separator)
+ }
+
+ if filepath.Base(path) != filepath.Base(resolvedPath) {
+ // In the case where the path had a trailing separator and a symlink
+ // evaluation has changed the last path component, we will need to
+ // rebase the name in the archive that is being copied to match the
+ // originally requested name.
+ rebaseName = filepath.Base(path)
+ }
+ return resolvedPath, rebaseName
+}
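+
+// For example (illustrative), if "/tmp/link/" is a symlink resolving to
+// "/tmp/target":
+//
+//	GetRebaseName("/tmp/link/", "/tmp/target") // -> ("/tmp/target/", "link")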
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
new file mode 100644
index 0000000000..e305b5e4af
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.ToSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
new file mode 100644
index 0000000000..2b775b45c4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
@@ -0,0 +1,9 @@
+package archive
+
+import (
+ "path/filepath"
+)
+
+func normalizePath(path string) string {
+ return filepath.FromSlash(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
new file mode 100644
index 0000000000..1b08ad33ab
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff.go
@@ -0,0 +1,279 @@
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
+)
+
+// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
+ tr := tar.NewReader(layer)
+ trBuf := pools.BufioReader32KPool.Get(tr)
+ defer pools.BufioReader32KPool.Put(trBuf)
+
+ var dirs []*tar.Header
+ unpackedPaths := make(map[string]struct{})
+
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+ remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+ if err != nil {
+ return 0, err
+ }
+
+ aufsTempdir := ""
+ aufsHardlinks := make(map[string]*tar.Header)
+
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ return 0, err
+ }
+
+ size += hdr.Size
+
+ // Normalize name, for safety and for a simple is-root check
+ hdr.Name = filepath.Clean(hdr.Name)
+
+ // Windows does not support filenames with colons in them. Ignore
+ // these files. This is not a problem though (although it might
+ // appear that it is). Let's suppose a client is running docker pull.
+ // The daemon it points to is Windows. Would it make sense for the
+ // client to be doing a docker pull Ubuntu for example (which has files
+ // with colons in the name under /usr/share/man/man3)? No, absolutely
+ // not as it would really only make sense that they were pulling a
+ // Windows image. However, for development, it is necessary to be able
+ // to pull Linux images which are in the repository.
+ //
+ // TODO Windows. Once the registry is aware of what images are Windows-
+ // specific or Linux-specific, this warning should be changed to an error
+ // to cater for the situation where someone does manage to upload a Linux
+ // image but have it tagged as Windows inadvertently.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(hdr.Name, ":") {
+ logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
+ continue
+ }
+ }
+
+ // Note: since these operations are platform specific, the slash must be too.
+ if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
+ // Not the root directory, ensure that the parent directory exists.
+ // This happened in some tests where an image had a tarfile without any
+ // parent directories.
+ parent := filepath.Dir(hdr.Name)
+ parentPath := filepath.Join(dest, parent)
+
+ if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+ err = system.MkdirAll(parentPath, 0600)
+ if err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ // Skip AUFS metadata dirs
+ if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
+ // Regular files inside /.wh..wh.plnk can be used as hardlink targets
+ // We don't want this directory, but we need the files in them so that
+ // such hardlinks can be resolved.
+ if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+ basename := filepath.Base(hdr.Name)
+ aufsHardlinks[basename] = hdr
+ if aufsTempdir == "" {
+ if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+ return 0, err
+ }
+ defer os.RemoveAll(aufsTempdir)
+ }
+ if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
+ return 0, err
+ }
+ }
+
+ if hdr.Name != WhiteoutOpaqueDir {
+ continue
+ }
+ }
+ path := filepath.Join(dest, hdr.Name)
+ rel, err := filepath.Rel(dest, path)
+ if err != nil {
+ return 0, err
+ }
+
+ // Note: since these operations are platform specific, the slash must be too.
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ }
+ base := filepath.Base(path)
+
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ dir := filepath.Dir(path)
+ if base == WhiteoutOpaqueDir {
+ _, err := os.Lstat(dir)
+ if err != nil {
+ return 0, err
+ }
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil // parent was deleted
+ }
+ return err
+ }
+ if path == dir {
+ return nil
+ }
+ if _, exists := unpackedPaths[path]; !exists {
+ err := os.RemoveAll(path)
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
+ }
+ } else {
+ // If path exists, we almost always just want to remove and replace it.
+ // The only exception is when it is a directory *and* the file from
+ // the layer is also a directory. Then we want to merge them (i.e.
+ // just apply the metadata from the layer).
+ if fi, err := os.Lstat(path); err == nil {
+ if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+ if err := os.RemoveAll(path); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ trBuf.Reset(tr)
+ srcData := io.Reader(trBuf)
+ srcHdr := hdr
+
+ // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+ // we manually retarget these into the temporary files we extracted them into
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
+ linkBasename := filepath.Base(hdr.Linkname)
+ srcHdr = aufsHardlinks[linkBasename]
+ if srcHdr == nil {
+ return 0, fmt.Errorf("Invalid aufs hardlink")
+ }
+ tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
+ if err != nil {
+ return 0, err
+ }
+ defer tmpFile.Close()
+ srcData = tmpFile
+ }
+
+ // if the options contain uid & gid maps, convert header uid/gid
+ // entries using the maps such that lchown sets the proper mapped
+ // uid/gid after writing the file. We only perform this mapping if
+ // the file isn't already owned by the remapped root UID or GID, as
+ // that specific uid/gid has no mapping from container -> host, and
+ // those files already have the proper ownership for inside the
+ // container.
+ if srcHdr.Uid != remappedRootUID {
+ xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
+ if err != nil {
+ return 0, err
+ }
+ srcHdr.Uid = xUID
+ }
+ if srcHdr.Gid != remappedRootGID {
+ xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
+ if err != nil {
+ return 0, err
+ }
+ srcHdr.Gid = xGID
+ }
+ if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
+ return 0, err
+ }
+
+ // Directory mtimes must be handled at the end to avoid further
+ // file creation in them to modify the directory mtime
+ if hdr.Typeflag == tar.TypeDir {
+ dirs = append(dirs, hdr)
+ }
+ unpackedPaths[path] = struct{}{}
+ }
+ }
+
+ for _, hdr := range dirs {
+ path := filepath.Join(dest, hdr.Name)
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
+ return 0, err
+ }
+ }
+
+ return size, nil
+}
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer Reader) (int64, error) {
+ return applyLayerHandler(dest, layer, &TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
+ return applyLayerHandler(dest, layer, options, false)
+}
+
+// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
+func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
+ dest = filepath.Clean(dest)
+
+ // We need to be able to set any perms
+ oldmask, err := system.Umask(0)
+ if err != nil {
+ return 0, err
+ }
+ defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
+
+ if decompress {
+ layer, err = DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return UnpackLayer(dest, layer, options)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
new file mode 100644
index 0000000000..cedd46a408
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/archive"
+)
+
+var (
+ flDebug = flag.Bool("D", false, "debugging output")
+ flNewDir = flag.String("newdir", "", "")
+ flOldDir = flag.String("olddir", "", "")
+ log = logrus.New()
+)
+
+func main() {
+ flag.Usage = func() {
+ fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+ fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+ flag.PrintDefaults()
+ }
+ flag.Parse()
+ log.Out = os.Stderr
+ if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ var newDir, oldDir string
+
+ if len(*flNewDir) == 0 {
+ var err error
+ newDir, err = ioutil.TempDir("", "docker-test-newDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(newDir)
+ if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ newDir = *flNewDir
+ }
+
+ if len(*flOldDir) == 0 {
+ // Assign to the outer oldDir; using ":=" here would shadow it and
+ // leave it empty for the ChangesDirs call below.
+ var err error
+ oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(oldDir)
+ } else {
+ oldDir = *flOldDir
+ }
+
+ changes, err := archive.ChangesDirs(newDir, oldDir)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ a, err := archive.ExportChanges(newDir, changes)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer a.Close()
+
+ i, err := io.Copy(os.Stdout, a)
+ if err != nil && err != io.EOF {
+ log.Fatal(err)
+ }
+ fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
new file mode 100644
index 0000000000..3448569b1e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
@@ -0,0 +1,16 @@
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ if time.IsZero() {
+ // Return UTIME_OMIT special value
+ ts.Sec = 0
+ ts.Nsec = ((1 << 30) - 2)
+ return
+ }
+ return syscall.NsecToTimespec(time.UnixNano())
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
new file mode 100644
index 0000000000..e85aac0540
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package archive
+
+import (
+ "syscall"
+ "time"
+)
+
+func timeToTimespec(time time.Time) (ts syscall.Timespec) {
+ nsec := int64(0)
+ if !time.IsZero() {
+ nsec = time.UnixNano()
+ }
+ return syscall.NsecToTimespec(nsec)
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
new file mode 100644
index 0000000000..d20478a10d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
+// filename this means that file has been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+
+// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir file means the directory has been made opaque: readdir
+// calls to this directory do not fall through to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
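+
+// For example (illustrative): deleting /etc/passwd in a layer is exported as
+// an entry named "etc/.wh.passwd", and making /etc opaque is exported as
+// "etc/.wh..wh..opq".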
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
new file mode 100644
index 0000000000..dfb335c0b6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go
@@ -0,0 +1,59 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "io/ioutil"
+)
+
+// Generate generates a new archive from the content provided
+// as input.
+//
+// `files` is a sequence of path/content pairs. A new file is
+// added to the archive for each pair.
+// If the last pair is incomplete, the file is created with an
+// empty content. For example:
+//
+// Generate("foo.txt", "hello world", "emptyfile")
+//
+// The above call will return an archive with 2 files:
+// * ./foo.txt with content "hello world"
+// * ./emptyfile with empty content
+//
+// FIXME: stream content instead of buffering
+// FIXME: specify permissions and other archive metadata
+func Generate(input ...string) (Archive, error) {
+ files := parseStringPairs(input...)
+ buf := new(bytes.Buffer)
+ tw := tar.NewWriter(buf)
+ for _, file := range files {
+ name, content := file[0], file[1]
+ hdr := &tar.Header{
+ Name: name,
+ Size: int64(len(content)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return nil, err
+ }
+ if _, err := tw.Write([]byte(content)); err != nil {
+ return nil, err
+ }
+ }
+ if err := tw.Close(); err != nil {
+ return nil, err
+ }
+ return ioutil.NopCloser(buf), nil
+}
+
+func parseStringPairs(input ...string) (output [][2]string) {
+ output = make([][2]string, 0, len(input)/2+1)
+ for i := 0; i < len(input); i += 2 {
+ var pair [2]string
+ pair[0] = input[i]
+ if i+1 < len(input) {
+ pair[1] = input[i+1]
+ }
+ output = append(output, pair)
+ }
+ return
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
new file mode 100644
index 0000000000..c00a0cdee3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
@@ -0,0 +1,283 @@
+package fileutils
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "text/scanner"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// exclusion returns true if the specified pattern is an exclusion
+func exclusion(pattern string) bool {
+ return pattern[0] == '!'
+}
+
+// empty returns true if the specified pattern is empty
+func empty(pattern string) bool {
+ return pattern == ""
+}
+
+// CleanPatterns takes a slice of patterns and returns a new
+// slice of patterns cleaned with filepath.Clean, stripped
+// of any empty patterns, and lets the caller know whether the
+// slice contains any exception patterns (prefixed with !).
+func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
+ // Loop over exclusion patterns and:
+ // 1. Clean them up.
+ // 2. Indicate whether we are dealing with any exception rules.
+ // 3. Error if we see a single exclusion marker on its own (!).
+ cleanedPatterns := []string{}
+ patternDirs := [][]string{}
+ exceptions := false
+ for _, pattern := range patterns {
+ // Eliminate leading and trailing whitespace.
+ pattern = strings.TrimSpace(pattern)
+ if empty(pattern) {
+ continue
+ }
+ if exclusion(pattern) {
+ if len(pattern) == 1 {
+ return nil, nil, false, errors.New("Illegal exclusion pattern: !")
+ }
+ exceptions = true
+ }
+ pattern = filepath.Clean(pattern)
+ cleanedPatterns = append(cleanedPatterns, pattern)
+ if exclusion(pattern) {
+ pattern = pattern[1:]
+ }
+ patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
+ }
+
+ return cleanedPatterns, patternDirs, exceptions, nil
+}
+
+// Matches returns true if file matches any of the patterns
+// and isn't excluded by any of the subsequent patterns.
+func Matches(file string, patterns []string) (bool, error) {
+ file = filepath.Clean(file)
+
+ if file == "." {
+ // Don't let them exclude everything, kind of silly.
+ return false, nil
+ }
+
+ patterns, patDirs, _, err := CleanPatterns(patterns)
+ if err != nil {
+ return false, err
+ }
+
+ return OptimizedMatches(file, patterns, patDirs)
+}
+
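+// For example (illustrative):
+//
+//	matched, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"})
+//	// matched == false: the exception pattern re-includes the file.
+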
+// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
+// It will assume that the inputs have been preprocessed and therefore the function
+// doesn't need to do as much error checking and clean-up. This was done to avoid
+// repeating these steps on each file being checked during the archive process.
+// The more generic fileutils.Matches() can't make these assumptions.
+func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
+ matched := false
+ file = filepath.FromSlash(file)
+ parentPath := filepath.Dir(file)
+ parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
+
+ for i, pattern := range patterns {
+ negative := false
+
+ if exclusion(pattern) {
+ negative = true
+ pattern = pattern[1:]
+ }
+
+ match, err := regexpMatch(pattern, file)
+ if err != nil {
+ return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
+ }
+
+ if !match && parentPath != "." {
+ // Check to see if the pattern matches one of our parent dirs.
+ if len(patDirs[i]) <= len(parentPathDirs) {
+ match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
+ strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
+ }
+ }
+
+ if match {
+ matched = !negative
+ }
+ }
+
+ if matched {
+ logrus.Debugf("Skipping excluded path: %s", file)
+ }
+
+ return matched, nil
+}
+
+// regexpMatch tries to match the logic of filepath.Match but
+// does so using regexp logic. We do this so that we can expand the
+// wildcard set to include other things, like "**" to mean any number
+// of directories. This means that we should be backwards compatible
+// with filepath.Match(). We'll end up supporting more stuff, due to
+// the fact that we're using regexp, but that's ok - it does no harm.
+//
+// As per the comment in Go's filepath.Match, on Windows, escaping
+// is disabled. Instead, '\\' is treated as path separator.
+func regexpMatch(pattern, path string) (bool, error) {
+ regStr := "^"
+
+ // Do some syntax checking on the pattern.
+ // filepath's Match() has some really weird rules that are inconsistent
+ // so instead of trying to dup their logic, just call Match() for its
+ // error state and if there is an error in the pattern return it.
+ // If this becomes an issue we can remove this since its really only
+ // needed in the error (syntax) case - which isn't really critical.
+ if _, err := filepath.Match(pattern, path); err != nil {
+ return false, err
+ }
+
+ // Go through the pattern and convert it to a regexp.
+ // We use a scanner so we can support utf-8 chars.
+ var scan scanner.Scanner
+ scan.Init(strings.NewReader(pattern))
+
+ sl := string(os.PathSeparator)
+ escSL := sl
+ if sl == `\` {
+ escSL += `\`
+ }
+
+ for scan.Peek() != scanner.EOF {
+ ch := scan.Next()
+
+ if ch == '*' {
+ if scan.Peek() == '*' {
+ // is some flavor of "**"
+ scan.Next()
+
+ if scan.Peek() == scanner.EOF {
+ // is "**EOF" - to align with .gitignore just accept all
+ regStr += ".*"
+ } else {
+ // is "**"
+ regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
+ }
+
+ // Treat **/ as ** so eat the "/"
+ if string(scan.Peek()) == sl {
+ scan.Next()
+ }
+ } else {
+ // is "*" so map it to anything but "/"
+ regStr += "[^" + escSL + "]*"
+ }
+ } else if ch == '?' {
+ // "?" is any char except "/"
+ regStr += "[^" + escSL + "]"
+ } else if strings.Index(".$", string(ch)) != -1 {
+ // Escape some regexp special chars that have no meaning
+ // in golang's filepath.Match
+ regStr += `\` + string(ch)
+ } else if ch == '\\' {
+ // Escape the next char. Note that a trailing \ in the pattern
+ // will be left alone (but we need to escape it).
+ if sl == `\` {
+ // On windows map "\" to "\\", meaning an escaped backslash,
+ // and then just continue because filepath.Match on
+ // Windows doesn't allow escaping at all
+ regStr += escSL
+ continue
+ }
+ if scan.Peek() != scanner.EOF {
+ regStr += `\` + string(scan.Next())
+ } else {
+ regStr += `\`
+ }
+ } else {
+ regStr += string(ch)
+ }
+ }
+
+ regStr += "$"
+
+ res, err := regexp.MatchString(regStr, path)
+
+ // Map regexp's error to filepath's so no one knows we're not using filepath
+ if err != nil {
+ err = filepath.ErrBadPattern
+ }
+
+ return res, err
+}
+
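+// For example (illustrative), on Unix the pattern "a/**/b" compiles to the
+// regexp ^a/((.*/)|([^/]*))b$, so it matches both "a/b" and "a/x/y/b";
+// plain filepath.Match("a/**/b", ...) would match neither.
+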
+// CopyFile copies from src to dst until either EOF is reached
+// on src or an error occurs. It verifies src exists and removes
+// the dst if it exists.
+func CopyFile(src, dst string) (int64, error) {
+ cleanSrc := filepath.Clean(src)
+ cleanDst := filepath.Clean(dst)
+ if cleanSrc == cleanDst {
+ return 0, nil
+ }
+ sf, err := os.Open(cleanSrc)
+ if err != nil {
+ return 0, err
+ }
+ defer sf.Close()
+ if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
+ return 0, err
+ }
+ df, err := os.Create(cleanDst)
+ if err != nil {
+ return 0, err
+ }
+ defer df.Close()
+ return io.Copy(df, sf)
+}
+
+// ReadSymlinkedDirectory returns the target directory of a symlink.
+// An error is returned if the target of the symbolic link is not a directory.
+func ReadSymlinkedDirectory(path string) (string, error) {
+ var realPath string
+ var err error
+ if realPath, err = filepath.Abs(path); err != nil {
+ return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
+ }
+ if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
+ return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
+ }
+ realPathInfo, err := os.Stat(realPath)
+ if err != nil {
+ return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
+ }
+ if !realPathInfo.Mode().IsDir() {
+ return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
+ }
+ return realPath, nil
+}
+
+// CreateIfNotExists creates a file or a directory only if it does not already exist.
+func CreateIfNotExists(path string, isDir bool) error {
+ if _, err := os.Stat(path); err != nil {
+ if os.IsNotExist(err) {
+ if isDir {
+ return os.MkdirAll(path, 0755)
+ }
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return err
+ }
+ f, err := os.OpenFile(path, os.O_CREATE, 0755)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go
new file mode 100644
index 0000000000..ccd648fac3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go
@@ -0,0 +1,27 @@
+package fileutils
+
+import (
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
+)
+
+// GetTotalUsedFds returns the number of used File Descriptors by
+// executing `lsof -p PID`
+func GetTotalUsedFds() int {
+ pid := os.Getpid()
+
+ cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))
+
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return -1
+ }
+
+ outputStr := strings.TrimSpace(string(output))
+
+ fds := strings.Split(outputStr, "\n")
+
+ return len(fds) - 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
new file mode 100644
index 0000000000..0f2cb7ab93
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds returns the number of used File Descriptors.
+// On Solaris these limits are per process and not systemwide.
+func GetTotalUsedFds() int {
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
new file mode 100644
index 0000000000..d5c3abf568
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
@@ -0,0 +1,22 @@
+// +build linux freebsd
+
+package fileutils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// GetTotalUsedFds returns the number of used File Descriptors by
+// reading them from the /proc filesystem.
+func GetTotalUsedFds() int {
+ if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+ logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ } else {
+ return len(fds)
+ }
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
new file mode 100644
index 0000000000..5ec21cace5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds returns the number of used File Descriptors. Not supported
+// on Windows.
+func GetTotalUsedFds() int {
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go
new file mode 100644
index 0000000000..8154e83f0c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir.go
@@ -0,0 +1,39 @@
+package homedir
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/opencontainers/runc/libcontainer/user"
+)
+
+// Key returns the env var name for the user's home dir based on
+// the platform being run on
+func Key() string {
+ if runtime.GOOS == "windows" {
+ return "USERPROFILE"
+ }
+ return "HOME"
+}
+
+// Get returns the home directory of the current user with the help of
+// environment variables depending on the target operating system.
+// Returned path should be used with "path/filepath" to form new paths.
+func Get() string {
+ home := os.Getenv(Key())
+ if home == "" && runtime.GOOS != "windows" {
+ if u, err := user.CurrentUser(); err == nil {
+ return u.Home
+ }
+ }
+ return home
+}
+
+// GetShortcutString returns the string that is the shortcut to the user's
+// home directory in the native shell of the platform being run on.
+func GetShortcutString() string {
+ if runtime.GOOS == "windows" {
+ return "%USERPROFILE%" // be careful while using in format functions
+ }
+ return "~"
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
new file mode 100644
index 0000000000..6bca466286
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
@@ -0,0 +1,197 @@
+package idtools
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// IDMap contains a single entry for user namespace range remapping. An array
+// of IDMap entries represents the structure that will be provided to the Linux
+// kernel for creating a user namespace.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+type subIDRange struct {
+ Start int
+ Length int
+}
+
+type ranges []subIDRange
+
+func (e ranges) Len() int { return len(e) }
+func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
+
+const (
+ subuidFileName string = "/etc/subuid"
+ subgidFileName string = "/etc/subgid"
+)
+
+// MkdirAllAs creates a directory (including any missing directories along the
+// path) and then modifies ownership to the requested uid/gid. If the
+// directory already exists, this function will still change ownership to the
+// requested uid/gid pair.
+func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
+}
+
+// MkdirAllNewAs creates a directory (including any missing directories along
+// the path) and then modifies ownership ONLY of newly created directories to
+// the requested uid/gid. If the directories along the path exist, no change
+// of ownership will be performed.
+func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
+}
+
+// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
+}
+
+// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to "real" 0/0
+func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+ var uid, gid int
+
+ if uidMap != nil {
+ xUID, err := ToHost(0, uidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ uid = xUID
+ }
+ if gidMap != nil {
+ xGID, err := ToHost(0, gidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid = xGID
+ }
+ return uid, gid, nil
+}
+
+// ToContainer takes an id mapping, and uses it to translate a
+// host ID to the remapped ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func ToContainer(hostID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return hostID, nil
+ }
+ for _, m := range idMap {
+ if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
+ contID := m.ContainerID + (hostID - m.HostID)
+ return contID, nil
+ }
+ }
+ return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// ToHost takes an id mapping and a remapped ID, and translates the
+// ID to the mapped host ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id #
+func ToHost(contID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return contID, nil
+ }
+ for _, m := range idMap {
+ if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (contID - m.ContainerID)
+ return hostID, nil
+ }
+ }
+ return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
+
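+// For example (illustrative), with the single mapping
+// m := []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}:
+//
+//	ToHost(0, m)           // -> 100000
+//	ToContainer(100005, m) // -> 5
+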
+// CreateIDMappings takes a requested user and group name and
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
+ subuidRanges, err := parseSubuid(username)
+ if err != nil {
+ return nil, nil, err
+ }
+ subgidRanges, err := parseSubgid(groupname)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(subuidRanges) == 0 {
+ return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
+ }
+ if len(subgidRanges) == 0 {
+ return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
+ }
+
+ return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+ idMap := []IDMap{}
+
+ // sort the ranges by lowest ID first
+ sort.Sort(subidRanges)
+ containerID := 0
+ for _, idrange := range subidRanges {
+ idMap = append(idMap, IDMap{
+ ContainerID: containerID,
+ HostID: idrange.Start,
+ Size: idrange.Length,
+ })
+ containerID = containerID + idrange.Length
+ }
+ return idMap
+}
+
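+// For example (illustrative), the sub-ID ranges 100000:65536 and 300000:1000
+// become container IDs 0-65535 mapped to host IDs 100000-165535, followed by
+// container IDs 65536-66535 mapped to host IDs 300000-300999.
+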
+func parseSubuid(username string) (ranges, error) {
+ return parseSubidFile(subuidFileName, username)
+}
+
+func parseSubgid(username string) (ranges, error) {
+ return parseSubidFile(subgidFileName, username)
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username. If the special value
+// "ALL" is supplied for username, then all ranges in the file will be returned
+func parseSubidFile(path, username string) (ranges, error) {
+ var rangeList ranges
+
+ subidFile, err := os.Open(path)
+ if err != nil {
+ return rangeList, err
+ }
+ defer subidFile.Close()
+
+ s := bufio.NewScanner(subidFile)
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return rangeList, err
+ }
+
+ text := strings.TrimSpace(s.Text())
+ if text == "" || strings.HasPrefix(text, "#") {
+ continue
+ }
+ parts := strings.Split(text, ":")
+ if len(parts) != 3 {
+ return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+ }
+ if parts[0] == username || username == "ALL" {
+ startid, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ length, err := strconv.Atoi(parts[2])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ rangeList = append(rangeList, subIDRange{startid, length})
+ }
+ }
+ return rangeList, nil
+}
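
To illustrate the translation arithmetic above, here is a minimal, hypothetical sketch (not part of this diff) assuming a single subordinate range that maps container IDs 0-65535 onto host IDs 100000-165535:

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/idtools"
    )

    func main() {
        idMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

        hostID, _ := idtools.ToHost(0, idMap) // container root
        fmt.Println(hostID)                   // 100000

        contID, _ := idtools.ToContainer(100123, idMap)
        fmt.Println(contID) // 123
    }
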
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
new file mode 100644
index 0000000000..b57d6ef125
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -0,0 +1,60 @@
+// +build !windows
+
+package idtools
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+ // make a slice containing the original path asked for, plus (for mkAll == true)
+ // all path components leading up to the complete path that don't exist before we
+ // MkdirAll them, so that we can chown all of them properly at the end.
+ // If chownExisting is false, we won't chown the full directory path if it exists.
+ var paths []string
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ paths = []string{path}
+ } else if err == nil && chownExisting {
+ if err := os.Chown(path, ownerUID, ownerGID); err != nil {
+ return err
+ }
+ // short-circuit--we were called with an existing directory and chown was requested
+ return nil
+ } else if err == nil {
+ // nothing to do; directory path fully exists already and chown was NOT requested
+ return nil
+ }
+
+ if mkAll {
+ // walk back to "/" looking for directories which do not exist
+ // and add them to the paths array for chown after creation
+ dirPath := path
+ for {
+ dirPath = filepath.Dir(dirPath)
+ if dirPath == "/" {
+ break
+ }
+ if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
+ paths = append(paths, dirPath)
+ }
+ }
+ if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ } else {
+ if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ }
+ // even if it existed, we will chown the requested path + any subpaths that
+ // didn't exist when we called MkdirAll
+ for _, pathComponent := range paths {
+ if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
new file mode 100644
index 0000000000..c9e3c937cd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package idtools
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// Platforms such as Windows do not support the UID/GID concept. So make this
+// just a wrapper around system.MkdirAll.
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+ if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
new file mode 100644
index 0000000000..4a4aaed04d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
@@ -0,0 +1,188 @@
+package idtools
+
+import (
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// add a user and/or group to Linux /etc/passwd, /etc/group using standard
+// Linux distribution commands:
+// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group
+// useradd -r -s /bin/false
+
+var (
+ once sync.Once
+ userCommand string
+
+ cmdTemplates = map[string]string{
+ "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
+ "useradd": "-r -s /bin/false %s",
+ "usermod": "-%s %d-%d %s",
+ }
+
+ idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
+ // default length for a UID/GID subordinate range
+ defaultRangeLen = 65536
+ defaultRangeStart = 100000
+ userMod = "usermod"
+)
+
+func resolveBinary(binname string) (string, error) {
+ binaryPath, err := exec.LookPath(binname)
+ if err != nil {
+ return "", err
+ }
+ resolvedPath, err := filepath.EvalSymlinks(binaryPath)
+ if err != nil {
+ return "", err
+ }
+ // only treat the resolution as successful if the final resolved
+ // binary basename matches what was searched for
+ if filepath.Base(resolvedPath) == binname {
+ return resolvedPath, nil
+ }
+ return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
+}
+
+// AddNamespaceRangesUser takes a username and uses the standard system
+// utility to create a system user/group pair used to hold the
+// /etc/sub{uid,gid} ranges which will be used for user namespace
+// mapping ranges in containers.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ if err := addUser(name); err != nil {
+ return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
+ }
+
+ // Query the system for the created uid and gid pair
+ out, err := execCmd("id", name)
+ if err != nil {
+ return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
+ }
+ matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
+ if len(matches) != 3 {
+ return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
+ }
+ uid, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
+ }
+ gid, err := strconv.Atoi(matches[2])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
+ }
+
+ // Now we need to create the subuid/subgid ranges for our new user/group (system users
+ // do not get auto-created ranges in subuid/subgid)
+
+ if err := createSubordinateRanges(name); err != nil {
+ return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
+ }
+ return uid, gid, nil
+}
+
+func addUser(userName string) error {
+ once.Do(func() {
+ // set up which commands are used for adding users/groups dependent on distro
+ if _, err := resolveBinary("adduser"); err == nil {
+ userCommand = "adduser"
+ } else if _, err := resolveBinary("useradd"); err == nil {
+ userCommand = "useradd"
+ }
+ })
+ if userCommand == "" {
+ return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
+ }
+ args := fmt.Sprintf(cmdTemplates[userCommand], userName)
+ out, err := execCmd(userCommand, args)
+ if err != nil {
+ return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
+ }
+ return nil
+}
+
+func createSubordinateRanges(name string) error {
+
+ // first, we should verify that ranges weren't automatically created
+ // by the distro tooling
+ ranges, err := parseSubuid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no UID ranges; let's create one
+ startID, err := findNextUIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subuid range: %v", err)
+ }
+ out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
+ if err != nil {
+ return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+
+ ranges, err = parseSubgid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no GID ranges; let's create one
+ startID, err := findNextGIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subgid range: %v", err)
+ }
+ out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
+ if err != nil {
+ return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+ return nil
+}
+
+func findNextUIDRange() (int, error) {
+ ranges, err := parseSubuid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextGIDRange() (int, error) {
+ ranges, err := parseSubgid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextRangeStart(rangeList ranges) (int, error) {
+ startID := defaultRangeStart
+ for _, arange := range rangeList {
+ if wouldOverlap(arange, startID) {
+ startID = arange.Start + arange.Length
+ }
+ }
+ return startID, nil
+}
+
+func wouldOverlap(arange subIDRange, ID int) bool {
+ low := ID
+ high := ID + defaultRangeLen
+ if (low >= arange.Start && low <= arange.Start+arange.Length) ||
+ (high <= arange.Start+arange.Length && high >= arange.Start) {
+ return true
+ }
+ return false
+}
+
+func execCmd(cmd, args string) ([]byte, error) {
+ execCmd := exec.Command(cmd, strings.Split(args, " ")...)
+ return execCmd.CombinedOutput()
+}
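
The next-free-range search above walks the sorted ranges and bumps the candidate start past any range it would collide with. A standalone sketch of that logic (local types are used because ranges and subIDRange are unexported, and the overlap check is slightly simplified):

    package main

    import "fmt"

    type span struct{ start, length int }

    // nextRangeStart mirrors findNextRangeStart/wouldOverlap: ranges are
    // assumed sorted by start, and the candidate jumps past each overlap.
    func nextRangeStart(existing []span, defaultStart, defaultLen int) int {
        start := defaultStart
        for _, s := range existing {
            if start < s.start+s.length && start+defaultLen >= s.start {
                start = s.start + s.length
            }
        }
        return start
    }

    func main() {
        fmt.Println(nextRangeStart([]span{{100000, 65536}}, 100000, 65536)) // 165536
    }
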
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
new file mode 100644
index 0000000000..d98b354cbd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux
+
+package idtools
+
+import "fmt"
+
+// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
+// and calls the appropriate helper function to add the group and then
+// the user to the group in /etc/group and /etc/passwd respectively.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
new file mode 100644
index 0000000000..3d737b3e19
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
@@ -0,0 +1,51 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+)
+
+var errBufferFull = errors.New("buffer is full")
+
+type fixedBuffer struct {
+ buf []byte
+ pos int
+ lastRead int
+}
+
+func (b *fixedBuffer) Write(p []byte) (int, error) {
+ n := copy(b.buf[b.pos:cap(b.buf)], p)
+ b.pos += n
+
+ if n < len(p) {
+ if b.pos == cap(b.buf) {
+ return n, errBufferFull
+ }
+ return n, io.ErrShortWrite
+ }
+ return n, nil
+}
+
+func (b *fixedBuffer) Read(p []byte) (int, error) {
+ n := copy(p, b.buf[b.lastRead:b.pos])
+ b.lastRead += n
+ return n, nil
+}
+
+func (b *fixedBuffer) Len() int {
+ return b.pos - b.lastRead
+}
+
+func (b *fixedBuffer) Cap() int {
+ return cap(b.buf)
+}
+
+func (b *fixedBuffer) Reset() {
+ b.pos = 0
+ b.lastRead = 0
+ b.buf = b.buf[:0]
+}
+
+func (b *fixedBuffer) String() string {
+ return string(b.buf[b.lastRead:b.pos])
+}
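
The Write contract above is what lets BytesPipe (next file) decide when to grow: a fixedBuffer never reallocates, and a write that fills it exactly reports errBufferFull. A tiny sketch of that behavior with a plain slice, since fixedBuffer itself is unexported:

    package main

    import "fmt"

    func main() {
        // cap is fixed at 4; writing 5 bytes copies only what fits
        buf := make([]byte, 4)
        n := copy(buf, "hello") // n == 4, buffer is now exactly full
        full := n < len("hello") && n == cap(buf)
        fmt.Println(n, string(buf[:n]), full) // 4 hell true
    }
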
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
new file mode 100644
index 0000000000..72a04f3491
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
@@ -0,0 +1,186 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// maxCap is the highest capacity to use in byte slices that buffer data.
+const maxCap = 1e6
+
+// minCap is the lowest capacity to use in byte slices that buffer data
+const minCap = 64
+
+// blockThreshold is the minimum number of bytes in the buffer which will cause
+// a write to BytesPipe to block when allocating a new slice.
+const blockThreshold = 1e6
+
+var (
+ // ErrClosed is returned when Write is called on a closed BytesPipe.
+ ErrClosed = errors.New("write to closed BytesPipe")
+
+ bufPools = make(map[int]*sync.Pool)
+ bufPoolsLock sync.Mutex
+)
+
+// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (a
+// FIFO queue). All written data may be read at most once. BytesPipe
+// allocates and releases byte slices to adjust to current needs, so the
+// buffer won't stay overgrown after peak loads.
+type BytesPipe struct {
+ mu sync.Mutex
+ wait *sync.Cond
+ buf []*fixedBuffer
+ bufLen int
+ closeErr error // error to return from next Read. set to nil if not closed.
+}
+
+// NewBytesPipe creates a new BytesPipe. It starts with a single fixed
+// buffer of minCap (64) bytes; additional buffers are allocated
+// dynamically as writes outgrow the current one.
+func NewBytesPipe() *BytesPipe {
+ bp := &BytesPipe{}
+ bp.buf = append(bp.buf, getBuffer(minCap))
+ bp.wait = sync.NewCond(&bp.mu)
+ return bp
+}
+
+// Write writes p to BytesPipe.
+// It can allocate new []byte slices in a process of writing.
+func (bp *BytesPipe) Write(p []byte) (int, error) {
+ bp.mu.Lock()
+
+ written := 0
+loop0:
+ for {
+ if bp.closeErr != nil {
+ bp.mu.Unlock()
+ return written, ErrClosed
+ }
+
+ if len(bp.buf) == 0 {
+ bp.buf = append(bp.buf, getBuffer(minCap))
+ }
+ // get the last buffer
+ b := bp.buf[len(bp.buf)-1]
+
+ n, err := b.Write(p)
+ written += n
+ bp.bufLen += n
+
+ // errBufferFull is an error we expect to get if the buffer is full
+ if err != nil && err != errBufferFull {
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return written, err
+ }
+
+ // if there was enough room to write all then break
+ if len(p) == n {
+ break
+ }
+
+ // more data: write to the next slice
+ p = p[n:]
+
+ // make sure the buffer doesn't grow too big from this write
+ for bp.bufLen >= blockThreshold {
+ bp.wait.Wait()
+ if bp.closeErr != nil {
+ continue loop0
+ }
+ }
+
+ // add new byte slice to the buffers slice and continue writing
+ nextCap := b.Cap() * 2
+ if nextCap > maxCap {
+ nextCap = maxCap
+ }
+ bp.buf = append(bp.buf, getBuffer(nextCap))
+ }
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return written, nil
+}
+
+// CloseWithError causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) CloseWithError(err error) error {
+ bp.mu.Lock()
+ if err != nil {
+ bp.closeErr = err
+ } else {
+ bp.closeErr = io.EOF
+ }
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return nil
+}
+
+// Close causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) Close() error {
+ return bp.CloseWithError(nil)
+}
+
+// Read reads bytes from BytesPipe.
+// Data can be read only once.
+func (bp *BytesPipe) Read(p []byte) (n int, err error) {
+ bp.mu.Lock()
+ if bp.bufLen == 0 {
+ if bp.closeErr != nil {
+ bp.mu.Unlock()
+ return 0, bp.closeErr
+ }
+ bp.wait.Wait()
+ if bp.bufLen == 0 && bp.closeErr != nil {
+ err := bp.closeErr
+ bp.mu.Unlock()
+ return 0, err
+ }
+ }
+
+ for bp.bufLen > 0 {
+ b := bp.buf[0]
+ read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
+ n += read
+ bp.bufLen -= read
+
+ if b.Len() == 0 {
+ // it's empty so return it to the pool and move to the next one
+ returnBuffer(b)
+ bp.buf[0] = nil
+ bp.buf = bp.buf[1:]
+ }
+
+ if len(p) == read {
+ break
+ }
+
+ p = p[read:]
+ }
+
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return
+}
+
+func returnBuffer(b *fixedBuffer) {
+ b.Reset()
+ bufPoolsLock.Lock()
+ pool := bufPools[b.Cap()]
+ bufPoolsLock.Unlock()
+ if pool != nil {
+ pool.Put(b)
+ }
+}
+
+func getBuffer(size int) *fixedBuffer {
+ bufPoolsLock.Lock()
+ pool, ok := bufPools[size]
+ if !ok {
+ pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
+ bufPools[size] = pool
+ }
+ bufPoolsLock.Unlock()
+ return pool.Get().(*fixedBuffer)
+}
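
A minimal usage sketch: one goroutine writes and closes, the reader drains the pipe until io.EOF. Writes only block once roughly 1 MB is buffered (blockThreshold), so short bursts like this never stall:

    package main

    import (
        "fmt"
        "io/ioutil"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        bp := ioutils.NewBytesPipe()

        go func() {
            bp.Write([]byte("hello, "))
            bp.Write([]byte("pipe"))
            bp.Close() // readers drain buffered data, then see io.EOF
        }()

        data, err := ioutil.ReadAll(bp)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(data)) // hello, pipe
    }
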
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go
new file mode 100644
index 0000000000..0b04b0ba3e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go
@@ -0,0 +1,22 @@
+package ioutils
+
+import (
+ "fmt"
+ "io"
+)
+
+// FprintfIfNotEmpty prints the string value if it's not empty
+func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
+ if value != "" {
+ return fmt.Fprintf(w, format, value)
+ }
+ return 0, nil
+}
+
+// FprintfIfTrue prints the boolean value if it's true
+func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
+ if ok {
+ return fmt.Fprintf(w, format, ok)
+ }
+ return 0, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
new file mode 100644
index 0000000000..6dc50a03dc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
@@ -0,0 +1,82 @@
+package ioutils
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// NewAtomicFileWriter returns a WriteCloser so that writing to it writes to a
+// temporary file, and closing it atomically renames the temporary file to the
+// destination path. Writing and closing concurrently are not allowed.
+func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
+ f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+ if err != nil {
+ return nil, err
+ }
+
+ abspath, err := filepath.Abs(filename)
+ if err != nil {
+ return nil, err
+ }
+ return &atomicFileWriter{
+ f: f,
+ fn: abspath,
+ perm: perm,
+ }, nil
+}
+
+// AtomicWriteFile atomically writes data to a file named by filename.
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := NewAtomicFileWriter(filename, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ f.(*atomicFileWriter).writeErr = err
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+type atomicFileWriter struct {
+ f *os.File
+ fn string
+ writeErr error
+ perm os.FileMode
+}
+
+func (w *atomicFileWriter) Write(dt []byte) (int, error) {
+ n, err := w.f.Write(dt)
+ if err != nil {
+ w.writeErr = err
+ }
+ return n, err
+}
+
+func (w *atomicFileWriter) Close() (retErr error) {
+ defer func() {
+ if retErr != nil || w.writeErr != nil {
+ os.Remove(w.f.Name())
+ }
+ }()
+ if err := w.f.Sync(); err != nil {
+ w.f.Close()
+ return err
+ }
+ if err := w.f.Close(); err != nil {
+ return err
+ }
+ if err := os.Chmod(w.f.Name(), w.perm); err != nil {
+ return err
+ }
+ if w.writeErr == nil {
+ return os.Rename(w.f.Name(), w.fn)
+ }
+ return nil
+}
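
Usage is the same as ioutil.WriteFile, but a concurrent reader of the destination never observes a partial file: the data goes to a hidden temp file in the same directory, is fsynced and chmodded, and then renamed over the target. A short sketch (the file name here is purely illustrative):

    package main

    import "github.com/docker/docker/pkg/ioutils"

    func main() {
        // hypothetical config path; the rename is atomic because the temp
        // file lives in the same directory (and thus the same filesystem)
        err := ioutils.AtomicWriteFile("config.json", []byte(`{"debug":true}`), 0600)
        if err != nil {
            panic(err)
        }
    }
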
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go
new file mode 100644
index 0000000000..0d2d76b479
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go
@@ -0,0 +1,226 @@
+package ioutils
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+type pos struct {
+ idx int
+ offset int64
+}
+
+type multiReadSeeker struct {
+ readers []io.ReadSeeker
+ pos *pos
+ posIdx map[io.ReadSeeker]int
+}
+
+func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ var tmpOffset int64
+ switch whence {
+ case os.SEEK_SET:
+ for i, rdr := range r.readers {
+ // get size of the current reader
+ s, err := rdr.Seek(0, os.SEEK_END)
+ if err != nil {
+ return -1, err
+ }
+
+ if offset > tmpOffset+s {
+ if i == len(r.readers)-1 {
+ rdrOffset := offset - tmpOffset // remainder past the end of the last reader
+ if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
+ return -1, err
+ }
+ r.pos = &pos{i, rdrOffset}
+ return offset, nil
+ }
+
+ tmpOffset += s
+ continue
+ }
+
+ rdrOffset := offset - tmpOffset
+ idx := i
+
+ rdr.Seek(rdrOffset, os.SEEK_SET)
+ // make sure all following readers are at 0
+ for _, rdr := range r.readers[i+1:] {
+ rdr.Seek(0, os.SEEK_SET)
+ }
+
+ if rdrOffset == s && i != len(r.readers)-1 {
+ idx++
+ rdrOffset = 0
+ }
+ r.pos = &pos{idx, rdrOffset}
+ return offset, nil
+ }
+ case os.SEEK_END:
+ for _, rdr := range r.readers {
+ s, err := rdr.Seek(0, os.SEEK_END)
+ if err != nil {
+ return -1, err
+ }
+ tmpOffset += s
+ }
+ r.Seek(tmpOffset+offset, os.SEEK_SET)
+ return tmpOffset + offset, nil
+ case os.SEEK_CUR:
+ if r.pos == nil {
+ return r.Seek(offset, os.SEEK_SET)
+ }
+ // Just return the current offset
+ if offset == 0 {
+ return r.getCurOffset()
+ }
+
+ curOffset, err := r.getCurOffset()
+ if err != nil {
+ return -1, err
+ }
+ rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
+ if err != nil {
+ return -1, err
+ }
+
+ r.pos = &pos{r.posIdx[rdr], rdrOffset}
+ return curOffset + offset, nil
+ default:
+ return -1, fmt.Errorf("Invalid whence: %d", whence)
+ }
+
+ return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
+}
+
+func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
+ var offsetTo int64
+
+ for _, rdr := range r.readers {
+ size, err := getReadSeekerSize(rdr)
+ if err != nil {
+ return nil, -1, err
+ }
+ // the target offset falls inside this reader: return it along
+ // with the position relative to the reader's start
+ if offsetTo+size > offset {
+ return rdr, offset - offsetTo, nil
+ }
+ // offset lies past the end of the last reader: return the last
+ // reader with the remainder as an out-of-range relative offset
+ if rdr == r.readers[len(r.readers)-1] {
+ return rdr, offset - offsetTo, nil
+ }
+ offsetTo += size
+ }
+
+ return nil, 0, nil
+}
+
+func (r *multiReadSeeker) getCurOffset() (int64, error) {
+ var totalSize int64
+ for _, rdr := range r.readers[:r.pos.idx+1] {
+ if r.posIdx[rdr] == r.pos.idx {
+ totalSize += r.pos.offset
+ break
+ }
+
+ size, err := getReadSeekerSize(rdr)
+ if err != nil {
+ return -1, fmt.Errorf("error getting seeker size: %v", err)
+ }
+ totalSize += size
+ }
+ return totalSize, nil
+}
+
+func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
+ var offset int64
+ for _, rs := range r.readers {
+ if rs == rdr {
+ break
+ }
+
+ size, err := getReadSeekerSize(rs)
+ if err != nil {
+ return -1, err
+ }
+ offset += size
+ }
+ return offset, nil
+}
+
+func (r *multiReadSeeker) Read(b []byte) (int, error) {
+ if r.pos == nil {
+ r.pos = &pos{0, 0}
+ }
+
+ bCap := int64(len(b)) // read no more than the caller's slice can return
+ buf := bytes.NewBuffer(nil)
+ var rdr io.ReadSeeker
+
+ for _, rdr = range r.readers[r.pos.idx:] {
+ readBytes, err := io.CopyN(buf, rdr, bCap)
+ if err != nil && err != io.EOF {
+ return -1, err
+ }
+ bCap -= readBytes
+
+ if bCap == 0 {
+ break
+ }
+ }
+
+ rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
+ if err != nil {
+ return -1, err
+ }
+ r.pos = &pos{r.posIdx[rdr], rdrPos}
+ return buf.Read(b)
+}
+
+func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
+ // save the current position
+ pos, err := rdr.Seek(0, os.SEEK_CUR)
+ if err != nil {
+ return -1, err
+ }
+
+ // get the size
+ size, err := rdr.Seek(0, os.SEEK_END)
+ if err != nil {
+ return -1, err
+ }
+
+ // reset the position
+ if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
+ return -1, err
+ }
+ return size, nil
+}
+
+// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
+// input readseekers. After calling this method the initial position is set to the
+// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
+// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
+// Seek can be used over the sum of lengths of all readseekers.
+//
+// When a MultiReadSeeker is used, no Read and Seek operations should be made on
+// its ReadSeeker components. Also, users should make no assumption on the state
+// of individual readseekers while the MultiReadSeeker is used.
+func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
+ if len(readers) == 1 {
+ return readers[0]
+ }
+ idx := make(map[io.ReadSeeker]int)
+ for i, rdr := range readers {
+ idx[rdr] = i
+ }
+ return &multiReadSeeker{
+ readers: readers,
+ posIdx: idx,
+ }
+}
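
A small usage sketch: a seek lands across the boundary between the two underlying readers and reading continues from there (os.SEEK_SET matches the constants this file already uses):

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "strings"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        rs := ioutils.MultiReadSeeker(
            strings.NewReader("hello "),
            strings.NewReader("world"),
        )

        // position 6 is the start of the second reader
        if _, err := rs.Seek(6, os.SEEK_SET); err != nil {
            panic(err)
        }
        rest, _ := ioutil.ReadAll(rs)
        fmt.Println(string(rest)) // world
    }
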
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
new file mode 100644
index 0000000000..63f3c07f46
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
@@ -0,0 +1,154 @@
+package ioutils
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "io"
+
+ "golang.org/x/net/context"
+)
+
+type readCloserWrapper struct {
+ io.Reader
+ closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewReadCloserWrapper returns a new io.ReadCloser.
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+ return &readCloserWrapper{
+ Reader: r,
+ closer: closer,
+ }
+}
+
+type readerErrWrapper struct {
+ reader io.Reader
+ closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+ n, err := r.reader.Read(p)
+ if err != nil {
+ r.closer()
+ }
+ return n, err
+}
+
+// NewReaderErrWrapper returns a new io.Reader.
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+ return &readerErrWrapper{
+ reader: r,
+ closer: closer,
+ }
+}
+
+// HashData returns the sha256 sum of src.
+func HashData(src io.Reader) (string, error) {
+ h := sha256.New()
+ if _, err := io.Copy(h, src); err != nil {
+ return "", err
+ }
+ return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+
+// OnEOFReader wraps an io.ReadCloser and a function
+// the function will run at the end of file or close the file.
+type OnEOFReader struct {
+ Rc io.ReadCloser
+ Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.Rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+// Close closes the reader and runs the function.
+func (r *OnEOFReader) Close() error {
+ err := r.Rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *OnEOFReader) runFunc() {
+ if fn := r.Fn; fn != nil {
+ fn()
+ r.Fn = nil
+ }
+}
+
+// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
+// operations.
+type cancelReadCloser struct {
+ cancel func()
+ pR *io.PipeReader // Stream to read from
+ pW *io.PipeWriter
+}
+
+// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
+// context is cancelled. The returned io.ReadCloser must be closed when it is
+// no longer needed.
+func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
+ pR, pW := io.Pipe()
+
+ // Create a context used to signal when the pipe is closed
+ doneCtx, cancel := context.WithCancel(context.Background())
+
+ p := &cancelReadCloser{
+ cancel: cancel,
+ pR: pR,
+ pW: pW,
+ }
+
+ go func() {
+ _, err := io.Copy(pW, in)
+ select {
+ case <-ctx.Done():
+ // If the context was closed, p.closeWithError
+ // was already called. Calling it again would
+ // change the error that Read returns.
+ default:
+ p.closeWithError(err)
+ }
+ in.Close()
+ }()
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ p.closeWithError(ctx.Err())
+ case <-doneCtx.Done():
+ return
+ }
+ }
+ }()
+
+ return p
+}
+
+// Read wraps the Read method of the pipe that provides data from the wrapped
+// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+ return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+ p.pW.CloseWithError(err)
+ p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+ p.closeWithError(io.EOF)
+ return nil
+}
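
A sketch of the cancellation behavior: the underlying Read would block forever here (an io.Pipe nobody writes to), yet the wrapped Read fails as soon as the context's deadline fires:

    package main

    import (
        "fmt"
        "io"
        "time"

        "golang.org/x/net/context"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
        defer cancel()

        pr, _ := io.Pipe() // nothing is ever written: Read would block forever
        rc := ioutils.NewCancelReadCloser(ctx, pr)
        defer rc.Close()

        _, err := rc.Read(make([]byte, 32))
        fmt.Println(err) // context deadline exceeded
    }
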
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
new file mode 100644
index 0000000000..1539ad21b5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils
+
+import "io/ioutil"
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.
+func TempDir(dir, prefix string) (string, error) {
+ return ioutil.TempDir(dir, prefix)
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
new file mode 100644
index 0000000000..c258e5fdd8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package ioutils
+
+import (
+ "io/ioutil"
+
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+ tempDir, err := ioutil.TempDir(dir, prefix)
+ if err != nil {
+ return "", err
+ }
+ return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
new file mode 100644
index 0000000000..52a4901ade
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+ "io"
+ "sync"
+)
+
+// WriteFlusher wraps the Write and Flush operations, ensuring that every
+// write is followed by a flush. In addition, the Close method can be called
+// to reject further writes once the target's lifecycle has ended.
+type WriteFlusher struct {
+ w io.Writer
+ flusher flusher
+ flushed chan struct{}
+ flushedOnce sync.Once
+ closed chan struct{}
+ closeLock sync.Mutex
+}
+
+type flusher interface {
+ Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+ select {
+ case <-wf.closed:
+ return 0, errWriteFlusherClosed
+ default:
+ }
+
+ n, err = wf.w.Write(b)
+ wf.Flush() // every write is a flush.
+ return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+ select {
+ case <-wf.closed:
+ return
+ default:
+ }
+
+ wf.flushedOnce.Do(func() {
+ close(wf.flushed)
+ })
+ wf.flusher.Flush()
+}
+
+// Flushed reports whether at least one flush has occurred since this
+// WriteFlusher was created.
+func (wf *WriteFlusher) Flushed() bool {
+ // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+ // be used to detect whether or not a response code has been issued.
+ // Another hook should be used instead.
+ var flushed bool
+ select {
+ case <-wf.flushed:
+ flushed = true
+ default:
+ }
+ return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+ wf.closeLock.Lock()
+ defer wf.closeLock.Unlock()
+
+ select {
+ case <-wf.closed:
+ return errWriteFlusherClosed
+ default:
+ close(wf.closed)
+ }
+ return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+ var fl flusher
+ if f, ok := w.(flusher); ok {
+ fl = f
+ } else {
+ fl = &NopFlusher{}
+ }
+ return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
+}
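
One plausible use is streaming HTTP responses: http.ResponseWriter usually also implements http.Flusher, which satisfies the unexported flusher interface above, so every chunk is pushed to the client immediately. A hedged sketch (the handler, route, and port are hypothetical):

    package main

    import (
        "fmt"
        "net/http"
        "time"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        http.HandleFunc("/logs", func(w http.ResponseWriter, r *http.Request) {
            wf := ioutils.NewWriteFlusher(w)
            defer wf.Close()

            for i := 0; i < 3; i++ {
                fmt.Fprintf(wf, "line %d\n", i) // each write reaches the client at once
                time.Sleep(100 * time.Millisecond)
            }
        })
        http.ListenAndServe(":8080", nil)
    }
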
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
new file mode 100644
index 0000000000..ccc7f9c23e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
@@ -0,0 +1,66 @@
+package ioutils
+
+import "io"
+
+// NopWriter represents a type whose Write operation is a no-op.
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+ return len(buf), nil
+}
+
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+// NopWriteCloser returns a nopWriteCloser.
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+ return &nopWriteCloser{w}
+}
+
+// NopFlusher represents a type whose Flush operation is a no-op.
+type NopFlusher struct{}
+
+// Flush is a nop operation.
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+ io.Writer
+ closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewWriteCloserWrapper returns a new io.WriteCloser.
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+ return &writeCloserWrapper{
+ Writer: r,
+ closer: closer,
+ }
+}
+
+// WriteCounter wraps a concrete io.Writer and holds a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when the byte count of a write is masked
+// (e.g., json.Encoder.Encode()).
+type WriteCounter struct {
+ Count int64
+ Writer io.Writer
+}
+
+// NewWriteCounter returns a new WriteCounter.
+func NewWriteCounter(w io.Writer) *WriteCounter {
+ return &WriteCounter{
+ Writer: w,
+ }
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+ count, err = wc.Writer.Write(p)
+ wc.Count += int64(count)
+ return
+}
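
The json.Encoder case the comment mentions, as a concrete sketch: Encode reports only an error, but wrapping the destination in a WriteCounter recovers the number of bytes it wrote:

    package main

    import (
        "encoding/json"
        "fmt"
        "io/ioutil"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        wc := ioutils.NewWriteCounter(ioutil.Discard)
        if err := json.NewEncoder(wc).Encode(map[string]string{"status": "ok"}); err != nil {
            panic(err)
        }
        fmt.Println(wc.Count) // bytes written by the encoder (16 here)
    }
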
diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
new file mode 100644
index 0000000000..9b15bfff4c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
@@ -0,0 +1,26 @@
+// Package longpath introduces some constants and helper functions for
+// handling long paths in Windows, which are expected to be prepended with
+// `\\?\` and followed by either a drive letter, a UNC server\share, or a
+// volume identifier.
+package longpath
+
+import (
+ "strings"
+)
+
+// Prefix is the longpath prefix for Windows file paths.
+const Prefix = `\\?\`
+
+// AddPrefix will add the Windows long path prefix to the path provided if
+// it does not already have it.
+func AddPrefix(path string) string {
+ if !strings.HasPrefix(path, Prefix) {
+ if strings.HasPrefix(path, `\\`) {
+ // This is a UNC path, so we need to add 'UNC' to the path as well.
+ path = Prefix + `UNC` + path[1:]
+ } else {
+ path = Prefix + path
+ }
+ }
+ return path
+}
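
Two representative inputs and what AddPrefix produces for them (the UNC case swaps one leading backslash for the `UNC` marker):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/longpath"
    )

    func main() {
        fmt.Println(longpath.AddPrefix(`C:\go\src`))          // \\?\C:\go\src
        fmt.Println(longpath.AddPrefix(`\\server\share\dir`)) // \\?\UNC\server\share\dir
    }
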
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
new file mode 100644
index 0000000000..6f5988e267
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -0,0 +1,119 @@
+// Package pools provides a collection of pools which provide various
+// data types with buffers. These can be used to lower the number of
+// memory allocations and reuse buffers.
+//
+// New pools should be added to this package to allow them to be
+// shared across packages.
+//
+// Utility functions which operate on pools should be added to this
+// package to allow them to be reused.
+package pools
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/docker/docker/pkg/ioutils"
+)
+
+var (
+ // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
+ BufioReader32KPool *BufioReaderPool
+ // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
+ BufioWriter32KPool *BufioWriterPool
+)
+
+const buffer32K = 32 * 1024
+
+// BufioReaderPool is a bufio reader that uses sync.Pool.
+type BufioReaderPool struct {
+ pool *sync.Pool
+}
+
+func init() {
+ BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
+ BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
+}
+
+// newBufioReaderPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
+ pool := &sync.Pool{
+ New: func() interface{} { return bufio.NewReaderSize(nil, size) },
+ }
+ return &BufioReaderPool{pool: pool}
+}
+
+// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
+func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
+ buf := bufPool.pool.Get().(*bufio.Reader)
+ buf.Reset(r)
+ return buf
+}
+
+// Put puts the bufio.Reader back into the pool.
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
+func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
+ buf := BufioReader32KPool.Get(src)
+ written, err = io.Copy(dst, buf)
+ BufioReader32KPool.Put(buf)
+ return
+}
+
+// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
+// into the pool and closes the reader if it's an io.ReadCloser.
+func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
+ return ioutils.NewReadCloserWrapper(r, func() error {
+ if readCloser, ok := r.(io.ReadCloser); ok {
+ readCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
+
+// BufioWriterPool is a bufio writer that uses sync.Pool.
+type BufioWriterPool struct {
+ pool *sync.Pool
+}
+
+// newBufioWriterPoolWithSize is unexported because new pools should be
+// added here to be shared where required.
+func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
+ pool := &sync.Pool{
+ New: func() interface{} { return bufio.NewWriterSize(nil, size) },
+ }
+ return &BufioWriterPool{pool: pool}
+}
+
+// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
+func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
+ buf := bufPool.pool.Get().(*bufio.Writer)
+ buf.Reset(w)
+ return buf
+}
+
+// Put puts the bufio.Writer back into the pool.
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
+ b.Reset(nil)
+ bufPool.pool.Put(b)
+}
+
+// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
+// into the pool and closes the writer if it's an io.WriteCloser.
+func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
+ return ioutils.NewWriteCloserWrapper(w, func() error {
+ buf.Flush()
+ if writeCloser, ok := w.(io.WriteCloser); ok {
+ writeCloser.Close()
+ }
+ bufPool.Put(buf)
+ return nil
+ })
+}
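
A sketch of the convenience wrapper: each call borrows a pooled 32K bufio.Reader instead of letting io.Copy allocate a fresh buffer, and returns it to the pool when done:

    package main

    import (
        "fmt"
        "io/ioutil"
        "strings"

        "github.com/docker/docker/pkg/pools"
    )

    func main() {
        n, err := pools.Copy(ioutil.Discard, strings.NewReader("some payload"))
        if err != nil {
            panic(err)
        }
        fmt.Println(n) // 12
    }
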
diff --git a/vendor/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/docker/docker/pkg/promise/promise.go
new file mode 100644
index 0000000000..dd52b9082f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/promise/promise.go
@@ -0,0 +1,11 @@
+package promise
+
+// Go is a basic promise implementation: it calls a function in a goroutine
+// and returns a channel which will later deliver the function's return value.
+func Go(f func() error) chan error {
+ ch := make(chan error, 1)
+ go func() {
+ ch <- f()
+ }()
+ return ch
+}
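
Because the channel is buffered with capacity one, the goroutine can finish even if the caller never receives. A typical receive-with-timeout sketch:

    package main

    import (
        "errors"
        "fmt"
        "time"

        "github.com/docker/docker/pkg/promise"
    )

    func main() {
        errC := promise.Go(func() error {
            time.Sleep(50 * time.Millisecond)
            return errors.New("worker done")
        })

        select {
        case err := <-errC:
            fmt.Println(err) // worker done
        case <-time.After(time.Second):
            fmt.Println("timed out")
        }
    }
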
diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
new file mode 100644
index 0000000000..8f67ece949
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
@@ -0,0 +1,185 @@
+package stdcopy
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// StdType is the type of standard stream
+// a writer can multiplex to.
+type StdType byte
+
+const (
+ // Stdin represents standard input stream type.
+ Stdin StdType = iota
+ // Stdout represents standard output stream type.
+ Stdout
+ // Stderr represents standard error stream type.
+ Stderr
+
+ stdWriterPrefixLen = 8
+ stdWriterFdIndex = 0
+ stdWriterSizeIndex = 4
+
+ startingBufLen = 32*1024 + stdWriterPrefixLen + 1
+)
+
+var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
+
+// stdWriter is wrapper of io.Writer with extra customized info.
+type stdWriter struct {
+ io.Writer
+ prefix byte
+}
+
+// Write sends the buffer to the underlying writer.
+// It inserts the prefix header before the buffer,
+// so stdcopy.StdCopy knows where to multiplex the output.
+// It makes stdWriter implement io.Writer.
+func (w *stdWriter) Write(p []byte) (n int, err error) {
+ if w == nil || w.Writer == nil {
+ return 0, errors.New("Writer not instantiated")
+ }
+ if p == nil {
+ return 0, nil
+ }
+
+ header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
+ binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Write(header[:])
+ buf.Write(p)
+
+ n, err = w.Writer.Write(buf.Bytes())
+ n -= stdWriterPrefixLen
+ if n < 0 {
+ n = 0
+ }
+
+ buf.Reset()
+ bufPool.Put(buf)
+ return
+}
+
+// NewStdWriter instantiates a new Writer.
+// Everything written to it will be encapsulated using a custom format,
+// and written to the underlying `w` stream.
+// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
+// `t` indicates the id of the stream to encapsulate.
+// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
+func NewStdWriter(w io.Writer, t StdType) io.Writer {
+ return &stdWriter{
+ Writer: w,
+ prefix: byte(t),
+ }
+}
+
+// StdCopy is a modified version of io.Copy.
+//
+// StdCopy will demultiplex `src`, assuming that it contains two streams,
+// previously multiplexed together using a StdWriter instance.
+// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
+//
+// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
+// In other words: if `err` is non nil, it indicates a real underlying error.
+//
+// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
+func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
+ var (
+ buf = make([]byte, startingBufLen)
+ bufLen = len(buf)
+ nr, nw int
+ er, ew error
+ out io.Writer
+ frameSize int
+ )
+
+ for {
+ // Make sure we have at least a full header
+ for nr < stdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < stdWriterPrefixLen {
+ logrus.Debugf("Corrupted prefix: %v", buf[:nr])
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ logrus.Debugf("Error reading header: %s", er)
+ return 0, er
+ }
+ }
+
+ // Check the first byte to know where to write
+ switch StdType(buf[stdWriterFdIndex]) {
+ case Stdin:
+ fallthrough
+ case Stdout:
+ // Write on stdout
+ out = dstout
+ case Stderr:
+ // Write on stderr
+ out = dsterr
+ default:
+ logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex])
+ return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
+ }
+
+ // Retrieve the size of the frame
+ frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
+ logrus.Debugf("framesize: %d", frameSize)
+
+ // Check if the buffer is big enough to read the frame.
+ // Extend it if necessary.
+ if frameSize+stdWriterPrefixLen > bufLen {
+ logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf))
+ buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
+ bufLen = len(buf)
+ }
+
+ // While the amount of bytes read is less than the size of the frame + header, we keep reading
+ for nr < frameSize+stdWriterPrefixLen {
+ var nr2 int
+ nr2, er = src.Read(buf[nr:])
+ nr += nr2
+ if er == io.EOF {
+ if nr < frameSize+stdWriterPrefixLen {
+ logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr])
+ return written, nil
+ }
+ break
+ }
+ if er != nil {
+ logrus.Debugf("Error reading frame: %s", er)
+ return 0, er
+ }
+ }
+
+ // Write the retrieved frame (without header)
+ nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
+ if ew != nil {
+ logrus.Debugf("Error writing frame: %s", ew)
+ return 0, ew
+ }
+ // If the frame has not been fully written: error
+ if nw != frameSize {
+ logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
+ return 0, io.ErrShortWrite
+ }
+ written += int64(nw)
+
+ // Move the rest of the buffer to the beginning
+ copy(buf, buf[frameSize+stdWriterPrefixLen:])
+ // Move the index
+ nr -= frameSize + stdWriterPrefixLen
+ }
+}
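
A roundtrip sketch of the framing: two stdWriters mux their frames into one buffer, and StdCopy demuxes them back onto separate destinations:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/docker/docker/pkg/stdcopy"
    )

    func main() {
        var muxed bytes.Buffer
        stdcopy.NewStdWriter(&muxed, stdcopy.Stdout).Write([]byte("out\n"))
        stdcopy.NewStdWriter(&muxed, stdcopy.Stderr).Write([]byte("err\n"))

        var stdout, stderr bytes.Buffer
        if _, err := stdcopy.StdCopy(&stdout, &stderr, &muxed); err != nil {
            panic(err)
        }
        fmt.Print(stdout.String()) // out
        fmt.Print(stderr.String()) // err
    }
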
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go
new file mode 100644
index 0000000000..7637f12e1a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go
@@ -0,0 +1,52 @@
+package system
+
+import (
+ "os"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+var (
+ maxTime time.Time
+)
+
+func init() {
+ if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+ // This is a 64 bit timespec
+ // os.Chtimes limits time to the following
+ maxTime = time.Unix(0, 1<<63-1)
+ } else {
+ // This is a 32 bit timespec
+ maxTime = time.Unix(1<<31-1, 0)
+ }
+}
+
+// Chtimes changes the access time and modified time of a file at the given path
+func Chtimes(name string, atime time.Time, mtime time.Time) error {
+ unixMinTime := time.Unix(0, 0)
+ unixMaxTime := maxTime
+
+ // If the modified time is prior to the Unix Epoch, or after the
+ // end of Unix Time, os.Chtimes has undefined behavior.
+ // Default to the Unix Epoch in this case, just in case.
+
+ if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
+ atime = unixMinTime
+ }
+
+ if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
+ mtime = unixMinTime
+ }
+
+ if err := os.Chtimes(name, atime, mtime); err != nil {
+ return err
+ }
+
+ // Take platform specific action for setting create time.
+ if err := setCTime(name, mtime); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
new file mode 100644
index 0000000000..09d58bcbfd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+import (
+ "time"
+)
+
+//setCTime will set the create time on a file. On Unix, the create
+//time is updated as a side effect of setting the modified time, so
+//no action is required.
+func setCTime(path string, ctime time.Time) error {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
new file mode 100644
index 0000000000..2945868465
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
@@ -0,0 +1,27 @@
+// +build windows
+
+package system
+
+import (
+ "syscall"
+ "time"
+)
+
+//setCTime will set the create time on a file. On Windows, this requires
+//calling SetFileTime and explicitly including the create time.
+func setCTime(path string, ctime time.Time) error {
+ ctimespec := syscall.NsecToTimespec(ctime.UnixNano())
+ pathp, e := syscall.UTF16PtrFromString(path)
+ if e != nil {
+ return e
+ }
+ h, e := syscall.CreateFile(pathp,
+ syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
+ syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
+ if e != nil {
+ return e
+ }
+ defer syscall.Close(h)
+ c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec))
+ return syscall.SetFileTime(h, &c, nil, nil)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go
new file mode 100644
index 0000000000..288318985e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/errors.go
@@ -0,0 +1,10 @@
+package system
+
+import (
+ "errors"
+)
+
+var (
+ // ErrNotSupportedPlatform means the platform is not supported.
+ ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
+)
diff --git a/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go
new file mode 100644
index 0000000000..04e2de7871
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/events_windows.go
@@ -0,0 +1,83 @@
+package system
+
+// This file implements syscalls for Win32 events which are not implemented
+// in golang.
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ procCreateEvent = modkernel32.NewProc("CreateEventW")
+ procOpenEvent = modkernel32.NewProc("OpenEventW")
+ procSetEvent = modkernel32.NewProc("SetEvent")
+ procResetEvent = modkernel32.NewProc("ResetEvent")
+ procPulseEvent = modkernel32.NewProc("PulseEvent")
+)
+
+// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
+func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
+ namep, _ := syscall.UTF16PtrFromString(name)
+ var _p1 uint32
+ if manualReset {
+ _p1 = 1
+ }
+ var _p2 uint32
+ if initialState {
+ _p2 = 1
+ }
+ r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ err = e1
+ }
+ return
+}
+
+// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
+func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
+ namep, _ := syscall.UTF16PtrFromString(name)
+ var _p1 uint32
+ if inheritHandle {
+ _p1 = 1
+ }
+ r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
+ use(unsafe.Pointer(namep))
+ handle = syscall.Handle(r0)
+ if handle == syscall.InvalidHandle {
+ err = e1
+ }
+ return
+}
+
+// SetEvent implements win32 SetEvent func in golang.
+func SetEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procSetEvent)
+}
+
+// ResetEvent implements win32 ResetEvent func in golang.
+func ResetEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procResetEvent)
+}
+
+// PulseEvent implements win32 PulseEvent func in golang.
+func PulseEvent(handle syscall.Handle) (err error) {
+ return setResetPulse(handle, procPulseEvent)
+}
+
+func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) {
+ r0, _, _ := proc.Call(uintptr(handle))
+ if r0 != 0 {
+ err = syscall.Errno(r0)
+ }
+ return
+}
+
+var temp unsafe.Pointer
+
+// use ensures a variable is kept alive and not freed by the GC while it is still needed
+func use(p unsafe.Pointer) {
+ temp = p
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go
new file mode 100644
index 0000000000..c14feb8496
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/filesys.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// MkdirAll creates a directory named path along with any necessary parents,
+// with permissions specified by perm for all directories created.
+func MkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs.
+func IsAbs(path string) bool {
+ return filepath.IsAbs(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
new file mode 100644
index 0000000000..16823d5517
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -0,0 +1,82 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "syscall"
+)
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, perm os.FileMode) error {
+ if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+ return nil
+ }
+
+ // The rest of this method is copied from os.MkdirAll and should be kept
+ // as-is to ensure compatibility.
+
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := os.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return &os.PathError{
+ Op: "mkdir",
+ Path: path,
+ Err: syscall.ENOTDIR,
+ }
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent
+ err = MkdirAll(path[0:j-1], perm)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke Mkdir and use its result.
+ err = os.Mkdir(path, perm)
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := os.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// as it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being processed
+// by the daemon). This SHOULD be treated as absolute from a docker processing
+// perspective.
+func IsAbs(path string) bool {
+ if !filepath.IsAbs(path) {
+ if !strings.HasPrefix(path, string(os.PathSeparator)) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/docker/docker/pkg/system/lstat.go
new file mode 100644
index 0000000000..bd23c4d50b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// It returns an error if the file does not exist.
+func Lstat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Lstat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
new file mode 100644
index 0000000000..49e87eb40b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -0,0 +1,25 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+)
+
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+// Note the Linux version uses fromStatT to do the copy back,
+// but that is not strictly necessary when already in an OS-specific module.
+func Lstat(path string) (*StatT, error) {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &StatT{
+ name: fi.Name(),
+ size: fi.Size(),
+ mode: fi.Mode(),
+ modTime: fi.ModTime(),
+ isDir: fi.IsDir()}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go
new file mode 100644
index 0000000000..3b6e947e67
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+ // Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+ // kernel binary code).
+ MemTotal int64
+
+ // Amount of free memory.
+ MemFree int64
+
+ // Total amount of swap space available.
+ SwapTotal int64
+
+ // Amount of swap space that is currently unused.
+ SwapFree int64
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
new file mode 100644
index 0000000000..385f1d5e73
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -0,0 +1,65 @@
+package system
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/docker/go-units"
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ file, err := os.Open("/proc/meminfo")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given an io.Reader to the file.
+// It returns an error if there are problems reading from the file.
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+ meminfo := &MemInfo{}
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ // Expected format: ["MemTotal:", "1234", "kB"]
+ parts := strings.Fields(scanner.Text())
+
+ // Sanity checks: Skip malformed entries.
+ if len(parts) < 3 || parts[2] != "kB" {
+ continue
+ }
+
+ // Convert to bytes.
+ size, err := strconv.Atoi(parts[1])
+ if err != nil {
+ continue
+ }
+ bytes := int64(size) * units.KiB
+
+ switch parts[0] {
+ case "MemTotal:":
+ meminfo.MemTotal = bytes
+ case "MemFree:":
+ meminfo.MemFree = bytes
+ case "SwapTotal:":
+ meminfo.SwapTotal = bytes
+ case "SwapFree:":
+ meminfo.SwapFree = bytes
+ }
+
+ }
+
+ // Handle errors that may have occurred during the reading of the file.
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return meminfo, nil
+}
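The parser above converts each `kB` figure to bytes via `units.KiB` and silently skips malformed lines. A minimal self-contained sketch of the same approach — `parseMemInfo` itself is unexported, and the sample input below is invented for illustration:

```go
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Invented /proc/meminfo-style sample; real files have many more fields.
	sample := "MemTotal: 16336744 kB\nBogusLine\nSwapFree: 2097148 kB\n"

	const kib = 1024 // same factor as units.KiB
	totals := map[string]int64{}
	scanner := bufio.NewScanner(strings.NewReader(sample))
	for scanner.Scan() {
		// Expected shape: ["MemTotal:", "16336744", "kB"]
		parts := strings.Fields(scanner.Text())
		if len(parts) < 3 || parts[2] != "kB" {
			continue // skip malformed entries, as above
		}
		size, err := strconv.Atoi(parts[1])
		if err != nil {
			continue
		}
		totals[strings.TrimSuffix(parts[0], ":")] = int64(size) * kib
	}
	fmt.Println(totals["MemTotal"], totals["SwapFree"]) // values in bytes
}
```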
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
new file mode 100644
index 0000000000..313c601b12
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
@@ -0,0 +1,128 @@
+// +build solaris,cgo
+
+package system
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/param.h>
+// struct swaptable *allocSwaptable(int num) {
+// struct swaptable *st;
+// struct swapent *swapent;
+// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+// swapent = st->swt_ent;
+// for (int i = 0; i < num; i++,swapent++) {
+// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+// }
+// st->swt_n = num;
+// return st;
+//}
+// void freeSwaptable (struct swaptable *st) {
+// struct swapent *swapent = st->swt_ent;
+// for (int i = 0; i < st->swt_n; i++,swapent++) {
+// free(swapent->ste_path);
+// }
+// free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+// return ent[i];
+// }
+// int64_t getPpKernel() {
+// int64_t pp_kernel = 0;
+// kstat_ctl_t *ksc;
+// kstat_t *ks;
+// kstat_named_t *knp;
+// kid_t kid;
+//
+// if ((ksc = kstat_open()) == NULL) {
+// return -1;
+// }
+// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+// return -1;
+// }
+// if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+// return -1;
+// }
+// switch (knp->data_type) {
+// case KSTAT_DATA_UINT64:
+// pp_kernel = knp->value.ui64;
+// break;
+// case KSTAT_DATA_UINT32:
+// pp_kernel = knp->value.ui32;
+// break;
+// }
+// pp_kernel *= sysconf(_SC_PAGESIZE);
+// return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// getTotalMem gets the total system memory using sysconf, same as prtconf.
+func getTotalMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_PHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+func getFreeMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_AVPHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+
+ ppKernel := C.getPpKernel()
+ MemTotal := getTotalMem()
+ MemFree := getFreeMem()
+ SwapTotal, SwapFree, err := getSysSwap()
+
+ if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
+ SwapFree < 0 {
+ return nil, fmt.Errorf("error getting system memory info: %v", err)
+ }
+
+ meminfo := &MemInfo{}
+ // Total memory is total physical memory less the memory locked by the kernel
+ meminfo.MemTotal = MemTotal - int64(ppKernel)
+ meminfo.MemFree = MemFree
+ meminfo.SwapTotal = SwapTotal
+ meminfo.SwapFree = SwapFree
+
+ return meminfo, nil
+}
+
+func getSysSwap() (int64, int64, error) {
+ var tSwap int64
+ var fSwap int64
+ var diskblksPerPage int64
+ num, err := C.swapctl(C.SC_GETNSWP, nil)
+ if err != nil {
+ return -1, -1, err
+ }
+ st := C.allocSwaptable(num)
+ _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
+ if err != nil {
+ C.freeSwaptable(st)
+ return -1, -1, err
+ }
+
+ diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
+ for i := 0; i < int(num); i++ {
+ swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
+ tSwap += int64(swapent.ste_pages) * diskblksPerPage
+ fSwap += int64(swapent.ste_free) * diskblksPerPage
+ }
+ C.freeSwaptable(st)
+ return tSwap, fSwap, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
new file mode 100644
index 0000000000..3ce019dffd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!windows,!solaris
+
+package system
+
+// ReadMemInfo is not supported on platforms other than linux, windows and solaris.
+func ReadMemInfo() (*MemInfo, error) {
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
new file mode 100644
index 0000000000..d46642598c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
@@ -0,0 +1,44 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
+type memorystatusex struct {
+ dwLength uint32
+ dwMemoryLoad uint32
+ ullTotalPhys uint64
+ ullAvailPhys uint64
+ ullTotalPageFile uint64
+ ullAvailPageFile uint64
+ ullTotalVirtual uint64
+ ullAvailVirtual uint64
+ ullAvailExtendedVirtual uint64
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ msi := &memorystatusex{
+ dwLength: 64, // sizeof(MEMORYSTATUSEX), required by GlobalMemoryStatusEx
+ }
+ r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
+ if r1 == 0 {
+ return &MemInfo{}, nil
+ }
+ return &MemInfo{
+ MemTotal: int64(msi.ullTotalPhys),
+ MemFree: int64(msi.ullAvailPhys),
+ SwapTotal: int64(msi.ullTotalPageFile),
+ SwapFree: int64(msi.ullAvailPageFile),
+ }, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go
new file mode 100644
index 0000000000..73958182b4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod.go
@@ -0,0 +1,22 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+ return syscall.Mknod(path, mode, dev)
+}
+
+// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
+// and minor number of the newly created device special file.
+// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
+// then the top 12 bits of the minor.
+func Mkdev(major int64, minor int64) uint32 {
+ return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
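The bit layout described above is easier to see with a worked example. The sketch below packs and unpacks a device number using the same shifts; `mkdev` is a local copy of the logic for illustration, and major 8, minor 1 is the conventional pair for /dev/sda1:

```go
package main

import "fmt"

// mkdev mirrors Mkdev above: low 8 bits of the minor, then 12 bits
// of the major, then the top 12 bits of the minor.
func mkdev(major, minor int64) uint32 {
	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}

func main() {
	dev := mkdev(8, 1) // 0x801
	major := (dev >> 8) & 0xfff
	minor := (dev & 0xff) | ((dev >> 12) & 0xfff00) // minor is split across both ends
	fmt.Printf("dev=%#x major=%d minor=%d\n", dev, major, minor)
}
```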
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
new file mode 100644
index 0000000000..2e863c0215
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package system
+
+// Mknod is not implemented on Windows.
+func Mknod(path string, mode uint32, dev int) error {
+ return ErrNotSupportedPlatform
+}
+
+// Mkdev is not implemented on Windows.
+func Mkdev(major int64, minor int64) uint32 {
+ panic("Mkdev not implemented on Windows.")
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go
new file mode 100644
index 0000000000..c607c4db09
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+// DefaultPathEnv is a unix-style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// ':' character.
+const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive. This is a no-op on Linux.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ return path, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go
new file mode 100644
index 0000000000..cbfe2c1576
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go
@@ -0,0 +1,37 @@
+// +build windows
+
+package system
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
+// the container. Docker has no context of what the default path should be.
+const DefaultPathEnv = ""
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. Also, it translates it to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C: --> Fail
+// C:\ --> \
+// a --> a
+// /a --> \a
+// d:\ --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ if len(path) == 2 && string(path[1]) == ":" {
+ return "", fmt.Errorf("No relative path specified in %q", path)
+ }
+ if !filepath.IsAbs(path) || len(path) < 2 {
+ return filepath.FromSlash(path), nil
+ }
+ if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+ return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+ }
+ return filepath.FromSlash(path[2:]), nil
+}
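A short sketch exercising the documented mappings. It must run on Windows, where `filepath.IsAbs` and `filepath.FromSlash` understand drive letters and backslashes; `check` is a local copy of the logic above so the example is self-contained:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// check is a local copy of CheckSystemDriveAndRemoveDriveLetter, for illustration.
func check(path string) (string, error) {
	if len(path) == 2 && string(path[1]) == ":" {
		return "", fmt.Errorf("No relative path specified in %q", path)
	}
	if !filepath.IsAbs(path) || len(path) < 2 {
		return filepath.FromSlash(path), nil
	}
	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
	}
	return filepath.FromSlash(path[2:]), nil
}

func main() {
	for _, p := range []string{"C:", `C:\`, "a", "/a", `d:\`} {
		out, err := check(p)
		fmt.Printf("%q -> %q (err: %v)\n", p, out, err)
	}
}
```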
diff --git a/vendor/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/docker/docker/pkg/system/stat.go
new file mode 100644
index 0000000000..087034c5ec
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat.go
@@ -0,0 +1,53 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc about a file.
+type StatT struct {
+ mode uint32
+ uid uint32
+ gid uint32
+ rdev uint64
+ size int64
+ mtim syscall.Timespec
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() uint32 {
+ return s.mode
+}
+
+// UID returns file's user id of owner.
+func (s StatT) UID() uint32 {
+ return s.uid
+}
+
+// GID returns file's group id of owner.
+func (s StatT) GID() uint32 {
+ return s.gid
+}
+
+// Rdev returns file's device ID (if it's special file).
+func (s StatT) Rdev() uint64 {
+ return s.rdev
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() syscall.Timespec {
+ return s.mtim
+}
+
+// GetLastModification returns file's last modification time.
+func (s StatT) GetLastModification() syscall.Timespec {
+ return s.Mtim()
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
new file mode 100644
index 0000000000..d0fb6f1519
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
@@ -0,0 +1,27 @@
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
new file mode 100644
index 0000000000..8b1eded138
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: s.Mode,
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: s.Rdev,
+ mtim: s.Mtim}, nil
+}
+
+// FromStatT exists only on linux, and loads a system.StatT from a
+// syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
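For context, a caller would typically use the exported `Stat` like this — a minimal sketch assuming the vendored import path and an existing file such as `/etc/hostname`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	st, err := system.Stat("/etc/hostname") // assumes this file exists
	if err != nil {
		log.Fatal(err)
	}
	// StatT exposes the raw syscall fields through accessors.
	fmt.Printf("size=%d uid=%d gid=%d mode=%o mtime=%d\n",
		st.Size(), st.UID(), st.GID(), st.Mode(), st.Mtim().Sec)
}
```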
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
new file mode 100644
index 0000000000..3c3b71fb21
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
@@ -0,0 +1,15 @@
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
new file mode 100644
index 0000000000..0216985a25
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
@@ -0,0 +1,34 @@
+// +build solaris
+
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
+
+// FromStatT loads a system.StatT from a syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go
new file mode 100644
index 0000000000..f53e9de4d1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go
@@ -0,0 +1,17 @@
+// +build !linux,!windows,!freebsd,!solaris,!openbsd
+
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go
new file mode 100644
index 0000000000..39490c625c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go
@@ -0,0 +1,43 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like name, permission, size, etc about a file.
+type StatT struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+ isDir bool
+}
+
+// Name returns file's name.
+func (s StatT) Name() string {
+ return s.name
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+ return s.mode
+}
+
+// ModTime returns file's last modification time.
+func (s StatT) ModTime() time.Time {
+ return s.modTime
+}
+
+// IsDir returns whether file is actually a directory.
+func (s StatT) IsDir() bool {
+ return s.isDir
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
new file mode 100644
index 0000000000..3ae9128468
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
@@ -0,0 +1,17 @@
+// +build linux freebsd
+
+package system
+
+import "syscall"
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall.
+func Unmount(dest string) error {
+ return syscall.Unmount(dest, 0)
+}
+
+// CommandLineToArgv should not be used on Unix.
+// It simply returns commandLine as the only element of the returned array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ return []string{commandLine}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
new file mode 100644
index 0000000000..f5f2d56941
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
@@ -0,0 +1,103 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+
+ "github.com/Sirupsen/logrus"
+)
+
+var (
+ ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
+ procGetVersionExW = modkernel32.NewProc("GetVersionExW")
+)
+
+// OSVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type OSVersion struct {
+ Version uint32
+ MajorVersion uint8
+ MinorVersion uint8
+ Build uint16
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
+type osVersionInfoEx struct {
+ OSVersionInfoSize uint32
+ MajorVersion uint32
+ MinorVersion uint32
+ BuildNumber uint32
+ PlatformID uint32
+ CSDVersion [128]uint16
+ ServicePackMajor uint16
+ ServicePackMinor uint16
+ SuiteMask uint16
+ ProductType byte
+ Reserve byte
+}
+
+// GetOSVersion gets the operating system version on Windows. Note that
+// docker.exe must be manifested to get the correct version information.
+func GetOSVersion() OSVersion {
+ var err error
+ osv := OSVersion{}
+ osv.Version, err = syscall.GetVersion()
+ if err != nil {
+ // GetVersion never fails.
+ panic(err)
+ }
+ osv.MajorVersion = uint8(osv.Version & 0xFF)
+ osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
+ osv.Build = uint16(osv.Version >> 16)
+ return osv
+}
+
+// IsWindowsClient returns true if the SKU is client
+func IsWindowsClient() bool {
+ osviex := &osVersionInfoEx{OSVersionInfoSize: 284} // 284 = sizeof(OSVERSIONINFOEXW)
+ r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
+ if r1 == 0 {
+ logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err)
+ return false
+ }
+ const verNTWorkstation = 0x00000001
+ return osviex.ProductType == verNTWorkstation
+}
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall. Not supported on Windows
+func Unmount(dest string) error {
+ return nil
+}
+
+// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ var argc int32
+
+ argsPtr, err := syscall.UTF16PtrFromString(commandLine)
+ if err != nil {
+ return nil, err
+ }
+
+ argv, err := syscall.CommandLineToArgv(argsPtr, &argc)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv))))
+
+ newArgs := make([]string, argc)
+ for i, v := range (*argv)[:argc] {
+ newArgs[i] = syscall.UTF16ToString((*v)[:])
+ }
+
+ return newArgs, nil
+}
+
+// HasWin32KSupport determines whether containers that depend on win32k can
+// run on this machine. Win32k is the driver used to implement windowing.
+func HasWin32KSupport() bool {
+ // For now, check for ntuser API support on the host. In the future, a host
+ // may support win32k in containers even if the host does not support ntuser
+ // APIs.
+ return ntuserApiset.Load() == nil
+}
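`GetOSVersion` above decodes the packed dword returned by `GetVersion`: bits 0-7 hold the major version, bits 8-15 the minor, and bits 16-31 the build number. A tiny sketch of that decoding, using an invented raw value:

```go
package main

import "fmt"

func main() {
	// Invented raw value: Windows 10.0 build 14393, packed GetVersion-style.
	var v uint32 = 14393<<16 | 0<<8 | 10
	fmt.Printf("major=%d minor=%d build=%d\n",
		uint8(v&0xFF),    // bits 0-7
		uint8(v>>8&0xFF), // bits 8-15
		uint16(v>>16))    // bits 16-31
}
```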
diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go
new file mode 100644
index 0000000000..3d0146b01a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/umask.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Umask sets current process's file mode creation mask to newmask
+// and returns oldmask.
+func Umask(newmask int) (oldmask int, err error) {
+ return syscall.Umask(newmask), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
new file mode 100644
index 0000000000..13f1de1769
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package system
+
+// Umask is not supported on the windows platform.
+func Umask(newmask int) (oldmask int, err error) {
+ // should not be called on cli code path
+ return 0, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go b/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go
new file mode 100644
index 0000000000..0a16197544
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go
@@ -0,0 +1,8 @@
+package system
+
+import "syscall"
+
+// LUtimesNano is not supported by darwin platform.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
new file mode 100644
index 0000000000..e2eac3b553
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -0,0 +1,22 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic links because syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 0000000000..fc8a1aba95
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,26 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic links because syscall.UtimesNano doesn't support a NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ // These are not currently available in syscall
+ atFdCwd := -100
+ atSymLinkNoFollow := 0x100
+
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
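A usage sketch, assuming the vendored import path and an existing symlink at the hypothetical path `/tmp/link`; `ts[0]` is the access time and `ts[1]` the modification time, applied to the link itself rather than its target:

```go
package main

import (
	"log"
	"syscall"
	"time"

	"github.com/docker/docker/pkg/system"
)

func main() {
	now := syscall.NsecToTimespec(time.Now().UnixNano())
	ts := []syscall.Timespec{now, now} // atime, mtime
	if err := system.LUtimesNano("/tmp/link", ts); err != nil {
		log.Fatal(err)
	}
}
```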
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 0000000000..50c3a04364
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd,!darwin
+
+package system
+
+import "syscall"
+
+// LUtimesNano is not supported on platforms other than linux, freebsd and darwin.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 0000000000..d2e2c05799
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,63 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return nil, err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return nil, err
+ }
+
+ dest := make([]byte, 128)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ if errno == syscall.ENODATA {
+ return nil, nil
+ }
+ if errno == syscall.ERANGE {
+ dest = make([]byte, sz)
+ destBytes := unsafe.Pointer(&dest[0])
+ sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
+ }
+ if errno != 0 {
+ return nil, errno
+ }
+
+ return dest[:sz], nil
+}
+
+var _zero uintptr
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ pathBytes, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ attrBytes, err := syscall.BytePtrFromString(attr)
+ if err != nil {
+ return err
+ }
+ var dataBytes unsafe.Pointer
+ if len(data) > 0 {
+ dataBytes = unsafe.Pointer(&data[0])
+ } else {
+ dataBytes = unsafe.Pointer(&_zero)
+ }
+ _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
+ if errno != 0 {
+ return errno
+ }
+ return nil
+}
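A round-trip sketch, assuming the vendored import path, Linux, and an existing file on an xattr-capable filesystem; the `/tmp/demo` path and `user.demo` attribute are invented for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	const path = "/tmp/demo" // must already exist
	if err := system.Lsetxattr(path, "user.demo", []byte("hello"), 0); err != nil {
		log.Fatal(err)
	}
	val, err := system.Lgetxattr(path, "user.demo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("user.demo = %q\n", val) // a nil slice with nil error means "not set"
}
```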
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 0000000000..0114f2227c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package system
+
+// Lgetxattr is not supported on platforms other than linux.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ return nil, ErrNotSupportedPlatform
+}
+
+// Lsetxattr is not supported on platforms other than linux.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/engine-api/LICENSE b/vendor/github.com/docker/engine-api/LICENSE
new file mode 100644
index 0000000000..c157bff96a
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015-2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/engine-api/types/filters/parse.go b/vendor/github.com/docker/engine-api/types/filters/parse.go
new file mode 100644
index 0000000000..dc2c48b894
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/filters/parse.go
@@ -0,0 +1,307 @@
+// Package filters provides helper functions to parse and handle command-line
+// filters, used for example in the docker ps or docker images commands.
+package filters
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/docker/engine-api/types/versions"
+)
+
+// Args stores filter arguments as map key:{map key: bool}.
+// It contains an aggregation of the map of arguments (which are in the form
+// of -f 'key=value') based on the key, and stores values for the same key
+// in a map with string keys and boolean values.
+// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
+// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
+type Args struct {
+ fields map[string]map[string]bool
+}
+
+// NewArgs initializes a new Args struct.
+func NewArgs() Args {
+ return Args{fields: map[string]map[string]bool{}}
+}
+
+// ParseFlag parses the argument to the filter flag. Like
+//
+// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
+//
+// The parsed filters are added to prev, which is then returned; pass the
+// result of NewArgs() to start from an empty set.
+func ParseFlag(arg string, prev Args) (Args, error) {
+ filters := prev
+ if len(arg) == 0 {
+ return filters, nil
+ }
+
+ if !strings.Contains(arg, "=") {
+ return filters, ErrBadFormat
+ }
+
+ f := strings.SplitN(arg, "=", 2)
+
+ name := strings.ToLower(strings.TrimSpace(f[0]))
+ value := strings.TrimSpace(f[1])
+
+ filters.Add(name, value)
+
+ return filters, nil
+}
+
+// ErrBadFormat is an error returned in case of bad format for a filter.
+var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
+
+// ToParam packs the Args into a string for easy transport from client to server.
+func ToParam(a Args) (string, error) {
+ // this way we don't URL encode {}, just empty space
+ if a.Len() == 0 {
+ return "", nil
+ }
+
+ buf, err := json.Marshal(a.fields)
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
+
+// ToParamWithVersion packs the Args into a string for easy transport from client to server.
+// The generated string will depend on the specified version (corresponding to the API version).
+func ToParamWithVersion(version string, a Args) (string, error) {
+ // this way we don't URL encode {}, just empty space
+ if a.Len() == 0 {
+ return "", nil
+ }
+
+ // for daemons older than v1.10, filter must be of the form map[string][]string
+ var buf []byte
+ var err error
+ if version != "" && versions.LessThan(version, "1.22") {
+ buf, err = json.Marshal(convertArgsToSlice(a.fields))
+ } else {
+ buf, err = json.Marshal(a.fields)
+ }
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
+
+// FromParam unpacks the filter Args.
+func FromParam(p string) (Args, error) {
+ if len(p) == 0 {
+ return NewArgs(), nil
+ }
+
+ r := strings.NewReader(p)
+ d := json.NewDecoder(r)
+
+ m := map[string]map[string]bool{}
+ if err := d.Decode(&m); err != nil {
+ r.Seek(0, 0)
+
+ // Allow parsing old arguments in slice format.
+ // Because other libraries might be sending them in this format.
+ deprecated := map[string][]string{}
+ if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
+ m = deprecatedArgs(deprecated)
+ } else {
+ return NewArgs(), err
+ }
+ }
+ return Args{m}, nil
+}
+
+// Get returns the list of values associated with a field.
+// It returns a slice of strings to keep backwards compatibility with old code.
+func (filters Args) Get(field string) []string {
+ values := filters.fields[field]
+ if values == nil {
+ return make([]string, 0)
+ }
+ slice := make([]string, 0, len(values))
+ for key := range values {
+ slice = append(slice, key)
+ }
+ return slice
+}
+
+// Add adds a new value to a filter field.
+func (filters Args) Add(name, value string) {
+ if _, ok := filters.fields[name]; ok {
+ filters.fields[name][value] = true
+ } else {
+ filters.fields[name] = map[string]bool{value: true}
+ }
+}
+
+// Del removes a value from a filter field.
+func (filters Args) Del(name, value string) {
+ if _, ok := filters.fields[name]; ok {
+ delete(filters.fields[name], value)
+ }
+}
+
+// Len returns the number of fields in the arguments.
+func (filters Args) Len() int {
+ return len(filters.fields)
+}
+
+// MatchKVList returns true if the values for the specified field match the ones
+// from the sources.
+// e.g. given Args are {'label': {'label1=1','label2=2'}, 'image.name', {'ubuntu'}},
+// field is 'label' and sources are {'label1': '1', 'label2': '2'},
+// it returns true.
+func (filters Args) MatchKVList(field string, sources map[string]string) bool {
+ fieldValues := filters.fields[field]
+
+ // do not filter if there is no filter set or cannot determine filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+
+ if len(sources) == 0 {
+ return false
+ }
+
+ for name2match := range fieldValues {
+ testKV := strings.SplitN(name2match, "=", 2)
+
+ v, ok := sources[testKV[0]]
+ if !ok {
+ return false
+ }
+ if len(testKV) == 2 && testKV[1] != v {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Match returns true if the values for the specified field match the source string
+// e.g. given Args are {'label': {'label1=1','label2=2'}, 'image.name', {'ubuntu'}},
+// field is 'image.name' and source is 'ubuntu'
+// it returns true.
+func (filters Args) Match(field, source string) bool {
+ if filters.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := filters.fields[field]
+ for name2match := range fieldValues {
+ match, err := regexp.MatchString(name2match, source)
+ if err != nil {
+ continue
+ }
+ if match {
+ return true
+ }
+ }
+ return false
+}
+
+// ExactMatch returns true if the source matches exactly one of the filters.
+func (filters Args) ExactMatch(field, source string) bool {
+ fieldValues, ok := filters.fields[field]
+ // do not filter if there is no filter set or cannot determine filter
+ if !ok || len(fieldValues) == 0 {
+ return true
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
+func (filters Args) UniqueExactMatch(field, source string) bool {
+ fieldValues := filters.fields[field]
+ // do not filter if there is no filter set or cannot determine filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+ if len(filters.fields[field]) != 1 {
+ return false
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// FuzzyMatch returns true if the source matches exactly one of the filters,
+// or the source has one of the filters as a prefix.
+func (filters Args) FuzzyMatch(field, source string) bool {
+ if filters.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := filters.fields[field]
+ for prefix := range fieldValues {
+ if strings.HasPrefix(source, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+// Include returns true if the name of the field to filter is in the filters.
+func (filters Args) Include(field string) bool {
+ _, ok := filters.fields[field]
+ return ok
+}
+
+// Validate ensures that all the fields in the filter are valid.
+// It returns an error as soon as it finds an invalid field.
+func (filters Args) Validate(accepted map[string]bool) error {
+ for name := range filters.fields {
+ if !accepted[name] {
+ return fmt.Errorf("Invalid filter '%s'", name)
+ }
+ }
+ return nil
+}
+
+// WalkValues iterates over the list of filtered values for a field.
+// It stops the iteration if it finds an error, and returns that error.
+func (filters Args) WalkValues(field string, op func(value string) error) error {
+ if _, ok := filters.fields[field]; !ok {
+ return nil
+ }
+ for v := range filters.fields[field] {
+ if err := op(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
+ m := map[string]map[string]bool{}
+ for k, v := range d {
+ values := map[string]bool{}
+ for _, vv := range v {
+ values[vv] = true
+ }
+ m[k] = values
+ }
+ return m
+}
+
+func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
+ m := map[string][]string{}
+ for k, v := range f {
+ values := []string{}
+ for kk := range v {
+ if v[kk] {
+ values = append(values, kk)
+ }
+ }
+ m[k] = values
+ }
+ return m
+}
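To see the Args semantics end to end, here is a usage sketch assuming the vendored import path; the filter values are invented:

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("label", "env=prod")
	args.Add("label", "tier=web")
	args.Add("image.name", "ubuntu")

	fmt.Println(args.ExactMatch("image.name", "ubuntu")) // true
	// MatchKVList requires every label filter to be satisfied by the sources.
	fmt.Println(args.MatchKVList("label", map[string]string{"env": "prod"}))                 // false
	fmt.Println(args.MatchKVList("label", map[string]string{"env": "prod", "tier": "web"})) // true

	if param, err := filters.ToParam(args); err == nil {
		fmt.Println(param) // JSON form sent from client to server
	}
}
```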
diff --git a/vendor/github.com/docker/engine-api/types/versions/README.md b/vendor/github.com/docker/engine-api/types/versions/README.md
new file mode 100644
index 0000000000..76c516e6a3
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/versions/README.md
@@ -0,0 +1,14 @@
+## Legacy API type versions
+
+This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`.
+
+Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.
+
+### Package name conventions
+
+The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
+
+1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`.
+2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`.
+
+For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, you want to create a new package under `api/types/versions/v1p21`.
diff --git a/vendor/github.com/docker/engine-api/types/versions/compare.go b/vendor/github.com/docker/engine-api/types/versions/compare.go
new file mode 100644
index 0000000000..611d4fed66
--- /dev/null
+++ b/vendor/github.com/docker/engine-api/types/versions/compare.go
@@ -0,0 +1,62 @@
+package versions
+
+import (
+ "strconv"
+ "strings"
+)
+
+// compare compares two version strings
+// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
+func compare(v1, v2 string) int {
+ var (
+ currTab = strings.Split(v1, ".")
+ otherTab = strings.Split(v2, ".")
+ )
+
+ max := len(currTab)
+ if len(otherTab) > max {
+ max = len(otherTab)
+ }
+ for i := 0; i < max; i++ {
+ var currInt, otherInt int
+
+ if len(currTab) > i {
+ currInt, _ = strconv.Atoi(currTab[i])
+ }
+ if len(otherTab) > i {
+ otherInt, _ = strconv.Atoi(otherTab[i])
+ }
+ if currInt > otherInt {
+ return 1
+ }
+ if otherInt > currInt {
+ return -1
+ }
+ }
+ return 0
+}
+
+// LessThan checks if a version is less than another
+func LessThan(v, other string) bool {
+ return compare(v, other) == -1
+}
+
+// LessThanOrEqualTo checks if a version is less than or equal to another
+func LessThanOrEqualTo(v, other string) bool {
+ return compare(v, other) <= 0
+}
+
+// GreaterThan checks if a version is greater than another
+func GreaterThan(v, other string) bool {
+ return compare(v, other) == 1
+}
+
+// GreaterThanOrEqualTo checks if a version is greater than or equal to another
+func GreaterThanOrEqualTo(v, other string) bool {
+ return compare(v, other) >= 0
+}
+
+// Equal checks if a version is equal to another
+func Equal(v, other string) bool {
+ return compare(v, other) == 0
+}
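Because components are compared numerically rather than lexically, `"1.9"` sorts below `"1.10"`, and missing components count as zero. A quick usage sketch, assuming the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/versions"
)

func main() {
	fmt.Println(versions.LessThan("1.9", "1.10"))              // true: numeric, not lexical
	fmt.Println(versions.GreaterThanOrEqualTo("1.22", "1.22")) // true
	fmt.Println(versions.Equal("1.21", "1.21.0"))              // true: missing parts are 0
}
```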
diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md
new file mode 100644
index 0000000000..9ea86d784e
--- /dev/null
+++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# Contributing to go-units
+
+Want to hack on go-units? Awesome! Here are instructions to get you started.
+
+go-units is a part of the [Docker](https://www.docker.com) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read Docker's
+[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
+[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
+[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
+[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions).
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE
new file mode 100644
index 0000000000..b55b37bc31
--- /dev/null
+++ b/vendor/github.com/docker/go-units/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS
new file mode 100644
index 0000000000..477be8b214
--- /dev/null
+++ b/vendor/github.com/docker/go-units/MAINTAINERS
@@ -0,0 +1,27 @@
+# go-units maintainers file
+#
+# This file describes who runs the docker/go-units project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+ [Org."Core maintainers"]
+ people = [
+ "calavera",
+ ]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+ # ADD YOURSELF HERE IN ALPHABETICAL ORDER
+ [people.calavera]
+ Name = "David Calavera"
+ Email = "david.calavera@gmail.com"
+ GitHub = "calavera"
diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md
new file mode 100644
index 0000000000..4f70a4e134
--- /dev/null
+++ b/vendor/github.com/docker/go-units/README.md
@@ -0,0 +1,16 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
+
+# Introduction
+
+go-units is a library to transform human-friendly measurements into machine-friendly values.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
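+
+Below is a minimal usage sketch based on the functions in this vendored copy (`HumanSize`, `BytesSize`, `FromHumanSize`, `RAMInBytes`, `HumanDuration`, `ParseUlimit`); the commented outputs are illustrative:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/docker/go-units"
+)
+
+func main() {
+	// Format byte counts with decimal (SI) and binary prefixes.
+	fmt.Println(units.HumanSize(2746000))   // 2.746 MB
+	fmt.Println(units.BytesSize(44 * 1024)) // 44 KiB
+
+	// Parse human-friendly strings back into byte counts.
+	if n, err := units.FromHumanSize("17MB"); err == nil {
+		fmt.Println(n) // 17000000
+	}
+	if n, err := units.RAMInBytes("64m"); err == nil {
+		fmt.Println(n) // 67108864
+	}
+
+	// Approximate durations and parse ulimit specifications.
+	fmt.Println(units.HumanDuration(90 * time.Minute)) // About an hour
+	if u, err := units.ParseUlimit("nofile=1024:2048"); err == nil {
+		fmt.Println(u) // nofile=1024:2048
+	}
+}
+```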
+
+## Copyright and license
+
+Copyright © 2015 Docker, Inc.
+
+go-units is licensed under the Apache License, Version 2.0.
+See [LICENSE](LICENSE) for the full text of the license.
diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml
new file mode 100644
index 0000000000..9043b35478
--- /dev/null
+++ b/vendor/github.com/docker/go-units/circle.yml
@@ -0,0 +1,11 @@
+dependencies:
+ post:
+ # install golint
+ - go get github.com/golang/lint/golint
+
+test:
+ pre:
+ # run analysis before tests
+ - go vet ./...
+ - test -z "$(golint ./... | tee /dev/stderr)"
+ - test -z "$(gofmt -s -l . | tee /dev/stderr)"
diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go
new file mode 100644
index 0000000000..c219a8a968
--- /dev/null
+++ b/vendor/github.com/docker/go-units/duration.go
@@ -0,0 +1,33 @@
+// Package units provides helper functions to parse and print size and time units
+// in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (eg. "About a minute", "4 hours", etc.).
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 60 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours()); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*3 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
new file mode 100644
index 0000000000..f5b82ea246
--- /dev/null
+++ b/vendor/github.com/docker/go-units/size.go
@@ -0,0 +1,96 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// See: http://en.wikipedia.org/wiki/Binary_prefix
+const (
+ // Decimal
+
+ KB = 1000
+ MB = 1000 * KB
+ GB = 1000 * MB
+ TB = 1000 * GB
+ PB = 1000 * TB
+
+ // Binary
+
+ KiB = 1024
+ MiB = 1024 * KiB
+ GiB = 1024 * MiB
+ TiB = 1024 * GiB
+ PiB = 1024 * TiB
+)
+
+type unitMap map[string]int64
+
+var (
+ decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
+ binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
+	sizeRegex   = regexp.MustCompile(`^(\d+(\.\d+)?) ?([kKmMgGtTpP])?[bB]?$`)
+)
+
+var decimalAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
+// CustomSize returns a human-readable approximation of a size
+// using a custom format.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+ i := 0
+ unitsLimit := len(_map) - 1
+ for size >= base && i < unitsLimit {
+ size = size / base
+ i++
+ }
+ return fmt.Sprintf(format, size, _map[i])
+}
+
+// HumanSize returns a human-readable approximation of a size
+// capped at 4 significant digits (eg. "2.746 MB", "796 kB").
+func HumanSize(size float64) string {
+	return CustomSize("%.4g %s", size, 1000.0, decimalAbbrs)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (eg. "44 KiB", "17 MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using SI standard (eg. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+ return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+ return parseSize(size, binaryMap)
+}
+
+// parseSize parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 4 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseFloat(matches[1], 64)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[3])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= float64(mul)
+ }
+
+ return int64(size), nil
+}
diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go
new file mode 100644
index 0000000000..5ac7fd825f
--- /dev/null
+++ b/vendor/github.com/docker/go-units/ulimit.go
@@ -0,0 +1,118 @@
+package units
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Ulimit is a human friendly version of Rlimit.
+type Ulimit struct {
+ Name string
+ Hard int64
+ Soft int64
+}
+
+// Rlimit specifies the resource limits, such as max open files.
+type Rlimit struct {
+ Type int `json:"type,omitempty"`
+ Hard uint64 `json:"hard,omitempty"`
+ Soft uint64 `json:"soft,omitempty"`
+}
+
+const (
+	// Magic numbers for making the syscall.
+	// Some of these are defined in the syscall package, but not all.
+	// Also, since the Windows client doesn't get access to the syscall
+	// package, they need to be defined here.
+ rlimitAs = 9
+ rlimitCore = 4
+ rlimitCPU = 0
+ rlimitData = 2
+ rlimitFsize = 1
+ rlimitLocks = 10
+ rlimitMemlock = 8
+ rlimitMsgqueue = 12
+ rlimitNice = 13
+ rlimitNofile = 7
+ rlimitNproc = 6
+ rlimitRss = 5
+ rlimitRtprio = 14
+ rlimitRttime = 15
+ rlimitSigpending = 11
+ rlimitStack = 3
+)
+
+var ulimitNameMapping = map[string]int{
+ //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
+ "core": rlimitCore,
+ "cpu": rlimitCPU,
+ "data": rlimitData,
+ "fsize": rlimitFsize,
+ "locks": rlimitLocks,
+ "memlock": rlimitMemlock,
+ "msgqueue": rlimitMsgqueue,
+ "nice": rlimitNice,
+ "nofile": rlimitNofile,
+ "nproc": rlimitNproc,
+ "rss": rlimitRss,
+ "rtprio": rlimitRtprio,
+ "rttime": rlimitRttime,
+ "sigpending": rlimitSigpending,
+ "stack": rlimitStack,
+}
+
+// ParseUlimit parses and returns a Ulimit from the specified string.
+func ParseUlimit(val string) (*Ulimit, error) {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid ulimit argument: %s", val)
+ }
+
+ if _, exists := ulimitNameMapping[parts[0]]; !exists {
+ return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
+ }
+
+ var (
+ soft int64
+ hard = &soft // default to soft in case no hard was set
+ temp int64
+ err error
+ )
+ switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
+ case 2:
+ temp, err = strconv.ParseInt(limitVals[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ hard = &temp
+ fallthrough
+ case 1:
+ soft, err = strconv.ParseInt(limitVals[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
+ }
+
+ if soft > *hard {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
+ }
+
+ return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
+}
+
+// GetRlimit returns the Rlimit corresponding to the Ulimit.
+func (u *Ulimit) GetRlimit() (*Rlimit, error) {
+ t, exists := ulimitNameMapping[u.Name]
+ if !exists {
+ return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
+ }
+
+ return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
+}
+
+func (u *Ulimit) String() string {
+ return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
new file mode 100644
index 0000000000..0a5bf8f617
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -0,0 +1,46 @@
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Adrien Bustany
+Amit Krishnan
+Bjørn Erik Pedersen
+Bruno Bigras
+Caleb Spare
+Case Nelson
+Chris Howey
+Christoffer Buchholz
+Daniel Wagner-Hall
+Dave Cheney
+Evan Phoenix
+Francisco Souza
+Hari haran
+John C Barstow
+Kelvin Fo
+Ken-ichirou MATSUZAWA
+Matt Layher
+Nathan Youngman
+Patrick
+Paul Hammond
+Pawel Knap
+Pieter Droogendijk
+Pursuit92
+Riku Voipio
+Rob Figueiredo
+Slawek Ligus
+Soge Zhang
+Tiffany Jernigan
+Tilak Sharma
+Travis Cline
+Tudor Golubenco
+Yukang
+bronze1man
+debrando
+henrikedwards
+铁哥
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
new file mode 100644
index 0000000000..8c732c1d85
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -0,0 +1,307 @@
+# Changelog
+
+## v1.4.2 / 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## v1.4.1 / 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## v1.4.0 / 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## v1.3.1 / 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## v1.3.0 / 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## v1.2.10 / 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## v1.2.9 / 2016-01-13
+
+* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## v1.2.8 / 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## v1.2.5 / 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## v1.2.1 / 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+ * add low-level functions
+ * only need to store flags on directories
+  * fewer mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+ * done can be an unbuffered channel
+ * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+ * current implementation doesn't take advantage of OS for efficiency
+ * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
+ * no tests for the current implementation
+ * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watch directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
new file mode 100644
index 0000000000..828a60b24b
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+ * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+ * Update the CHANGELOG
+ * Tag a version, which will become available through gopkg.in.
+
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
+* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE
new file mode 100644
index 0000000000..f21e540800
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
new file mode 100644
index 0000000000..3993207413
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -0,0 +1,79 @@
+# File system notifications for Go
+
+[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
+
+fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
+
+```console
+go get -u golang.org/x/sys/...
+```
+
+Cross platform: Windows, Linux, BSD and macOS.
+
+|Adapter |OS |Status |
+|----------|----------|----------|
+|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
+|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
+|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
+|fanotify |Linux 2.6.37+ | |
+|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
+|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
+
+## API stability
+
+fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
+
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
+
+Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## Example
+
+See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
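+
+For orientation, here is a minimal sketch of this API; the watched path `/tmp` and the shutdown handling are illustrative only:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/fsnotify/fsnotify"
+)
+
+func main() {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer watcher.Close()
+
+	// Drain Events and Errors in a separate goroutine (see the FAQ below).
+	go func() {
+		for {
+			select {
+			case event := <-watcher.Events:
+				log.Println("event:", event)
+				if event.Op&fsnotify.Write == fsnotify.Write {
+					log.Println("modified file:", event.Name)
+				}
+			case err := <-watcher.Errors:
+				log.Println("error:", err)
+			}
+		}
+	}()
+
+	if err := watcher.Add("/tmp"); err != nil {
+		log.Fatal(err)
+	}
+	select {} // Block forever; a real program would coordinate shutdown.
+}
+```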
+
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]).
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit; reaching this limit results in a "no space left on device" error. A quick way to check the limit is shown below.
+* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc"; reaching these limits results in a "too many open files" error.
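+
+As an aside, here is a small illustrative Go sketch (not part of this library) that reads the Linux limit from procfs:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strings"
+)
+
+func main() {
+	// Linux only: the per-user inotify watch limit.
+	data, err := ioutil.ReadFile("/proc/sys/fs/inotify/max_user_watches")
+	if err != nil {
+		fmt.Println("limit not available:", err)
+		return
+	}
+	fmt.Println("max_user_watches =", strings.TrimSpace(string(data)))
+}
+```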
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+
+## Related Projects
+
+* [notify](https://github.com/rjeczalik/notify)
+* [fsevents](https://github.com/fsnotify/fsevents)
+
diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go
new file mode 100644
index 0000000000..ced39cb881
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fen.go
@@ -0,0 +1,37 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package fsnotify
+
+import (
+ "errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	return nil, errors.New("FEN-based watcher not yet supported for fsnotify")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 0000000000..190bf0de57
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,66 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+ Name string // Relative path to the file or directory.
+ Op Op // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+ Create Op = 1 << iota
+ Write
+ Remove
+ Rename
+ Chmod
+)
+
+func (op Op) String() string {
+ // Use a buffer for efficient string concatenation
+ var buffer bytes.Buffer
+
+ if op&Create == Create {
+ buffer.WriteString("|CREATE")
+ }
+ if op&Remove == Remove {
+ buffer.WriteString("|REMOVE")
+ }
+ if op&Write == Write {
+ buffer.WriteString("|WRITE")
+ }
+ if op&Rename == Rename {
+ buffer.WriteString("|RENAME")
+ }
+ if op&Chmod == Chmod {
+ buffer.WriteString("|CHMOD")
+ }
+ if buffer.Len() == 0 {
+ return ""
+ }
+ return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+ return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var ErrEventOverflow = errors.New("fsnotify queue overflow")
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
new file mode 100644
index 0000000000..bfa9dbc3c7
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -0,0 +1,334 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ mu sync.Mutex // Map access
+	cv       *sync.Cond // sync removing on rm_watch with IN_IGNORED
+ fd int
+ poller *fdPoller
+ watches map[string]*watch // Map of inotify watches (key: path)
+ paths map[int]string // Map of watched paths (key: watch descriptor)
+ done chan struct{} // Channel for sending a "quit message" to the reader goroutine
+ doneResp chan struct{} // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ // Create inotify fd
+ fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+ if fd == -1 {
+ return nil, errno
+ }
+ // Create epoll
+ poller, err := newFdPoller(fd)
+ if err != nil {
+ unix.Close(fd)
+ return nil, err
+ }
+ w := &Watcher{
+ fd: fd,
+ poller: poller,
+ watches: make(map[string]*watch),
+ paths: make(map[int]string),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan struct{}),
+ doneResp: make(chan struct{}),
+ }
+ w.cv = sync.NewCond(&w.mu)
+
+ go w.readEvents()
+ return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+ select {
+ case <-w.done:
+ return true
+ default:
+ return false
+ }
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed() {
+ return nil
+ }
+
+ // Send 'close' signal to goroutine, and set the Watcher to closed.
+ close(w.done)
+
+ // Wake up goroutine
+ w.poller.wake()
+
+ // Wait for goroutine to close
+ <-w.doneResp
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ name = filepath.Clean(name)
+ if w.isClosed() {
+ return errors.New("inotify instance already closed")
+ }
+
+ const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+ unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+ unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+ var flags uint32 = agnosticEvents
+
+ w.mu.Lock()
+ watchEntry, found := w.watches[name]
+ w.mu.Unlock()
+ if found {
+ watchEntry.flags |= flags
+ flags |= unix.IN_MASK_ADD
+ }
+ wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+ if wd == -1 {
+ return errno
+ }
+
+ w.mu.Lock()
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ w.mu.Unlock()
+
+ return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+
+ // Fetch the watch.
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ watch, ok := w.watches[name]
+
+ // Remove it from inotify.
+ if !ok {
+ return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+ }
+	// inotify_rm_watch will return EINVAL if the file has been deleted;
+	// the inotify watch will already have been removed.
+	// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
+	// by calling inotify_rm_watch() below: the readEvents() goroutine receives
+	// IN_IGNORED, so EINVAL means the wd is being rm_watch()ed or its file was
+	// removed by another thread and we have not yet received the IN_IGNORED event.
+ success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+ if success == -1 {
+ // TODO: Perhaps it's not helpful to return an error here in every case.
+ // the only two possible errors are:
+ // EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+ // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+ // Watch descriptors are invalidated when they are removed explicitly or implicitly;
+ // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+ return errno
+ }
+
+	// Wait until ignoreLinux() has deleted the map entries.
+ exists := true
+ for exists {
+ w.cv.Wait()
+ _, exists = w.watches[name]
+ }
+
+ return nil
+}
+
+type watch struct {
+ wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+ flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+ var (
+ buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+ n int // Number of bytes read with read()
+ errno error // Syscall errno
+ ok bool // For poller.wait
+ )
+
+ defer close(w.doneResp)
+ defer close(w.Errors)
+ defer close(w.Events)
+ defer unix.Close(w.fd)
+ defer w.poller.close()
+
+ for {
+ // See if we have been closed.
+ if w.isClosed() {
+ return
+ }
+
+ ok, errno = w.poller.wait()
+ if errno != nil {
+ select {
+ case w.Errors <- errno:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ if !ok {
+ continue
+ }
+
+ n, errno = unix.Read(w.fd, buf[:])
+ // If a signal interrupted execution, see if we've been asked to close, and try again.
+ // http://man7.org/linux/man-pages/man7/signal.7.html :
+ // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+ if errno == unix.EINTR {
+ continue
+ }
+
+ // unix.Read might have been woken up by Close. If so, we're done.
+ if w.isClosed() {
+ return
+ }
+
+ if n < unix.SizeofInotifyEvent {
+ var err error
+ if n == 0 {
+				// Received EOF; this should really never happen.
+ err = io.EOF
+ } else if n < 0 {
+				// An error occurred while reading.
+ err = errno
+ } else {
+ // Read was too short.
+ err = errors.New("notify: short read in readEvents()")
+ }
+ select {
+ case w.Errors <- err:
+ case <-w.done:
+ return
+ }
+ continue
+ }
+
+ var offset uint32
+ // We don't know how many events we just read into the buffer
+ // While the offset points to at least one whole event...
+ for offset <= uint32(n-unix.SizeofInotifyEvent) {
+ // Point "raw" to the event in the buffer
+ raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+ mask := uint32(raw.Mask)
+ nameLen := uint32(raw.Len)
+
+ if mask&unix.IN_Q_OVERFLOW != 0 {
+ select {
+ case w.Errors <- ErrEventOverflow:
+ case <-w.done:
+ return
+ }
+ }
+
+			// If the event happened to the watched directory or the watched file, the kernel
+			// doesn't append the filename to the event, but we would like to always fill
+			// the "Name" field with a valid filename. We retrieve the path of the watch from
+			// the "paths" map.
+ w.mu.Lock()
+ name := w.paths[int(raw.Wd)]
+ w.mu.Unlock()
+ if nameLen > 0 {
+ // Point "bytes" at the first byte of the filename
+ bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+ // The filename is padded with NULL bytes. TrimRight() gets rid of those.
+ name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+ }
+
+ event := newEvent(name, mask)
+
+ // Send the events that are not ignored on the events channel
+ if !event.ignoreLinux(w, raw.Wd, mask) {
+ select {
+ case w.Events <- event:
+ case <-w.done:
+ return
+ }
+ }
+
+ // Move to the next event in the buffer
+ offset += unix.SizeofInotifyEvent + nameLen
+ }
+ }
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel, such as events marked as ignored by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
+ // Ignore anything the inotify API says to ignore
+ if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ name := w.paths[int(wd)]
+ delete(w.paths, int(wd))
+ delete(w.watches, name)
+ w.cv.Broadcast()
+ return true
+ }
+
+ // If the event is not a DELETE or RENAME, the file must exist.
+ // Otherwise the event is ignored.
+ // *Note*: this was put in place because it was seen that a MODIFY
+ // event was sent after the DELETE. This ignores that MODIFY and
+ // assumes a DELETE will come or has come if the file doesn't exist.
+ if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+ _, statErr := os.Lstat(e.Name)
+ return os.IsNotExist(statErr)
+ }
+ return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+ e.Op |= Create
+ }
+ if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+ e.Op |= Write
+ }
+ if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+ e.Op |= Rename
+ }
+ if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
new file mode 100644
index 0000000000..cc7db4b22e
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+ "errors"
+
+ "golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+ fd int // File descriptor (as returned by the inotify_init() syscall)
+ epfd int // Epoll file descriptor
+ pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+ poller := new(fdPoller)
+ poller.fd = fd
+ poller.epfd = -1
+ poller.pipe[0] = -1
+ poller.pipe[1] = -1
+ return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+func newFdPoller(fd int) (*fdPoller, error) {
+ var errno error
+ poller := emptyPoller(fd)
+ defer func() {
+ if errno != nil {
+ poller.close()
+ }
+ }()
+ poller.fd = fd
+
+ // Create epoll fd
+ poller.epfd, errno = unix.EpollCreate1(0)
+ if poller.epfd == -1 {
+ return nil, errno
+ }
+ // Create pipe; pipe[0] is the read end, pipe[1] the write end.
+ errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register inotify fd with epoll
+ event := unix.EpollEvent{
+ Fd: int32(poller.fd),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ // Register pipe fd with epoll
+ event = unix.EpollEvent{
+ Fd: int32(poller.pipe[0]),
+ Events: unix.EPOLLIN,
+ }
+ errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+ if errno != nil {
+ return nil, errno
+ }
+
+ return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+ // 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+ // I don't know whether epoll_wait returns the number of events returned,
+ // or the total number of events ready.
+ // I decided to catch both by making the buffer one larger than the maximum.
+ events := make([]unix.EpollEvent, 7)
+ for {
+ n, errno := unix.EpollWait(poller.epfd, events, -1)
+ if n == -1 {
+ if errno == unix.EINTR {
+ continue
+ }
+ return false, errno
+ }
+ if n == 0 {
+ // If there are no events, try again.
+ continue
+ }
+ if n > 6 {
+ // This should never happen. More events were returned than should be possible.
+ return false, errors.New("epoll_wait returned more events than I know what to do with")
+ }
+ ready := events[:n]
+ epollhup := false
+ epollerr := false
+ epollin := false
+ for _, event := range ready {
+ if event.Fd == int32(poller.fd) {
+ if event.Events&unix.EPOLLHUP != 0 {
+ // This should not happen, but if it does, treat it as a wakeup.
+ epollhup = true
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the file descriptor, we should pretend
+ // something is ready to read, and let unix.Read pick up the error.
+ epollerr = true
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // There is data to read.
+ epollin = true
+ }
+ }
+ if event.Fd == int32(poller.pipe[0]) {
+ if event.Events&unix.EPOLLHUP != 0 {
+					// The write end of the pipe was closed by us. This means we're
+					// closing down the watcher, and we should wake up.
+ }
+ if event.Events&unix.EPOLLERR != 0 {
+ // If an error is waiting on the pipe file descriptor.
+ // This is an absolute mystery, and should never ever happen.
+					return false, errors.New("error on the pipe descriptor")
+ }
+ if event.Events&unix.EPOLLIN != 0 {
+ // This is a regular wakeup, so we have to clear the buffer.
+ err := poller.clearWake()
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ }
+
+ if epollhup || epollerr || epollin {
+ return true, nil
+ }
+ return false, nil
+ }
+}
+
+// Write a byte to the pipe to wake up the poller.
+func (poller *fdPoller) wake() error {
+ buf := make([]byte, 1)
+ n, errno := unix.Write(poller.pipe[1], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is full, poller will wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+ // You have to be woken up a LOT in order to get to 100!
+ buf := make([]byte, 100)
+ n, errno := unix.Read(poller.pipe[0], buf)
+ if n == -1 {
+ if errno == unix.EAGAIN {
+ // Buffer is empty, someone else cleared our wake.
+ return nil
+ }
+ return errno
+ }
+ return nil
+}
+
+// Close all poller file descriptors, but not the one passed to it.
+func (poller *fdPoller) close() {
+ if poller.pipe[1] != -1 {
+ unix.Close(poller.pipe[1])
+ }
+ if poller.pipe[0] != -1 {
+ unix.Close(poller.pipe[0])
+ }
+ if poller.epfd != -1 {
+ unix.Close(poller.epfd)
+ }
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
new file mode 100644
index 0000000000..c2b4acb18d
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -0,0 +1,503 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ done chan bool // Channel for sending a "quit message" to the reader goroutine
+
+ kq int // File descriptor (as returned by the kqueue() syscall).
+
+ mu sync.Mutex // Protects access to watcher data
+ watches map[string]int // Map of watched file descriptors (key: path).
+ externalWatches map[string]bool // Map of watches added by user of the library.
+ dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
+ paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
+ fileExists map[string]bool // Keep track of whether we know this file exists (to stop duplicate create events).
+ isClosed bool // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+ name string
+ isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ kq, err := kqueue()
+ if err != nil {
+ return nil, err
+ }
+
+ w := &Watcher{
+ kq: kq,
+ watches: make(map[string]int),
+ dirFlags: make(map[string]uint32),
+ paths: make(map[int]pathInfo),
+ fileExists: make(map[string]bool),
+ externalWatches: make(map[string]bool),
+ Events: make(chan Event),
+ Errors: make(chan error),
+ done: make(chan bool),
+ }
+
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return nil
+ }
+ w.isClosed = true
+ w.mu.Unlock()
+
+ // copy paths to remove while locked
+ w.mu.Lock()
+ var pathsToRemove = make([]string, 0, len(w.watches))
+ for name := range w.watches {
+ pathsToRemove = append(pathsToRemove, name)
+ }
+ w.mu.Unlock()
+ // unlock before calling Remove, which also locks
+
+ var err error
+ for _, name := range pathsToRemove {
+ if e := w.Remove(name); e != nil && err == nil {
+ err = e
+ }
+ }
+
+ // Send "quit" message to the reader goroutine:
+ w.done <- true
+
+ return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ w.mu.Lock()
+ w.externalWatches[name] = true
+ w.mu.Unlock()
+ _, err := w.addWatch(name, noteAllEvents)
+ return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ name = filepath.Clean(name)
+ w.mu.Lock()
+ watchfd, ok := w.watches[name]
+ w.mu.Unlock()
+ if !ok {
+ return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+ }
+
+ const registerRemove = unix.EV_DELETE
+ if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+ return err
+ }
+
+ unix.Close(watchfd)
+
+ w.mu.Lock()
+ isDir := w.paths[watchfd].isDir
+ delete(w.watches, name)
+ delete(w.paths, watchfd)
+ delete(w.dirFlags, name)
+ w.mu.Unlock()
+
+ // Find all watched paths that are in this directory that are not external.
+ if isDir {
+ var pathsToRemove []string
+ w.mu.Lock()
+ for _, path := range w.paths {
+ wdir, _ := filepath.Split(path.name)
+ if filepath.Clean(wdir) == name {
+ if !w.externalWatches[path.name] {
+ pathsToRemove = append(pathsToRemove, path.name)
+ }
+ }
+ }
+ w.mu.Unlock()
+ for _, name := range pathsToRemove {
+ // Since these are internal, not much sense in propagating error
+ // to the user, as that will just confuse them with an error about
+ // a path they did not explicitly watch themselves.
+ w.Remove(name)
+ }
+ }
+
+ return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+ var isDir bool
+ // Make ./name and name equivalent
+ name = filepath.Clean(name)
+
+ w.mu.Lock()
+ if w.isClosed {
+ w.mu.Unlock()
+ return "", errors.New("kevent instance already closed")
+ }
+ watchfd, alreadyWatching := w.watches[name]
+ // We already have a watch, but we can still override flags.
+ if alreadyWatching {
+ isDir = w.paths[watchfd].isDir
+ }
+ w.mu.Unlock()
+
+ if !alreadyWatching {
+ fi, err := os.Lstat(name)
+ if err != nil {
+ return "", err
+ }
+
+ // Don't watch sockets.
+ if fi.Mode()&os.ModeSocket == os.ModeSocket {
+ return "", nil
+ }
+
+ // Don't watch named pipes.
+ if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+ return "", nil
+ }
+
+ // Follow Symlinks
+ // Unfortunately, Linux can add bogus symlinks to watch list without
+ // issue, and Windows can't do symlinks period (AFAIK). To maintain
+ // consistency, we will act like everything is fine. There will simply
+ // be no file events for broken symlinks.
+ // Hence the returns of nil on errors.
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+ name, err = filepath.EvalSymlinks(name)
+ if err != nil {
+ return "", nil
+ }
+
+ w.mu.Lock()
+ _, alreadyWatching = w.watches[name]
+ w.mu.Unlock()
+
+ if alreadyWatching {
+ return name, nil
+ }
+
+ fi, err = os.Lstat(name)
+ if err != nil {
+ return "", nil
+ }
+ }
+
+ watchfd, err = unix.Open(name, openMode, 0700)
+ if watchfd == -1 {
+ return "", err
+ }
+
+ isDir = fi.IsDir()
+ }
+
+ const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+ if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+ unix.Close(watchfd)
+ return "", err
+ }
+
+ if !alreadyWatching {
+ w.mu.Lock()
+ w.watches[name] = watchfd
+ w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+ w.mu.Unlock()
+ }
+
+ if isDir {
+ // Watch the directory if it has not been watched before,
+ // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+ w.mu.Lock()
+
+ watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+ (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+ // Store flags so this watch can be updated later
+ w.dirFlags[name] = flags
+ w.mu.Unlock()
+
+ if watchDir {
+ if err := w.watchDirectoryFiles(name); err != nil {
+ return "", err
+ }
+ }
+ }
+ return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+ eventBuffer := make([]unix.Kevent_t, 10)
+
+ for {
+ // See if there is a message on the "done" channel
+ select {
+ case <-w.done:
+ err := unix.Close(w.kq)
+ if err != nil {
+ w.Errors <- err
+ }
+ close(w.Events)
+ close(w.Errors)
+ return
+ default:
+ }
+
+ // Get new events
+ kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+ // EINTR is okay, the syscall was interrupted before timeout expired.
+ if err != nil && err != unix.EINTR {
+ w.Errors <- err
+ continue
+ }
+
+ // Flush the events we received to the Events channel
+ for len(kevents) > 0 {
+ kevent := &kevents[0]
+ watchfd := int(kevent.Ident)
+ mask := uint32(kevent.Fflags)
+ w.mu.Lock()
+ path := w.paths[watchfd]
+ w.mu.Unlock()
+ event := newEvent(path.name, mask)
+
+ if path.isDir && !(event.Op&Remove == Remove) {
+ // Double check to make sure the directory exists. This can happen when
+ // we do a rm -fr on a recursively watched folder: we may receive a
+ // modification event first even though the folder has been deleted,
+ // with the delete event arriving later.
+ if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+ // mark it as a delete event
+ event.Op |= Remove
+ }
+ }
+
+ if event.Op&Rename == Rename || event.Op&Remove == Remove {
+ w.Remove(event.Name)
+ w.mu.Lock()
+ delete(w.fileExists, event.Name)
+ w.mu.Unlock()
+ }
+
+ if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+ w.sendDirectoryChangeEvents(event.Name)
+ } else {
+ // Send the event on the Events channel
+ w.Events <- event
+ }
+
+ if event.Op&Remove == Remove {
+ // Look for a file that may have overwritten this.
+ // For example, mv f1 f2 will delete f2, then create f2.
+ if path.isDir {
+ fileDir := filepath.Clean(event.Name)
+ w.mu.Lock()
+ _, found := w.watches[fileDir]
+ w.mu.Unlock()
+ if found {
+ // make sure the directory exists before we watch for changes. When we
+ // do a recursive watch and perform rm -fr, the parent directory might
+ // have gone missing, ignore the missing directory and let the
+ // upcoming delete event remove the watch from the parent directory.
+ if _, err := os.Lstat(fileDir); err == nil {
+ w.sendDirectoryChangeEvents(fileDir)
+ }
+ }
+ } else {
+ filePath := filepath.Clean(event.Name)
+ if fileInfo, err := os.Lstat(filePath); err == nil {
+ w.sendFileCreatedEventIfNew(filePath, fileInfo)
+ }
+ }
+ }
+
+ // Move to next event
+ kevents = kevents[1:]
+ }
+ }
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+ e.Op |= Write
+ }
+ if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+ e.Op |= Rename
+ }
+ if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+func newCreateEvent(name string) Event {
+ return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ return err
+ }
+
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+ }
+
+ return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality lets the
+// BSD version of fsnotify match Linux inotify, which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+ // Get all files
+ files, err := ioutil.ReadDir(dirPath)
+ if err != nil {
+ w.Errors <- err
+ }
+
+ // Search for new files
+ for _, fileInfo := range files {
+ filePath := filepath.Join(dirPath, fileInfo.Name())
+ err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+ if err != nil {
+ return
+ }
+ }
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+ w.mu.Lock()
+ _, doesExist := w.fileExists[filePath]
+ w.mu.Unlock()
+ if !doesExist {
+ // Send create event
+ w.Events <- newCreateEvent(filePath)
+ }
+
+ // like watchDirectoryFiles (but without doing another ReadDir)
+ filePath, err = w.internalWatch(filePath, fileInfo)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ w.fileExists[filePath] = true
+ w.mu.Unlock()
+
+ return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+ if fileInfo.IsDir() {
+ // mimic Linux providing delete events for subdirectories
+ // but preserve the flags used if currently watching subdirectory
+ w.mu.Lock()
+ flags := w.dirFlags[name]
+ w.mu.Unlock()
+
+ flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+ return w.addWatch(name, flags)
+ }
+
+ // watch file to mimic Linux inotify
+ return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+ kq, err = unix.Kqueue()
+ if kq == -1 {
+ return kq, err
+ }
+ return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+ changes := make([]unix.Kevent_t, len(fds))
+
+ for i, fd := range fds {
+ // SetKevent converts int to the platform-specific types:
+ unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+ changes[i].Fflags = fflags
+ }
+
+ // register the events
+ success, err := unix.Kevent(kq, changes, nil, nil)
+ if success == -1 {
+ return err
+ }
+ return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+ n, err := unix.Kevent(kq, nil, events, timeout)
+ if err != nil {
+ return nil, err
+ }
+ return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) unix.Timespec {
+ return unix.NsecToTimespec(d.Nanoseconds())
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
new file mode 100644
index 0000000000..7d8de14513
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
new file mode 100644
index 0000000000..9139e17161
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 0000000000..09436f31d8
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sync"
+ "syscall"
+ "unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+ Events chan Event
+ Errors chan error
+ isClosed bool // Set to true when Close() is first called
+ mu sync.Mutex // Map access
+ port syscall.Handle // Handle to completion port
+ watches watchMap // Map of watches (key: i-number)
+ input chan *input // Inputs to the reader are sent on this channel
+ quit chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+ port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ w := &Watcher{
+ port: port,
+ watches: make(watchMap),
+ input: make(chan *input, 1),
+ Events: make(chan Event, 50),
+ Errors: make(chan error),
+ quit: make(chan chan<- error, 1),
+ }
+ go w.readEvents()
+ return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+ if w.isClosed {
+ return nil
+ }
+ w.isClosed = true
+
+ // Send "quit" message to the reader goroutine
+ ch := make(chan error)
+ w.quit <- ch
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+ if w.isClosed {
+ return errors.New("watcher already closed")
+ }
+ in := &input{
+ op: opAddWatch,
+ path: filepath.Clean(name),
+ flags: sysFSALLEVENTS,
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+ in := &input{
+ op: opRemoveWatch,
+ path: filepath.Clean(name),
+ reply: make(chan error),
+ }
+ w.input <- in
+ if err := w.wakeupReader(); err != nil {
+ return err
+ }
+ return <-in.reply
+}
+
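+// These sysFS* values mirror Linux inotify's IN_* flag values (e.g. 0x100
+// is IN_CREATE, 0x200 is IN_DELETE), which keeps the masks compatible with
+// the inotify-style semantics the rest of the package assumes.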
+const (
+ // Options for AddWatch
+ sysFSONESHOT = 0x80000000
+ sysFSONLYDIR = 0x1000000
+
+ // Events
+ sysFSACCESS = 0x1
+ sysFSALLEVENTS = 0xfff
+ sysFSATTRIB = 0x4
+ sysFSCLOSE = 0x18
+ sysFSCREATE = 0x100
+ sysFSDELETE = 0x200
+ sysFSDELETESELF = 0x400
+ sysFSMODIFY = 0x2
+ sysFSMOVE = 0xc0
+ sysFSMOVEDFROM = 0x40
+ sysFSMOVEDTO = 0x80
+ sysFSMOVESELF = 0x800
+
+ // Special events
+ sysFSIGNORED = 0x8000
+ sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+ e := Event{Name: name}
+ if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+ e.Op |= Create
+ }
+ if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+ e.Op |= Remove
+ }
+ if mask&sysFSMODIFY == sysFSMODIFY {
+ e.Op |= Write
+ }
+ if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+ e.Op |= Rename
+ }
+ if mask&sysFSATTRIB == sysFSATTRIB {
+ e.Op |= Chmod
+ }
+ return e
+}
+
+const (
+ opAddWatch = iota
+ opRemoveWatch
+)
+
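+// provisional marks a watch whose registration is still in progress. It is
+// kept in bit 32, above the 32-bit sysFS* event flags that share the same
+// uint64 mask, so it can never collide with them.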
+const (
+ provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+ op int
+ path string
+ flags uint32
+ reply chan error
+}
+
+type inode struct {
+ handle syscall.Handle
+ volume uint32
+ index uint64
+}
+
+type watch struct {
+ ov syscall.Overlapped
+ ino *inode // i-number
+ path string // Directory path
+ mask uint64 // Directory itself is being watched with these notify flags
+ names map[string]uint64 // Map of names being watched and their notify flags
+ rename string // Remembers the old name while renaming a file
+ buf [4096]byte
+}
+
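+// indexMap and watchMap form a two-level index over watches: watchMap is
+// keyed by volume serial number and indexMap by the 64-bit file index,
+// which together identify a directory uniquely (see inode and getIno).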
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+ e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+ if e != nil {
+ return os.NewSyscallError("PostQueuedCompletionStatus", e)
+ }
+ return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+ attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+ if e != nil {
+ return "", os.NewSyscallError("GetFileAttributes", e)
+ }
+ if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+ dir = pathname
+ } else {
+ dir, _ = filepath.Split(pathname)
+ dir = filepath.Clean(dir)
+ }
+ return
+}
+
+func getIno(path string) (ino *inode, err error) {
+ h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+ syscall.FILE_LIST_DIRECTORY,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil, syscall.OPEN_EXISTING,
+ syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+ if e != nil {
+ return nil, os.NewSyscallError("CreateFile", e)
+ }
+ var fi syscall.ByHandleFileInformation
+ if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+ syscall.CloseHandle(h)
+ return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+ }
+ ino = &inode{
+ handle: h,
+ volume: fi.VolumeSerialNumber,
+ index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+ }
+ return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+ if i := m[ino.volume]; i != nil {
+ return i[ino.index]
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+ i := m[ino.volume]
+ if i == nil {
+ i = make(indexMap)
+ m[ino.volume] = i
+ }
+ i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ if flags&sysFSONLYDIR != 0 && pathname != dir {
+ return nil
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watchEntry := w.watches.get(ino)
+ w.mu.Unlock()
+ if watchEntry == nil {
+ if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+ syscall.CloseHandle(ino.handle)
+ return os.NewSyscallError("CreateIoCompletionPort", e)
+ }
+ watchEntry = &watch{
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ }
+ w.mu.Lock()
+ w.watches.set(ino, watchEntry)
+ w.mu.Unlock()
+ flags |= provisional
+ } else {
+ syscall.CloseHandle(ino.handle)
+ }
+ if pathname == dir {
+ watchEntry.mask |= flags
+ } else {
+ watchEntry.names[filepath.Base(pathname)] |= flags
+ }
+ if err = w.startRead(watchEntry); err != nil {
+ return err
+ }
+ if pathname == dir {
+ watchEntry.mask &= ^provisional
+ } else {
+ watchEntry.names[filepath.Base(pathname)] &= ^provisional
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+ dir, err := getDir(pathname)
+ if err != nil {
+ return err
+ }
+ ino, err := getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watch := w.watches.get(ino)
+ w.mu.Unlock()
+ if watch == nil {
+ return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+ }
+ if pathname == dir {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ watch.mask = 0
+ } else {
+ name := filepath.Base(pathname)
+ w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+ for name, mask := range watch.names {
+ if mask&provisional == 0 {
+ w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+ }
+ delete(watch.names, name)
+ }
+ if watch.mask != 0 {
+ if watch.mask&provisional == 0 {
+ w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+ }
+ watch.mask = 0
+ }
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+ if e := syscall.CancelIo(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CancelIo", e)
+ w.deleteWatch(watch)
+ }
+ mask := toWindowsFlags(watch.mask)
+ for _, m := range watch.names {
+ mask |= toWindowsFlags(m)
+ }
+ if mask == 0 {
+ if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+ w.Errors <- os.NewSyscallError("CloseHandle", e)
+ }
+ w.mu.Lock()
+ delete(w.watches[watch.ino.volume], watch.ino.index)
+ w.mu.Unlock()
+ return nil
+ }
+ e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+ uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+ if e != nil {
+ err := os.NewSyscallError("ReadDirectoryChanges", e)
+ if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+ // Watched directory was probably removed
+ if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ err = nil
+ }
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ return err
+ }
+ return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+ var (
+ n, key uint32
+ ov *syscall.Overlapped
+ )
+ runtime.LockOSThread()
+
+ for {
+ e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+ watch := (*watch)(unsafe.Pointer(ov))
+
+ if watch == nil {
+ select {
+ case ch := <-w.quit:
+ w.mu.Lock()
+ var indexes []indexMap
+ for _, index := range w.watches {
+ indexes = append(indexes, index)
+ }
+ w.mu.Unlock()
+ for _, index := range indexes {
+ for _, watch := range index {
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ }
+ }
+ var err error
+ if e := syscall.CloseHandle(w.port); e != nil {
+ err = os.NewSyscallError("CloseHandle", e)
+ }
+ close(w.Events)
+ close(w.Errors)
+ ch <- err
+ return
+ case in := <-w.input:
+ switch in.op {
+ case opAddWatch:
+ in.reply <- w.addWatch(in.path, uint64(in.flags))
+ case opRemoveWatch:
+ in.reply <- w.remWatch(in.path)
+ }
+ default:
+ }
+ continue
+ }
+
+ switch e {
+ case syscall.ERROR_MORE_DATA:
+ if watch == nil {
+ w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+ } else {
+ // The i/o succeeded but the buffer is full.
+ // In theory we should be building up a full packet.
+ // In practice we can get away with just carrying on.
+ n = uint32(unsafe.Sizeof(watch.buf))
+ }
+ case syscall.ERROR_ACCESS_DENIED:
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ continue
+ case syscall.ERROR_OPERATION_ABORTED:
+ // CancelIo was called on this handle
+ continue
+ default:
+ w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
+ continue
+ case nil:
+ }
+
+ var offset uint32
+ for {
+ if n == 0 {
+ w.Events <- newEvent("", sysFSQOVERFLOW)
+ w.Errors <- errors.New("short read in readEvents()")
+ break
+ }
+
+ // Point "raw" to the event in the buffer
+ raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+ buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+ name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+ fullname := filepath.Join(watch.path, name)
+
+ var mask uint64
+ switch raw.Action {
+ case syscall.FILE_ACTION_REMOVED:
+ mask = sysFSDELETESELF
+ case syscall.FILE_ACTION_MODIFIED:
+ mask = sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ watch.rename = name
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ if watch.names[watch.rename] != 0 {
+ watch.names[name] |= watch.names[watch.rename]
+ delete(watch.names, watch.rename)
+ mask = sysFSMOVESELF
+ }
+ }
+
+ sendNameEvent := func() {
+ if w.sendEvent(fullname, watch.names[name]&mask) {
+ if watch.names[name]&sysFSONESHOT != 0 {
+ delete(watch.names, name)
+ }
+ }
+ }
+ if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ sendNameEvent()
+ }
+ if raw.Action == syscall.FILE_ACTION_REMOVED {
+ w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+ if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+ if watch.mask&sysFSONESHOT != 0 {
+ watch.mask = 0
+ }
+ }
+ if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+ fullname = filepath.Join(watch.path, watch.rename)
+ sendNameEvent()
+ }
+
+ // Move to the next event in the buffer
+ if raw.NextEntryOffset == 0 {
+ break
+ }
+ offset += raw.NextEntryOffset
+
+ // Error!
+ if offset >= n {
+ w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
+ break
+ }
+ }
+
+ if err := w.startRead(watch); err != nil {
+ w.Errors <- err
+ }
+ }
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+ if mask == 0 {
+ return false
+ }
+ event := newEvent(name, uint32(mask))
+ select {
+ case ch := <-w.quit:
+ w.quit <- ch
+ case w.Events <- event:
+ }
+ return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+ var m uint32
+ if mask&sysFSACCESS != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+ }
+ if mask&sysFSMODIFY != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+ }
+ if mask&sysFSATTRIB != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+ }
+ if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+ m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+ }
+ return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+ switch action {
+ case syscall.FILE_ACTION_ADDED:
+ return sysFSCREATE
+ case syscall.FILE_ACTION_REMOVED:
+ return sysFSDELETE
+ case syscall.FILE_ACTION_MODIFIED:
+ return sysFSMODIFY
+ case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+ return sysFSMOVEDFROM
+ case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+ return sysFSMOVEDTO
+ }
+ return 0
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
new file mode 100644
index 0000000000..e2493d18f3
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS
@@ -0,0 +1,144 @@
+# This is the official list of go-dockerclient authors for copyright purposes.
+
+Abhishek Chanda
+Adam Bell-Hanssen
+Adrien Kohlbecker
+Aldrin Leal
+Alex Dadgar
+Andreas Jaekle
+Andrews Medina
+Andrey Sibiryov
+Andy Goldstein
+Antonio Murdaca
+Artem Sidorenko
+Ben Marini
+Ben McCann
+Ben Parees
+Benno van den Berg
+Bradley Cicenas
+Brendan Fosberry
+Brian Lalor
+Brian P. Hamachek
+Brian Palmer
+Bryan Boreham
+Burke Libbey
+Carlos Diaz-Padron
+Carson A
+Cesar Wong
+Cezar Sa Espinola
+Cheah Chu Yeow
+cheneydeng
+Chris Bednarski
+CMGS
+Colin Hebert
+Craig Jellick
+Dan Williams
+Daniel, Dao Quang Minh
+Daniel Garcia
+Daniel Hiltgen
+Darren Shepherd
+Dave Choi
+David Huie
+Dawn Chen
+Dinesh Subhraveti
+Drew Wells
+Ed
+Elias G. Schneevoigt
+Erez Horev
+Eric Anderson
+Ethan Mosbaugh
+Ewout Prangsma
+Fabio Rehm
+Fatih Arslan
+Flavia Missi
+Francisco Souza
+Frank Groeneveld
+George Moura
+Grégoire Delattre
+Guillermo Álvarez Fernández
+Harry Zhang
+He Simei
+Ivan Mikushin
+James Bardin
+James Nugent
+Jari Kolehmainen
+Jason Wilder
+Jawher Moussa
+Jean-Baptiste Dalido
+Jeff Mitchell
+Jeffrey Hulten
+Jen Andre
+Jérôme Laurens
+Johan Euphrosine
+John Hughes
+Kamil Domanski
+Karan Misra
+Ken Herner
+Kim, Hirokuni
+Kostas Lekkas
+Kyle Allan
+Liron Levin
+Lior Yankovich
+Liu Peng
+Lorenz Leutgeb
+Lucas Clemente
+Lucas Weiblen
+Lyon Hill
+Mantas Matelis
+Marguerite des Trois Maisons
+Mariusz Borsa
+Martin Sweeney
+Máximo Cuadros Ortiz
+Michael Schmatz
+Michal Fojtik
+Mike Dillon
+Mrunal Patel
+Nate Jones
+Nguyen Sy Thanh Son
+Nicholas Van Wiggeren
+Nick Ethier
+Omeid Matten
+Orivej Desh
+Paul Bellamy
+Paul Morie
+Paul Weil
+Peter Edge
+Peter Jihoon Kim
+Phil Lu
+Philippe Lafoucrière
+Rafe Colton
+Raphaël Pinson
+Rob Miller
+Robbert Klarenbeek
+Robert Williamson
+Roman Khlystik
+Russell Haering
+Salvador Gironès
+Sam Rijs
+Sami Wagiaalla
+Samuel Archambault
+Samuel Karp
+Seth Jennings
+Shane Xie
+Silas Sewell
+Simon Eskildsen
+Simon Menke
+Skolos
+Soulou
+Sridhar Ratnakumar
+Summer Mousa
+Sunjin Lee
+Tarsis Azevedo
+Tim Schindler
+Timothy St. Clair
+Tobi Knaup
+Tom Wilkie
+Tonic
+ttyh061
+Victor Marmol
+Vincenzo Prignano
+Vlad Alexandru Ionescu
+Wiliam Souza
+Ye Yin
+Yu, Zou
+Yuriy Bogdanov
diff --git a/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
new file mode 100644
index 0000000000..7066344748
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
@@ -0,0 +1,6 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+You can find the Docker license at the following link:
+https://raw.githubusercontent.com/docker/docker/master/LICENSE
diff --git a/vendor/github.com/fsouza/go-dockerclient/LICENSE b/vendor/github.com/fsouza/go-dockerclient/LICENSE
new file mode 100644
index 0000000000..b1cdd4cd20
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2016, go-dockerclient authors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/fsouza/go-dockerclient/Makefile b/vendor/github.com/fsouza/go-dockerclient/Makefile
new file mode 100644
index 0000000000..417503f687
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/Makefile
@@ -0,0 +1,43 @@
+.PHONY: \
+ all \
+ lint \
+ vet \
+ fmt \
+ fmtcheck \
+ pretest \
+ test \
+ integration \
+ clean
+
+all: test
+
+lint:
+ @ go get -v github.com/golang/lint/golint
+ @ export output="$$(golint . | grep -v 'type name will be used as docker.DockerInfo')"; \
+ [ -n "$${output}" ] && echo "$${output}" && export status=1; \
+ exit $${status:-0}
+
+vet:
+ go vet ./...
+
+fmt:
+ gofmt -s -w .
+
+fmtcheck:
+ [ -z "$$(gofmt -s -d . | tee /dev/stderr)" ]
+
+testdeps:
+ go get -d -t ./...
+
+pretest: testdeps lint vet fmtcheck
+
+gotest:
+ go test $(GO_TEST_FLAGS) ./...
+
+test: pretest gotest
+
+integration:
+ go test -tags docker_integration -run TestIntegration -v
+
+clean:
+ go clean ./...
diff --git a/vendor/github.com/fsouza/go-dockerclient/README.markdown b/vendor/github.com/fsouza/go-dockerclient/README.markdown
new file mode 100644
index 0000000000..f638ec35fb
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/README.markdown
@@ -0,0 +1,99 @@
+# go-dockerclient
+
+[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient/master.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient)
+[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient)
+
+This package presents a client for the Docker remote API. It also provides
+support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/).
+It currently supports the Docker API up to version 1.23.
+
+This package also provides support for docker's network API, which is a simple
+passthrough to the libnetwork remote API. Note that docker's network API is
+only available in docker 1.8 and above, and only enabled in docker if
+DOCKER_EXPERIMENTAL is defined during the docker build process.
+
+For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/).
+
+## Example
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/fsouza/go-dockerclient"
+)
+
+func main() {
+ endpoint := "unix:///var/run/docker.sock"
+ client, _ := docker.NewClient(endpoint)
+ imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
+ for _, img := range imgs {
+ fmt.Println("ID: ", img.ID)
+ fmt.Println("RepoTags: ", img.RepoTags)
+ fmt.Println("Created: ", img.Created)
+ fmt.Println("Size: ", img.Size)
+ fmt.Println("VirtualSize: ", img.VirtualSize)
+ fmt.Println("ParentId: ", img.ParentID)
+ }
+}
+```
+
+## Using with TLS
+
+In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and path for key and certificates as parameters.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/fsouza/go-dockerclient"
+)
+
+func main() {
+ endpoint := "tcp://[ip]:[port]"
+ path := os.Getenv("DOCKER_CERT_PATH")
+ ca := fmt.Sprintf("%s/ca.pem", path)
+ cert := fmt.Sprintf("%s/cert.pem", path)
+ key := fmt.Sprintf("%s/key.pem", path)
+ client, _ := docker.NewTLSClient(endpoint, cert, key, ca)
+ // use client
+}
+```
+
+If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables
+`DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH`, you can use NewClientFromEnv.
+
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/fsouza/go-dockerclient"
+)
+
+func main() {
+ client, _ := docker.NewClientFromEnv()
+ // use client
+}
+```
+
+See the documentation for more details.
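+
+Once a client has been created with any of the constructors above, `Ping` is a
+cheap way to verify that the daemon is actually reachable (a minimal sketch):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/fsouza/go-dockerclient"
+)
+
+func main() {
+	client, err := docker.NewClientFromEnv()
+	if err != nil {
+		fmt.Println("could not create client:", err)
+		return
+	}
+	if err := client.Ping(); err != nil {
+		fmt.Println("docker daemon not reachable:", err)
+	}
+}
+```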
+
+## Developing
+
+All development commands can be seen in the [Makefile](Makefile).
+
+Committed code must pass:
+
+* [golint](https://github.com/golang/lint)
+* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet)
+* [gofmt](https://golang.org/cmd/gofmt)
+* [go test](https://golang.org/cmd/go/#hdr-Test_packages)
+
+Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository.
diff --git a/vendor/github.com/fsouza/go-dockerclient/auth.go b/vendor/github.com/fsouza/go-dockerclient/auth.go
new file mode 100644
index 0000000000..5ba9468410
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/auth.go
@@ -0,0 +1,182 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "strings"
+)
+
+// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed.
+var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg")
+
+// AuthConfiguration represents authentication options to use in the PushImage
+// method. It represents the authentication in the Docker index server.
+type AuthConfiguration struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Email string `json:"email,omitempty"`
+ ServerAddress string `json:"serveraddress,omitempty"`
+}
+
+// AuthConfigurations represents authentication options to use for the
+// PushImage method accommodating the new X-Registry-Config header
+type AuthConfigurations struct {
+ Configs map[string]AuthConfiguration `json:"configs"`
+}
+
+// AuthConfigurations119 is used to serialize a set of AuthConfigurations
+// for Docker API >= 1.19.
+type AuthConfigurations119 map[string]AuthConfiguration
+
+// dockerConfig represents a registry authentication configuration from the
+// .dockercfg file.
+type dockerConfig struct {
+ Auth string `json:"auth"`
+ Email string `json:"email"`
+}
+
+// NewAuthConfigurationsFromFile returns AuthConfigurations from a path containing JSON
+// in the same format as the .dockercfg file.
+func NewAuthConfigurationsFromFile(path string) (*AuthConfigurations, error) {
+ r, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ return NewAuthConfigurations(r)
+}
+
+func cfgPaths(dockerConfigEnv string, homeEnv string) []string {
+ var paths []string
+ if dockerConfigEnv != "" {
+ paths = append(paths, path.Join(dockerConfigEnv, "config.json"))
+ }
+ if homeEnv != "" {
+ paths = append(paths, path.Join(homeEnv, ".docker", "config.json"))
+ paths = append(paths, path.Join(homeEnv, ".dockercfg"))
+ }
+ return paths
+}
+
+// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from
+// system config files. The following files are checked in the order listed:
+// - $DOCKER_CONFIG/config.json if DOCKER_CONFIG set in the environment,
+// - $HOME/.docker/config.json
+// - $HOME/.dockercfg
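+//
+// A typical use looks like this (sketch; assumes one of the files above
+// exists and parses):
+//
+//     auths, err := NewAuthConfigurationsFromDockerCfg()
+//     if err != nil {
+//         return err // no usable configuration was found
+//     }
+//     for server, conf := range auths.Configs {
+//         fmt.Println(server, conf.Username)
+//     }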
+func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
+ err := fmt.Errorf("No docker configuration found")
+ var auths *AuthConfigurations
+
+ pathsToTry := cfgPaths(os.Getenv("DOCKER_CONFIG"), os.Getenv("HOME"))
+ for _, path := range pathsToTry {
+ auths, err = NewAuthConfigurationsFromFile(path)
+ if err == nil {
+ return auths, nil
+ }
+ }
+ return auths, err
+}
+
+// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the
+// same format as the .dockercfg file.
+func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) {
+ var auth *AuthConfigurations
+ confs, err := parseDockerConfig(r)
+ if err != nil {
+ return nil, err
+ }
+ auth, err = authConfigs(confs)
+ if err != nil {
+ return nil, err
+ }
+ return auth, nil
+}
+
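+// parseDockerConfig accepts both layouts found in the wild: the newer
+// config.json format, where the registry map is wrapped in an "auths"
+// object, and the older flat .dockercfg map. Both of the following decode
+// to the same map[string]dockerConfig (illustrative values):
+//
+//     {"auths": {"quay.io": {"auth": "dXNlcjpwYXNz", "email": "a@b.c"}}}
+//     {"quay.io": {"auth": "dXNlcjpwYXNz", "email": "a@b.c"}}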
+func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) {
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(r)
+ byteData := buf.Bytes()
+
+ confsWrapper := struct {
+ Auths map[string]dockerConfig `json:"auths"`
+ }{}
+ if err := json.Unmarshal(byteData, &confsWrapper); err == nil {
+ if len(confsWrapper.Auths) > 0 {
+ return confsWrapper.Auths, nil
+ }
+ }
+
+ var confs map[string]dockerConfig
+ if err := json.Unmarshal(byteData, &confs); err != nil {
+ return nil, err
+ }
+ return confs, nil
+}
+
+// authConfigs converts a dockerConfigs map to an AuthConfigurations object.
+func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) {
+ c := &AuthConfigurations{
+ Configs: make(map[string]AuthConfiguration),
+ }
+ for reg, conf := range confs {
+ data, err := base64.StdEncoding.DecodeString(conf.Auth)
+ if err != nil {
+ return nil, err
+ }
+ userpass := strings.SplitN(string(data), ":", 2)
+ if len(userpass) != 2 {
+ return nil, ErrCannotParseDockercfg
+ }
+ c.Configs[reg] = AuthConfiguration{
+ Email: conf.Email,
+ Username: userpass[0],
+ Password: userpass[1],
+ ServerAddress: reg,
+ }
+ }
+ return c, nil
+}
+
+// AuthStatus represents the authentication status for Docker API versions >= 1.23.
+type AuthStatus struct {
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
+ IdentityToken string `json:"IdentityToken,omitempty" yaml:"IdentityToken,omitempty"`
+}
+
+// AuthCheck validates the given credentials. It returns nil if successful.
+//
+// For Docker API versions >= 1.23, the AuthStatus struct will be populated, otherwise it will be empty.
+//
+// See https://goo.gl/6nsZkH for more details.
+func (c *Client) AuthCheck(conf *AuthConfiguration) (AuthStatus, error) {
+ var authStatus AuthStatus
+ if conf == nil {
+ return authStatus, fmt.Errorf("conf is nil")
+ }
+ resp, err := c.do("POST", "/auth", doOptions{data: conf})
+ if err != nil {
+ return authStatus, err
+ }
+ defer resp.Body.Close()
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return authStatus, err
+ }
+ if len(data) == 0 {
+ return authStatus, nil
+ }
+ if err := json.Unmarshal(data, &authStatus); err != nil {
+ return authStatus, err
+ }
+ return authStatus, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/cancelable.go b/vendor/github.com/fsouza/go-dockerclient/cancelable.go
new file mode 100644
index 0000000000..375fbd15c3
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/cancelable.go
@@ -0,0 +1,17 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.5
+
+package docker
+
+import "net/http"
+
+func cancelable(client *http.Client, req *http.Request) func() {
+ ch := make(chan struct{})
+ req.Cancel = ch
+ return func() {
+ close(ch)
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/cancelable_go14.go b/vendor/github.com/fsouza/go-dockerclient/cancelable_go14.go
new file mode 100644
index 0000000000..3c203986fc
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/cancelable_go14.go
@@ -0,0 +1,19 @@
+// Copyright 2016 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.5
+
+package docker
+
+import "net/http"
+
+func cancelable(client *http.Client, req *http.Request) func() {
+ return func() {
+ if rc, ok := client.Transport.(interface {
+ CancelRequest(*http.Request)
+ }); ok {
+ rc.CancelRequest(req)
+ }
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/change.go b/vendor/github.com/fsouza/go-dockerclient/change.go
new file mode 100644
index 0000000000..d133594d48
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/change.go
@@ -0,0 +1,43 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import "fmt"
+
+// ChangeType is a type for constants indicating the type of change
+// in a container
+type ChangeType int
+
+const (
+ // ChangeModify is the ChangeType for container modifications
+ ChangeModify ChangeType = iota
+
+ // ChangeAdd is the ChangeType for additions to a container
+ ChangeAdd
+
+ // ChangeDelete is the ChangeType for deletions from a container
+ ChangeDelete
+)
+
+// Change represents a change in a container.
+//
+// See https://goo.gl/9GsTIF for more details.
+type Change struct {
+ Path string
+ Kind ChangeType
+}
+
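+// String renders the change in the same format as `docker diff` output: a
+// kind letter followed by the path, e.g. "C /etc/passwd" for a modified file.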
+func (change *Change) String() string {
+ var kind string
+ switch change.Kind {
+ case ChangeModify:
+ kind = "C"
+ case ChangeAdd:
+ kind = "A"
+ case ChangeDelete:
+ kind = "D"
+ }
+ return fmt.Sprintf("%s %s", kind, change.Path)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/client.go b/vendor/github.com/fsouza/go-dockerclient/client.go
new file mode 100644
index 0000000000..9f1c0b5650
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/client.go
@@ -0,0 +1,1008 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package docker provides a client for the Docker remote API.
+//
+// See https://goo.gl/G3plxW for more details on the remote API.
+package docker
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/docker/docker/opts"
+ "github.com/docker/docker/pkg/homedir"
+ "github.com/docker/docker/pkg/stdcopy"
+ "github.com/hashicorp/go-cleanhttp"
+)
+
+const userAgent = "go-dockerclient"
+
+var (
+ // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
+ ErrInvalidEndpoint = errors.New("invalid endpoint")
+
+ // ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
+ ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
+
+ // ErrInactivityTimeout is returned when a streamable call has been inactive for some time.
+ ErrInactivityTimeout = errors.New("inactivity time exceeded timeout")
+
+ apiVersion112, _ = NewAPIVersion("1.12")
+ apiVersion119, _ = NewAPIVersion("1.19")
+ apiVersion124, _ = NewAPIVersion("1.24")
+)
+
+// APIVersion is an internal representation of a version of the Remote API.
+type APIVersion []int
+
+// NewAPIVersion returns an instance of APIVersion for the given string.
+//
+// The given string must be in the form <major>.<minor>, where <major>
+// and <minor> are integer numbers.
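+//
+// For example, both "1.24" and "1.24-rc1" parse to APIVersion{1, 24}:
+// anything after a dash is discarded before splitting on dots.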
+func NewAPIVersion(input string) (APIVersion, error) {
+ if !strings.Contains(input, ".") {
+ return nil, fmt.Errorf("Unable to parse version %q", input)
+ }
+ raw := strings.Split(input, "-")
+ arr := strings.Split(raw[0], ".")
+ ret := make(APIVersion, len(arr))
+ var err error
+ for i, val := range arr {
+ ret[i], err = strconv.Atoi(val)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val)
+ }
+ }
+ return ret, nil
+}
+
+func (version APIVersion) String() string {
+ var str string
+ for i, val := range version {
+ str += strconv.Itoa(val)
+ if i < len(version)-1 {
+ str += "."
+ }
+ }
+ return str
+}
+
+// LessThan is a function for comparing APIVersion structs
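+// (e.g. APIVersion{1, 19}.LessThan(APIVersion{1, 24}) is true; a shorter
+// version such as {1} compares less than a longer one such as {1, 0}).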
+func (version APIVersion) LessThan(other APIVersion) bool {
+ return version.compare(other) < 0
+}
+
+// LessThanOrEqualTo is a function for comparing APIVersion structs
+func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
+ return version.compare(other) <= 0
+}
+
+// GreaterThan is a function for comparing APIVersion structs
+func (version APIVersion) GreaterThan(other APIVersion) bool {
+ return version.compare(other) > 0
+}
+
+// GreaterThanOrEqualTo is a function for comparing APIVersion structs
+func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
+ return version.compare(other) >= 0
+}
+
+func (version APIVersion) compare(other APIVersion) int {
+ for i, v := range version {
+ if i <= len(other)-1 {
+ otherVersion := other[i]
+
+ if v < otherVersion {
+ return -1
+ } else if v > otherVersion {
+ return 1
+ }
+ }
+ }
+ if len(version) > len(other) {
+ return 1
+ }
+ if len(version) < len(other) {
+ return -1
+ }
+ return 0
+}
+
+// Client is the basic type of this package. It provides methods for
+// interaction with the API.
+type Client struct {
+ SkipServerVersionCheck bool
+ HTTPClient *http.Client
+ TLSConfig *tls.Config
+ Dialer *net.Dialer
+
+ endpoint string
+ endpointURL *url.URL
+ eventMonitor *eventMonitoringState
+ requestedAPIVersion APIVersion
+ serverAPIVersion APIVersion
+ expectedAPIVersion APIVersion
+ unixHTTPClient *http.Client
+
+ // A timeout to use when using both the unixHTTPClient and HTTPClient
+ timeout time.Duration
+}
+
+// NewClient returns a Client instance ready for communication with the given
+// server endpoint. It will use the latest remote API version available in the
+// server.
+func NewClient(endpoint string) (*Client, error) {
+ client, err := NewVersionedClient(endpoint, "")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewTLSClient returns a Client instance ready for TLS communications with the
+// given server endpoint, key and certificates. It will use the latest remote API version
+// available in the server.
+func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
+ client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file). It will use the latest remote API version available in the server.
+func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
+ client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewVersionedClient returns a Client instance ready for communication with
+// the given server endpoint, using a specific remote API version.
+func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
+ u, err := parseEndpoint(endpoint, false)
+ if err != nil {
+ return nil, err
+ }
+ var requestedAPIVersion APIVersion
+ if strings.Contains(apiVersionString, ".") {
+ requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &Client{
+ HTTPClient: cleanhttp.DefaultClient(),
+ Dialer: &net.Dialer{},
+ endpoint: endpoint,
+ endpointURL: u,
+ eventMonitor: new(eventMonitoringState),
+ requestedAPIVersion: requestedAPIVersion,
+ }, nil
+}
+
+// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient.
+func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
+ return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString)
+}
+
+// NewVersionedTLSClient returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates, using a specific remote API version.
+func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
+ certPEMBlock, err := ioutil.ReadFile(cert)
+ if err != nil {
+ return nil, err
+ }
+ keyPEMBlock, err := ioutil.ReadFile(key)
+ if err != nil {
+ return nil, err
+ }
+ caPEMCert, err := ioutil.ReadFile(ca)
+ if err != nil {
+ return nil, err
+ }
+ return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
+}
+
+// NewClientFromEnv returns a Client instance ready for communication created from
+// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH.
+//
+// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
+// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
+func NewClientFromEnv() (*Client, error) {
+ client, err := NewVersionedClientFromEnv("")
+ if err != nil {
+ return nil, err
+ }
+ client.SkipServerVersionCheck = true
+ return client, nil
+}
+
+// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from
+// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,
+// and using a specific remote API version.
+//
+// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
+// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
+func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
+ dockerEnv, err := getDockerEnv()
+ if err != nil {
+ return nil, err
+ }
+ dockerHost := dockerEnv.dockerHost
+ if dockerEnv.dockerTLSVerify {
+ parts := strings.SplitN(dockerEnv.dockerHost, "://", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
+ }
+ cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
+ key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
+ ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
+ return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString)
+ }
+ return NewVersionedClient(dockerEnv.dockerHost, apiVersionString)
+}
+
+// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the given
+// server endpoint, key and certificates (passed inline to the function as opposed to being
+// read from a local file), using a specific remote API version.
+func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
+ u, err := parseEndpoint(endpoint, true)
+ if err != nil {
+ return nil, err
+ }
+ var requestedAPIVersion APIVersion
+ if strings.Contains(apiVersionString, ".") {
+ requestedAPIVersion, err = NewAPIVersion(apiVersionString)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if certPEMBlock == nil || keyPEMBlock == nil {
+ return nil, errors.New("Both cert and key are required")
+ }
+ tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}}
+ if caPEMCert == nil {
+ tlsConfig.InsecureSkipVerify = true
+ } else {
+ caPool := x509.NewCertPool()
+ if !caPool.AppendCertsFromPEM(caPEMCert) {
+ return nil, errors.New("Could not add RootCA pem")
+ }
+ tlsConfig.RootCAs = caPool
+ }
+ tr := cleanhttp.DefaultTransport()
+ tr.TLSClientConfig = tlsConfig
+ if err != nil {
+ return nil, err
+ }
+ return &Client{
+ HTTPClient: &http.Client{Transport: tr},
+ TLSConfig: tlsConfig,
+ Dialer: &net.Dialer{},
+ endpoint: endpoint,
+ endpointURL: u,
+ eventMonitor: new(eventMonitoringState),
+ requestedAPIVersion: requestedAPIVersion,
+ }, nil
+}
+
+// SetTimeout takes a timeout and applies it to subsequent requests to the
+// docker engine.
+func (c *Client) SetTimeout(t time.Duration) {
+ c.timeout = t
+}
+
+func (c *Client) checkAPIVersion() error {
+ serverAPIVersionString, err := c.getServerAPIVersionString()
+ if err != nil {
+ return err
+ }
+ c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
+ if err != nil {
+ return err
+ }
+ if c.requestedAPIVersion == nil {
+ c.expectedAPIVersion = c.serverAPIVersion
+ } else {
+ c.expectedAPIVersion = c.requestedAPIVersion
+ }
+ return nil
+}
+
+// Endpoint returns the current endpoint. It's useful for getting the endpoint
+// when using functions that get this data from the environment (like
+// NewClientFromEnv).
+func (c *Client) Endpoint() string {
+ return c.endpoint
+}
+
+// Ping pings the docker server
+//
+// See https://goo.gl/kQCfJj for more details.
+func (c *Client) Ping() error {
+ path := "/_ping"
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != http.StatusOK {
+ return newError(resp)
+ }
+ resp.Body.Close()
+ return nil
+}
+
+func (c *Client) getServerAPIVersionString() (version string, err error) {
+ resp, err := c.do("GET", "/version", doOptions{})
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
+ }
+ var versionResponse map[string]interface{}
+ if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
+ return "", err
+ }
+ if version, ok := (versionResponse["ApiVersion"]).(string); ok {
+ return version, nil
+ }
+ return "", nil
+}
+
+type doOptions struct {
+ data interface{}
+ forceJSON bool
+ headers map[string]string
+}
+
+func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
+ var params io.Reader
+ if doOptions.data != nil || doOptions.forceJSON {
+ buf, err := json.Marshal(doOptions.data)
+ if err != nil {
+ return nil, err
+ }
+ params = bytes.NewBuffer(buf)
+ }
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return nil, err
+ }
+ }
+ httpClient := c.HTTPClient
+ protocol := c.endpointURL.Scheme
+ var u string
+ if protocol == "unix" {
+ httpClient = c.unixClient()
+ u = c.getFakeUnixURL(path)
+ } else {
+ u = c.getURL(path)
+ }
+
+ // If the user has provided a timeout, apply it.
+ if c.timeout != 0 {
+ httpClient.Timeout = c.timeout
+ }
+
+ req, err := http.NewRequest(method, u, params)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ if doOptions.data != nil {
+ req.Header.Set("Content-Type", "application/json")
+ } else if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+
+ for k, v := range doOptions.headers {
+ req.Header.Set(k, v)
+ }
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return nil, ErrConnectionRefused
+ }
+ return nil, err
+ }
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return nil, newError(resp)
+ }
+ return resp, nil
+}
+
+type streamOptions struct {
+ setRawTerminal bool
+ rawJSONStream bool
+ useJSONDecoder bool
+ headers map[string]string
+ in io.Reader
+ stdout io.Writer
+ stderr io.Writer
+ // timeout is the initial connection timeout
+ timeout time.Duration
+ // Timeout when no data is received; it's reset every time new data
+ // arrives.
+ inactivityTimeout time.Duration
+}
+
+func (c *Client) stream(method, path string, streamOptions streamOptions) error {
+ if (method == "POST" || method == "PUT") && streamOptions.in == nil {
+ streamOptions.in = bytes.NewReader(nil)
+ }
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return err
+ }
+ }
+ req, err := http.NewRequest(method, c.getURL(path), streamOptions.in)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("User-Agent", userAgent)
+ if method == "POST" {
+ req.Header.Set("Content-Type", "plain/text")
+ }
+ for key, val := range streamOptions.headers {
+ req.Header.Set(key, val)
+ }
+ var resp *http.Response
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if streamOptions.stdout == nil {
+ streamOptions.stdout = ioutil.Discard
+ }
+ if streamOptions.stderr == nil {
+ streamOptions.stderr = ioutil.Discard
+ }
+ cancelRequest := cancelable(c.HTTPClient, req)
+ if protocol == "unix" {
+ dial, err := c.Dialer.Dial(protocol, address)
+ if err != nil {
+ return err
+ }
+ cancelRequest = func() { dial.Close() }
+ defer dial.Close()
+ breader := bufio.NewReader(dial)
+ err = req.Write(dial)
+ if err != nil {
+ return err
+ }
+
+ // ReadResponse may hang if the server does not reply
+ if streamOptions.timeout > 0 {
+ dial.SetDeadline(time.Now().Add(streamOptions.timeout))
+ }
+
+ if resp, err = http.ReadResponse(breader, req); err != nil {
+ // Cancel timeout for future I/O operations
+ if streamOptions.timeout > 0 {
+ dial.SetDeadline(time.Time{})
+ }
+ if strings.Contains(err.Error(), "connection refused") {
+ return ErrConnectionRefused
+ }
+ return err
+ }
+ } else {
+ if resp, err = c.HTTPClient.Do(req); err != nil {
+ if strings.Contains(err.Error(), "connection refused") {
+ return ErrConnectionRefused
+ }
+ return err
+ }
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+ return newError(resp)
+ }
+ var canceled uint32
+ if streamOptions.inactivityTimeout > 0 {
+ ch := handleInactivityTimeout(&streamOptions, cancelRequest, &canceled)
+ defer close(ch)
+ }
+ err = handleStreamResponse(resp, &streamOptions)
+ if err != nil {
+ if atomic.LoadUint32(&canceled) != 0 {
+ return ErrInactivityTimeout
+ }
+ return err
+ }
+ return nil
+}
+
+func handleStreamResponse(resp *http.Response, streamOptions *streamOptions) error {
+ var err error
+ if !streamOptions.useJSONDecoder && resp.Header.Get("Content-Type") != "application/json" {
+ if streamOptions.setRawTerminal {
+ _, err = io.Copy(streamOptions.stdout, resp.Body)
+ } else {
+ _, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
+ }
+ return err
+ }
+ // If a raw JSON stream was requested, just copy it back to the output
+ // without decoding it.
+ if streamOptions.rawJSONStream {
+ _, err = io.Copy(streamOptions.stdout, resp.Body)
+ return err
+ }
+ dec := json.NewDecoder(resp.Body)
+ for {
+ var m jsonMessage
+ if err := dec.Decode(&m); err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+ if m.Stream != "" {
+ fmt.Fprint(streamOptions.stdout, m.Stream)
+ } else if m.Progress != "" {
+ fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress)
+ } else if m.Error != "" {
+ return errors.New(m.Error)
+ }
+ if m.Status != "" {
+ fmt.Fprintln(streamOptions.stdout, m.Status)
+ }
+ }
+ return nil
+}
+
+type proxyWriter struct {
+ io.Writer
+ calls uint64
+}
+
+func (p *proxyWriter) callCount() uint64 {
+ return atomic.LoadUint64(&p.calls)
+}
+
+func (p *proxyWriter) Write(data []byte) (int, error) {
+ atomic.AddUint64(&p.calls, 1)
+ return p.Writer.Write(data)
+}
+
+func handleInactivityTimeout(options *streamOptions, cancelRequest func(), canceled *uint32) chan<- struct{} {
+ done := make(chan struct{})
+ proxyStdout := &proxyWriter{Writer: options.stdout}
+ proxyStderr := &proxyWriter{Writer: options.stderr}
+ options.stdout = proxyStdout
+ options.stderr = proxyStderr
+ go func() {
+ var lastCallCount uint64
+ for {
+ select {
+ case <-time.After(options.inactivityTimeout):
+ case <-done:
+ return
+ }
+ curCallCount := proxyStdout.callCount() + proxyStderr.callCount()
+ if curCallCount == lastCallCount {
+ atomic.AddUint32(canceled, 1)
+ cancelRequest()
+ return
+ }
+ lastCallCount = curCallCount
+ }
+ }()
+ return done
+}
+
+type hijackOptions struct {
+ success chan struct{}
+ setRawTerminal bool
+ in io.Reader
+ stdout io.Writer
+ stderr io.Writer
+ data interface{}
+}
+
+// CloseWaiter is an interface with methods for closing the underlying resource
+// and then waiting for it to finish processing.
+type CloseWaiter interface {
+ io.Closer
+ Wait() error
+}
+
+type waiterFunc func() error
+
+func (w waiterFunc) Wait() error { return w() }
+
+type closerFunc func() error
+
+func (c closerFunc) Close() error { return c() }
+
+func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (CloseWaiter, error) {
+ if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
+ err := c.checkAPIVersion()
+ if err != nil {
+ return nil, err
+ }
+ }
+ var params io.Reader
+ if hijackOptions.data != nil {
+ buf, err := json.Marshal(hijackOptions.data)
+ if err != nil {
+ return nil, err
+ }
+ params = bytes.NewBuffer(buf)
+ }
+ req, err := http.NewRequest(method, c.getURL(path), params)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", "tcp")
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol != "unix" {
+ protocol = "tcp"
+ address = c.endpointURL.Host
+ }
+ var dial net.Conn
+ if c.TLSConfig != nil && protocol != "unix" {
+ dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ dial, err = c.Dialer.Dial(protocol, address)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ errs := make(chan error)
+ quit := make(chan struct{})
+ go func() {
+ clientconn := httputil.NewClientConn(dial, nil)
+ defer clientconn.Close()
+ clientconn.Do(req)
+ if hijackOptions.success != nil {
+ hijackOptions.success <- struct{}{}
+ <-hijackOptions.success
+ }
+ rwc, br := clientconn.Hijack()
+ defer rwc.Close()
+
+ errChanOut := make(chan error, 1)
+ errChanIn := make(chan error, 1)
+ if hijackOptions.stdout == nil && hijackOptions.stderr == nil {
+ close(errChanOut)
+ } else {
+ // Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set.
+ // Otherwise, if the only stream you care about is stdin, your attach session
+ // will "hang" until the container terminates, even though you're not reading
+ // stdout/stderr
+ if hijackOptions.stdout == nil {
+ hijackOptions.stdout = ioutil.Discard
+ }
+ if hijackOptions.stderr == nil {
+ hijackOptions.stderr = ioutil.Discard
+ }
+
+ go func() {
+ defer func() {
+ if hijackOptions.in != nil {
+ if closer, ok := hijackOptions.in.(io.Closer); ok {
+ closer.Close()
+ }
+ errChanIn <- nil
+ }
+ }()
+
+ var err error
+ if hijackOptions.setRawTerminal {
+ _, err = io.Copy(hijackOptions.stdout, br)
+ } else {
+ _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
+ }
+ errChanOut <- err
+ }()
+ }
+
+ go func() {
+ var err error
+ if hijackOptions.in != nil {
+ _, err = io.Copy(rwc, hijackOptions.in)
+ }
+ errChanIn <- err
+ rwc.(interface {
+ CloseWrite() error
+ }).CloseWrite()
+ }()
+
+ var errIn error
+ select {
+ case errIn = <-errChanIn:
+ case <-quit:
+ return
+ }
+
+ var errOut error
+ select {
+ case errOut = <-errChanOut:
+ case <-quit:
+ return
+ }
+
+ if errIn != nil {
+ errs <- errIn
+ } else {
+ errs <- errOut
+ }
+ }()
+
+ return struct {
+ closerFunc
+ waiterFunc
+ }{
+ closerFunc(func() error { close(quit); return nil }),
+ waiterFunc(func() error { return <-errs }),
+ }, nil
+}
+
+func (c *Client) getURL(path string) string {
+ urlStr := strings.TrimRight(c.endpointURL.String(), "/")
+ if c.endpointURL.Scheme == "unix" {
+ urlStr = ""
+ }
+ if c.requestedAPIVersion != nil {
+ return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
+ }
+ return fmt.Sprintf("%s%s", urlStr, path)
+}
+
+// getFakeUnixURL returns the URL needed to make an HTTP request over a UNIX
+// domain socket to the given path.
+func (c *Client) getFakeUnixURL(path string) string {
+ u := *c.endpointURL // Copy.
+
+ // Override URL so that net/http will not complain.
+ u.Scheme = "http"
+ u.Host = "unix.sock" // Doesn't matter what this is - it's not used.
+ u.Path = ""
+ urlStr := strings.TrimRight(u.String(), "/")
+ if c.requestedAPIVersion != nil {
+ return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
+ }
+ return fmt.Sprintf("%s%s", urlStr, path)
+}
+
+func (c *Client) unixClient() *http.Client {
+ if c.unixHTTPClient != nil {
+ return c.unixHTTPClient
+ }
+ socketPath := c.endpointURL.Path
+ tr := cleanhttp.DefaultTransport()
+ tr.Dial = func(network, addr string) (net.Conn, error) {
+ return c.Dialer.Dial("unix", socketPath)
+ }
+ c.unixHTTPClient = &http.Client{Transport: tr}
+ return c.unixHTTPClient
+}
+
+type jsonMessage struct {
+ Status string `json:"status,omitempty"`
+ Progress string `json:"progress,omitempty"`
+ Error string `json:"error,omitempty"`
+ Stream string `json:"stream,omitempty"`
+}
+
+func queryString(opts interface{}) string {
+ if opts == nil {
+ return ""
+ }
+ value := reflect.ValueOf(opts)
+ if value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ if value.Kind() != reflect.Struct {
+ return ""
+ }
+ items := url.Values(map[string][]string{})
+ for i := 0; i < value.NumField(); i++ {
+ field := value.Type().Field(i)
+ if field.PkgPath != "" {
+ continue
+ }
+ key := field.Tag.Get("qs")
+ if key == "" {
+ key = strings.ToLower(field.Name)
+ } else if key == "-" {
+ continue
+ }
+ addQueryStringValue(items, key, value.Field(i))
+ }
+ return items.Encode()
+}
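+
+// For illustration: exported fields map to lowercased keys (or the `qs` tag,
+// when present), and zero values are dropped, so
+//
+//	queryString(ListContainersOptions{All: true, Limit: 5})
+//
+// yields "all=1&limit=5" (booleans are encoded as "1" when true).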
+
+func addQueryStringValue(items url.Values, key string, v reflect.Value) {
+ switch v.Kind() {
+ case reflect.Bool:
+ if v.Bool() {
+ items.Add(key, "1")
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if v.Int() > 0 {
+ items.Add(key, strconv.FormatInt(v.Int(), 10))
+ }
+ case reflect.Float32, reflect.Float64:
+ if v.Float() > 0 {
+ items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
+ }
+ case reflect.String:
+ if v.String() != "" {
+ items.Add(key, v.String())
+ }
+ case reflect.Ptr:
+ if !v.IsNil() {
+ if b, err := json.Marshal(v.Interface()); err == nil {
+ items.Add(key, string(b))
+ }
+ }
+ case reflect.Map:
+ if len(v.MapKeys()) > 0 {
+ if b, err := json.Marshal(v.Interface()); err == nil {
+ items.Add(key, string(b))
+ }
+ }
+ case reflect.Array, reflect.Slice:
+ vLen := v.Len()
+ if vLen > 0 {
+ for i := 0; i < vLen; i++ {
+ addQueryStringValue(items, key, v.Index(i))
+ }
+ }
+ }
+}
+
+// Error represents failures in the API, carrying the HTTP status code and the
+// message returned by the server.
+type Error struct {
+ Status int
+ Message string
+}
+
+func newError(resp *http.Response) *Error {
+ defer resp.Body.Close()
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)}
+ }
+ return &Error{Status: resp.StatusCode, Message: string(data)}
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
+}
+
+func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
+ if endpoint != "" && !strings.Contains(endpoint, "://") {
+ endpoint = "tcp://" + endpoint
+ }
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, ErrInvalidEndpoint
+ }
+ if tls {
+ u.Scheme = "https"
+ }
+ switch u.Scheme {
+ case "unix":
+ return u, nil
+ case "http", "https", "tcp":
+ _, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ if e, ok := err.(*net.AddrError); ok {
+ if e.Err == "missing port in address" {
+ return u, nil
+ }
+ }
+ return nil, ErrInvalidEndpoint
+ }
+ number, err := strconv.ParseInt(port, 10, 64)
+ if err == nil && number > 0 && number < 65536 {
+ if u.Scheme == "tcp" {
+ if tls {
+ u.Scheme = "https"
+ } else {
+ u.Scheme = "http"
+ }
+ }
+ return u, nil
+ }
+ return nil, ErrInvalidEndpoint
+ default:
+ return nil, ErrInvalidEndpoint
+ }
+}
+
+type dockerEnv struct {
+ dockerHost string
+ dockerTLSVerify bool
+ dockerCertPath string
+}
+
+func getDockerEnv() (*dockerEnv, error) {
+ dockerHost := os.Getenv("DOCKER_HOST")
+ var err error
+ if dockerHost == "" {
+ dockerHost, err = DefaultDockerHost()
+ if err != nil {
+ return nil, err
+ }
+ }
+ dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
+ var dockerCertPath string
+ if dockerTLSVerify {
+ dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
+ if dockerCertPath == "" {
+ home := homedir.Get()
+ if home == "" {
+ return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set")
+ }
+ dockerCertPath = filepath.Join(home, ".docker")
+ dockerCertPath, err = filepath.Abs(dockerCertPath)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return &dockerEnv{
+ dockerHost: dockerHost,
+ dockerTLSVerify: dockerTLSVerify,
+ dockerCertPath: dockerCertPath,
+ }, nil
+}
+
+// DefaultDockerHost returns the default docker socket for the current OS
+func DefaultDockerHost() (string, error) {
+ var defaultHost string
+ if runtime.GOOS == "windows" {
+ // If we do not have a host, default to TCP socket on Windows
+ defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
+ } else {
+ // If we do not have a host, default to unix socket
+ defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
+ }
+ return opts.ValidateHost(defaultHost)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go
new file mode 100644
index 0000000000..281d3cca76
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/container.go
@@ -0,0 +1,1363 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/docker/go-units"
+)
+
+// ErrContainerAlreadyExists is the error returned by CreateContainer when the
+// container already exists.
+var ErrContainerAlreadyExists = errors.New("container already exists")
+
+// ListContainersOptions specify parameters to the ListContainers function.
+//
+// See https://goo.gl/47a6tO for more details.
+type ListContainersOptions struct {
+ All bool
+ Size bool
+ Limit int
+ Since string
+ Before string
+ Filters map[string][]string
+}
+
+// APIPort is a type that represents a port mapping returned by the Docker API
+type APIPort struct {
+ PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty"`
+ PublicPort int64 `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty"`
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty"`
+ IP string `json:"IP,omitempty" yaml:"IP,omitempty"`
+}
+
+// APIMount represents a mount point for a container.
+type APIMount struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Source string `json:"Source,omitempty" yaml:"Source,omitempty"`
+ Destination string `json:"Destination,omitempty" yaml:"Destination,omitempty"`
+ Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
+ Mode string `json:"Mode,omitempty" yaml:"Mode,omitempty"`
+ RW bool `json:"RW,omitempty" yaml:"RW,omitempty"`
+ Propagation string `json:"Propagation,omitempty" yaml:"Propagation,omitempty"`
+}
+
+// APIContainers represents each container in the list returned by
+// ListContainers.
+type APIContainers struct {
+ ID string `json:"Id" yaml:"Id"`
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+ Command string `json:"Command,omitempty" yaml:"Command,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"`
+ State string `json:"State,omitempty" yaml:"State,omitempty"`
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
+ Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty"`
+ SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"`
+ SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty"`
+ Names []string `json:"Names,omitempty" yaml:"Names,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+ Networks NetworkList `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty"`
+ Mounts []APIMount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
+}
+
+// NetworkList encapsulates a map of networks, as returned by the Docker API in
+// ListContainers.
+type NetworkList struct {
+ Networks map[string]ContainerNetwork `json:"Networks" yaml:"Networks,omitempty"`
+}
+
+// ListContainers returns a slice of containers matching the given criteria.
+//
+// See https://goo.gl/47a6tO for more details.
+func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) {
+ path := "/containers/json?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var containers []APIContainers
+ if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil {
+ return nil, err
+ }
+ return containers, nil
+}
+
+// Port represents the port number and the protocol, in the form
+// <number>/<protocol>. For example: 80/tcp.
+type Port string
+
+// Port returns the number of the port.
+func (p Port) Port() string {
+ return strings.Split(string(p), "/")[0]
+}
+
+// Proto returns the name of the protocol.
+func (p Port) Proto() string {
+ parts := strings.Split(string(p), "/")
+ if len(parts) == 1 {
+ return "tcp"
+ }
+ return parts[1]
+}
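+
+// For example (illustrative), a missing protocol defaults to "tcp":
+//
+//	docker.Port("80/tcp").Port()  // "80"
+//	docker.Port("80/tcp").Proto() // "tcp"
+//	docker.Port("53").Proto()     // "tcp" (default)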
+
+// State represents the state of a container.
+type State struct {
+ Status string `json:"Status,omitempty" yaml:"Status,omitempty"`
+ Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
+ Paused bool `json:"Paused,omitempty" yaml:"Paused,omitempty"`
+ Restarting bool `json:"Restarting,omitempty" yaml:"Restarting,omitempty"`
+ OOMKilled bool `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty"`
+ RemovalInProgress bool `json:"RemovalInProgress,omitempty" yaml:"RemovalInProgress,omitempty"`
+ Dead bool `json:"Dead,omitempty" yaml:"Dead,omitempty"`
+ Pid int `json:"Pid,omitempty" yaml:"Pid,omitempty"`
+ ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
+ Error string `json:"Error,omitempty" yaml:"Error,omitempty"`
+ StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty"`
+ FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty"`
+}
+
+// String returns a human-readable description of the state
+func (s *State) String() string {
+ if s.Running {
+ if s.Paused {
+ return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+ }
+ if s.Restarting {
+ return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+ }
+
+ return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+ }
+
+ if s.RemovalInProgress {
+ return "Removal In Progress"
+ }
+
+ if s.Dead {
+ return "Dead"
+ }
+
+ if s.StartedAt.IsZero() {
+ return "Created"
+ }
+
+ if s.FinishedAt.IsZero() {
+ return ""
+ }
+
+ return fmt.Sprintf("Exited (%d) %s ago", s.ExitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+}
+
+// StateString returns a single string to describe state
+func (s *State) StateString() string {
+ if s.Running {
+ if s.Paused {
+ return "paused"
+ }
+ if s.Restarting {
+ return "restarting"
+ }
+ return "running"
+ }
+
+ if s.Dead {
+ return "dead"
+ }
+
+ if s.StartedAt.IsZero() {
+ return "created"
+ }
+
+ return "exited"
+}
+
+// PortBinding represents the host/container port mapping as returned in the
+// `docker inspect` json
+type PortBinding struct {
+ HostIP string `json:"HostIp,omitempty" yaml:"HostIp,omitempty"`
+ HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty"`
+}
+
+// PortMapping represents a deprecated field in the `docker inspect` output,
+// and its value as found in NetworkSettings should always be nil
+type PortMapping map[string]string
+
+// ContainerNetwork represents the networking settings of a container per network.
+type ContainerNetwork struct {
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
+ GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"`
+ GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"`
+ IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"`
+ IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"`
+ IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"`
+ Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"`
+ EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"`
+ NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty"`
+}
+
+// NetworkSettings contains network-related information about a container
+type NetworkSettings struct {
+ Networks map[string]ContainerNetwork `json:"Networks,omitempty" yaml:"Networks,omitempty"`
+ IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"`
+ IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
+ Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"`
+ Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty"`
+ PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty"`
+ Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty"`
+ NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty"`
+ EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"`
+ SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty"`
+ GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"`
+ GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"`
+ IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"`
+ LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty"`
+ LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty"`
+ SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty"`
+ SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty"`
+}
+
+// PortMappingAPI translates the port mappings as contained in NetworkSettings
+// into the format in which they would appear when returned by the API
+func (settings *NetworkSettings) PortMappingAPI() []APIPort {
+ var mapping []APIPort
+ for port, bindings := range settings.Ports {
+ p, _ := parsePort(port.Port())
+ if len(bindings) == 0 {
+ mapping = append(mapping, APIPort{
+ PrivatePort: int64(p),
+ Type: port.Proto(),
+ })
+ continue
+ }
+ for _, binding := range bindings {
+ p, _ := parsePort(port.Port())
+ h, _ := parsePort(binding.HostPort)
+ mapping = append(mapping, APIPort{
+ PrivatePort: int64(p),
+ PublicPort: int64(h),
+ Type: port.Proto(),
+ IP: binding.HostIP,
+ })
+ }
+ }
+ return mapping
+}
+
+func parsePort(rawPort string) (int, error) {
+ port, err := strconv.ParseUint(rawPort, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int(port), nil
+}
+
+// Config is the list of configuration options used when creating a container.
+// Config does not contain the options that are specific to starting a container on a
+// given host. Those are contained in HostConfig
+type Config struct {
+ Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"`
+ Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"`
+ User string `json:"User,omitempty" yaml:"User,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
+ MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"`
+ KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"`
+ CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
+ CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
+ PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"`
+ ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"`
+ PublishService string `json:"PublishService,omitempty" yaml:"PublishService,omitempty"`
+ ArgsEscaped bool `json:"ArgsEscaped,omitempty" yaml:"ArgsEscaped,omitempty"`
+ StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
+ StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"`
+ Env []string `json:"Env,omitempty" yaml:"Env,omitempty"`
+ Cmd []string `json:"Cmd" yaml:"Cmd"`
+ DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+ Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
+ VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"`
+ VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
+ WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
+ Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"`
+ NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
+ SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"`
+ OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"`
+ Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// Mount represents a mount point in the container.
+//
+// It has been added in the version 1.20 of the Docker API, available since
+// Docker 1.8.
+type Mount struct {
+ Name string
+ Source string
+ Destination string
+ Driver string
+ Mode string
+ RW bool
+}
+
+// LogConfig defines the log driver type and the configuration for it.
+type LogConfig struct {
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty"`
+ Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty"`
+}
+
+// ULimit defines system-wide resource limitations
+// This can help a lot in system administration, e.g. when a user starts too many processes and therefore makes the system unresponsive for other users.
+type ULimit struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Soft int64 `json:"Soft,omitempty" yaml:"Soft,omitempty"`
+ Hard int64 `json:"Hard,omitempty" yaml:"Hard,omitempty"`
+}
+
+// SwarmNode contains information about which Swarm node the container is on
+type SwarmNode struct {
+ ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
+ IP string `json:"IP,omitempty" yaml:"IP,omitempty"`
+ Addr string `json:"Addr,omitempty" yaml:"Addr,omitempty"`
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ CPUs int64 `json:"CPUs,omitempty" yaml:"CPUs,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// GraphDriver contains information about the GraphDriver used by the container
+type GraphDriver struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Data map[string]string `json:"Data,omitempty" yaml:"Data,omitempty"`
+}
+
+// Container is the type encompassing everything about a container - its config,
+// hostconfig, etc.
+type Container struct {
+ ID string `json:"Id" yaml:"Id"`
+
+ Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"`
+
+ Path string `json:"Path,omitempty" yaml:"Path,omitempty"`
+ Args []string `json:"Args,omitempty" yaml:"Args,omitempty"`
+
+ Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"`
+ State State `json:"State,omitempty" yaml:"State,omitempty"`
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+
+ Node *SwarmNode `json:"Node,omitempty" yaml:"Node,omitempty"`
+
+ NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty"`
+
+ SysInitPath string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty"`
+ ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty"`
+ HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty"`
+ HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty"`
+ LogPath string `json:"LogPath,omitempty" yaml:"LogPath,omitempty"`
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
+ Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
+
+ Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
+ VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty"`
+ HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
+ ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty"`
+ GraphDriver *GraphDriver `json:"GraphDriver,omitempty" yaml:"GraphDriver,omitempty"`
+
+ RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty"`
+
+ AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty"`
+}
+
+// UpdateContainerOptions specify parameters to the UpdateContainer function.
+//
+// See https://goo.gl/Y6fXUy for more details.
+type UpdateContainerOptions struct {
+ BlkioWeight int `json:"BlkioWeight"`
+ CPUShares int `json:"CpuShares"`
+ CPUPeriod int `json:"CpuPeriod"`
+ CPUQuota int `json:"CpuQuota"`
+ CpusetCpus string `json:"CpusetCpus"`
+ CpusetMems string `json:"CpusetMems"`
+ Memory int `json:"Memory"`
+ MemorySwap int `json:"MemorySwap"`
+ MemoryReservation int `json:"MemoryReservation"`
+ KernelMemory int `json:"KernelMemory"`
+ RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty"`
+}
+
+// UpdateContainer updates the container at ID with the options
+//
+// See https://goo.gl/Y6fXUy for more details.
+func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error {
+ resp, err := c.do("POST", fmt.Sprintf("/containers/"+id+"/update"), doOptions{data: opts, forceJSON: true})
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ return nil
+}
+
+// RenameContainerOptions specify parameters to the RenameContainer function.
+//
+// See https://goo.gl/laSOIy for more details.
+type RenameContainerOptions struct {
+ // ID of container to rename
+ ID string `qs:"-"`
+
+ // New name
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+}
+
+// RenameContainer updates an existing container's name
+//
+// See https://goo.gl/laSOIy for more details.
+func (c *Client) RenameContainer(opts RenameContainerOptions) error {
+ resp, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// InspectContainer returns information about a container by its ID.
+//
+// See https://goo.gl/RdIq0b for more details.
+func (c *Client) InspectContainer(id string) (*Container, error) {
+ path := "/containers/" + id + "/json"
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var container Container
+ if err := json.NewDecoder(resp.Body).Decode(&container); err != nil {
+ return nil, err
+ }
+ return &container, nil
+}
+
+// ContainerChanges returns changes in the filesystem of the given container.
+//
+// See https://goo.gl/9GsTIF for more details.
+func (c *Client) ContainerChanges(id string) ([]Change, error) {
+ path := "/containers/" + id + "/changes"
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var changes []Change
+ if err := json.NewDecoder(resp.Body).Decode(&changes); err != nil {
+ return nil, err
+ }
+ return changes, nil
+}
+
+// CreateContainerOptions specify parameters to the CreateContainer function.
+//
+// See https://goo.gl/WxQzrr for more details.
+type CreateContainerOptions struct {
+ Name string
+ Config *Config `qs:"-"`
+ HostConfig *HostConfig `qs:"-"`
+ NetworkingConfig *NetworkingConfig `qs:"-"`
+}
+
+// CreateContainer creates a new container, returning the container instance,
+// or an error in case of failure.
+//
+// See https://goo.gl/WxQzrr for more details.
+func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) {
+ path := "/containers/create?" + queryString(opts)
+ resp, err := c.do(
+ "POST",
+ path,
+ doOptions{
+ data: struct {
+ *Config
+ HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"`
+ NetworkingConfig *NetworkingConfig `json:"NetworkingConfig,omitempty" yaml:"NetworkingConfig,omitempty"`
+ }{
+ opts.Config,
+ opts.HostConfig,
+ opts.NetworkingConfig,
+ },
+ },
+ )
+
+ if e, ok := err.(*Error); ok {
+ if e.Status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ if e.Status == http.StatusConflict {
+ return nil, ErrContainerAlreadyExists
+ }
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var container Container
+ if err := json.NewDecoder(resp.Body).Decode(&container); err != nil {
+ return nil, err
+ }
+
+ container.Name = opts.Name
+
+ return &container, nil
+}
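+
+// A minimal create-and-start sketch (illustrative; the image name and
+// container name are assumptions), showing how the sentinel errors above can
+// be told apart from other failures:
+//
+//	container, err := client.CreateContainer(docker.CreateContainerOptions{
+//		Name:   "example",
+//		Config: &docker.Config{Image: "busybox", Cmd: []string{"sleep", "60"}},
+//	})
+//	switch err {
+//	case docker.ErrNoSuchImage:
+//		// pull the image first, then retry
+//	case docker.ErrContainerAlreadyExists:
+//		// reuse or remove the existing container
+//	case nil:
+//		err = client.StartContainer(container.ID, nil)
+//	}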
+
+// KeyValuePair is a type for generic key/value pairs as used in the Lxc
+// configuration
+type KeyValuePair struct {
+ Key string `json:"Key,omitempty" yaml:"Key,omitempty"`
+ Value string `json:"Value,omitempty" yaml:"Value,omitempty"`
+}
+
+// RestartPolicy represents the policy for automatically restarting a container.
+//
+// Possible values are:
+//
+// - always: the docker daemon will always restart the container
+// - on-failure: the docker daemon will restart the container on failures, at
+// most MaximumRetryCount times
+// - unless-stopped: the docker daemon will always restart the container except
+// when user has manually stopped the container
+// - no: the docker daemon will not restart the container automatically
+type RestartPolicy struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty"`
+}
+
+// AlwaysRestart returns a restart policy that tells the Docker daemon to
+// always restart the container.
+func AlwaysRestart() RestartPolicy {
+ return RestartPolicy{Name: "always"}
+}
+
+// RestartOnFailure returns a restart policy that tells the Docker daemon to
+// restart the container on failures, trying at most maxRetry times.
+func RestartOnFailure(maxRetry int) RestartPolicy {
+ return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry}
+}
+
+// RestartUnlessStopped returns a restart policy that tells the Docker daemon to
+// always restart the container except when user has manually stopped the container.
+func RestartUnlessStopped() RestartPolicy {
+ return RestartPolicy{Name: "unless-stopped"}
+}
+
+// NeverRestart returns a restart policy that tells the Docker daemon to never
+// restart the container on failures.
+func NeverRestart() RestartPolicy {
+ return RestartPolicy{Name: "no"}
+}
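+
+// These helpers are typically consumed through a HostConfig; a brief sketch:
+//
+//	hostConfig := &docker.HostConfig{
+//		RestartPolicy: docker.RestartOnFailure(3), // retry at most 3 times
+//	}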
+
+// Device represents a device mapping between the Docker host and the
+// container.
+type Device struct {
+ PathOnHost string `json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty"`
+ PathInContainer string `json:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty"`
+ CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty"`
+}
+
+// BlockWeight represents a relative device weight for an individual device inside
+// of a container
+//
+// See https://goo.gl/FSdP0H for more details.
+type BlockWeight struct {
+ Path string `json:"Path,omitempty"`
+ Weight string `json:"Weight,omitempty"`
+}
+
+// BlockLimit represents a read/write limit in IOPS or Bandwidth for a device
+// inside of a container
+//
+// See https://goo.gl/FSdP0H for more details.
+type BlockLimit struct {
+ Path string `json:"Path,omitempty"`
+ Rate string `json:"Rate,omitempty"`
+}
+
+// HostConfig contains the container options related to starting a container on
+// a given host
+type HostConfig struct {
+ Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"`
+ CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"`
+ CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"`
+ GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty"`
+ ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"`
+ LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"`
+ Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"`
+ PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"`
+ Links []string `json:"Links,omitempty" yaml:"Links,omitempty"`
+ PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"`
+ DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only
+ DNSOptions []string `json:"DnsOptions,omitempty" yaml:"DnsOptions,omitempty"`
+ DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"`
+ ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"`
+ VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
+ UsernsMode string `json:"UsernsMode,omitempty" yaml:"UsernsMode,omitempty"`
+ NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"`
+ IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"`
+ PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"`
+ UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty"`
+ RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"`
+ Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"`
+ LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"`
+ ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"`
+ SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"`
+ Cgroup string `json:"Cgroup,omitempty" yaml:"Cgroup,omitempty"`
+ CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"`
+ KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"`
+ MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
+ MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty"`
+ OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable,omitempty"`
+ CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
+ CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
+ CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty"`
+ CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty"`
+ CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"`
+ CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty"`
+ BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight,omitempty"`
+ BlkioWeightDevice []BlockWeight `json:"BlkioWeightDevice,omitempty" yaml:"BlkioWeightDevice,omitempty"`
+ BlkioDeviceReadBps []BlockLimit `json:"BlkioDeviceReadBps,omitempty" yaml:"BlkioDeviceReadBps,omitempty"`
+ BlkioDeviceReadIOps []BlockLimit `json:"BlkioDeviceReadIOps,omitempty" yaml:"BlkioDeviceReadIOps,omitempty"`
+ BlkioDeviceWriteBps []BlockLimit `json:"BlkioDeviceWriteBps,omitempty" yaml:"BlkioDeviceWriteBps,omitempty"`
+ BlkioDeviceWriteIOps []BlockLimit `json:"BlkioDeviceWriteIOps,omitempty" yaml:"BlkioDeviceWriteIOps,omitempty"`
+ Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"`
+ VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"`
+ OomScoreAdj int `json:"OomScoreAdj,omitempty" yaml:"OomScoreAdj,omitempty"`
+ PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty"`
+ ShmSize int64 `json:"ShmSize,omitempty" yaml:"ShmSize,omitempty"`
+ Tmpfs map[string]string `json:"Tmpfs,omitempty" yaml:"Tmpfs,omitempty"`
+ AutoRemove bool `json:"AutoRemove,omitempty" yaml:"AutoRemove,omitempty"`
+ StorageOpt map[string]string `json:"StorageOpt,omitempty" yaml:"StorageOpt,omitempty"`
+ Sysctls map[string]string `json:"Sysctls,omitempty" yaml:"Sysctls,omitempty"`
+}
+
+// NetworkingConfig represents the container's networking configuration for each of its interfaces
+// Carries the networking configs specified in the `docker run` and `docker network connect` commands
+type NetworkingConfig struct {
+ EndpointsConfig map[string]*EndpointConfig // Endpoint configs for each connecting network
+}
+
+// StartContainer starts a container, returning an error in case of failure.
+//
+// Passing the HostConfig to this method has been deprecated in Docker API 1.22
+// (Docker Engine 1.10.x) and totally removed in Docker API 1.24 (Docker Engine
+// 1.12.x). The client will ignore the parameter when communicating with Docker
+// API 1.24 or greater.
+//
+// See https://goo.gl/MrBAJv for more details.
+func (c *Client) StartContainer(id string, hostConfig *HostConfig) error {
+ var opts doOptions
+ path := "/containers/" + id + "/start"
+ if c.serverAPIVersion == nil {
+ c.checkAPIVersion()
+ }
+ if c.serverAPIVersion != nil && c.serverAPIVersion.LessThan(apiVersion124) {
+ opts = doOptions{data: hostConfig, forceJSON: true}
+ }
+ resp, err := c.do("POST", path, opts)
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id, Err: err}
+ }
+ return err
+ }
+ if resp.StatusCode == http.StatusNotModified {
+ return &ContainerAlreadyRunning{ID: id}
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// StopContainer stops a container, killing it after the given timeout (in
+// seconds).
+//
+// See https://goo.gl/USqsFt for more details.
+func (c *Client) StopContainer(id string, timeout uint) error {
+ path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ if resp.StatusCode == http.StatusNotModified {
+ return &ContainerNotRunning{ID: id}
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// RestartContainer stops a container, killing it after the given timeout (in
+// seconds) during the stop process, and then restarts it.
+//
+// See https://goo.gl/QzsDnz for more details.
+func (c *Client) RestartContainer(id string, timeout uint) error {
+ path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// PauseContainer pauses the given container.
+//
+// See https://goo.gl/OF7W9X for more details.
+func (c *Client) PauseContainer(id string) error {
+ path := fmt.Sprintf("/containers/%s/pause", id)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// UnpauseContainer unpauses the given container.
+//
+// See https://goo.gl/7dwyPA for more details.
+func (c *Client) UnpauseContainer(id string) error {
+ path := fmt.Sprintf("/containers/%s/unpause", id)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// TopResult represents the list of processes running in a container, as
+// returned by /containers/<id>/top.
+//
+// See https://goo.gl/Rb46aY for more details.
+type TopResult struct {
+ Titles []string
+ Processes [][]string
+}
+
+// TopContainer returns processes running inside a container
+//
+// See https://goo.gl/Rb46aY for more details.
+func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) {
+ var args string
+ var result TopResult
+ if psArgs != "" {
+ args = fmt.Sprintf("?ps_args=%s", psArgs)
+ }
+ path := fmt.Sprintf("/containers/%s/top%s", id, args)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return result, &NoSuchContainer{ID: id}
+ }
+ return result, err
+ }
+ defer resp.Body.Close()
+ if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+ return result, err
+ }
+ return result, nil
+}
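+
+// An illustrative sketch that prints the result as a tab-separated process
+// table (containerID is a placeholder):
+//
+//	top, err := client.TopContainer(containerID, "")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(strings.Join(top.Titles, "\t"))
+//	for _, proc := range top.Processes {
+//		fmt.Println(strings.Join(proc, "\t"))
+//	}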
+
+// Stats represents container statistics, returned by /containers/<id>/stats.
+//
+// See https://goo.gl/GNmLHb for more details.
+type Stats struct {
+ Read time.Time `json:"read,omitempty" yaml:"read,omitempty"`
+ PidsStats struct {
+ Current uint64 `json:"current,omitempty" yaml:"current,omitempty"`
+ } `json:"pids_stats,omitempty" yaml:"pids_stats,omitempty"`
+ Network NetworkStats `json:"network,omitempty" yaml:"network,omitempty"`
+ Networks map[string]NetworkStats `json:"networks,omitempty" yaml:"networks,omitempty"`
+ MemoryStats struct {
+ Stats struct {
+ TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty"`
+ Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty"`
+ MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty"`
+ TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty"`
+ Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty"`
+ Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty"`
+ TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty"`
+ Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty"`
+ Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty"`
+ Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty"`
+ TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty"`
+ Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty"`
+ TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty"`
+ TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty"`
+ TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty"`
+ TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty"`
+ RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty"`
+ HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty"`
+ TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty"`
+ TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty"`
+ ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty"`
+ TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty"`
+ TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty"`
+ TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty"`
+ InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty"`
+ ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty"`
+ Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"`
+ InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"`
+ TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"`
+ HierarchicalMemswLimit uint64 `json:"hierarchical_memsw_limit,omitempty" yaml:"hierarchical_memsw_limit,omitempty"`
+ Swap uint64 `json:"swap,omitempty" yaml:"swap,omitempty"`
+ } `json:"stats,omitempty" yaml:"stats,omitempty"`
+ MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty"`
+ Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"`
+ Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty"`
+ Limit uint64 `json:"limit,omitempty" yaml:"limit,omitempty"`
+ } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty"`
+ BlkioStats struct {
+ IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty"`
+ IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty"`
+ IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty"`
+ IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty"`
+ IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty"`
+ IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty"`
+ IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty"`
+ SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty"`
+ } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty"`
+ CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"`
+}
+
+// NetworkStats is a stats entry for network stats
+type NetworkStats struct {
+ RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty"`
+ RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty"`
+ RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty"`
+ TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty"`
+ TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty"`
+ RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty"`
+ TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty"`
+ TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty"`
+}
+
+// CPUStats is a stats entry for cpu stats
+type CPUStats struct {
+ CPUUsage struct {
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty"`
+ UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty"`
+ TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty"`
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty"`
+ } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty"`
+ SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty"`
+ ThrottlingData struct {
+ Periods uint64 `json:"periods,omitempty"`
+ ThrottledPeriods uint64 `json:"throttled_periods,omitempty"`
+ ThrottledTime uint64 `json:"throttled_time,omitempty"`
+ } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty"`
+}
+
+// BlkioStatsEntry is a stats entry for blkio_stats
+type BlkioStatsEntry struct {
+ Major uint64 `json:"major,omitempty" yaml:"major,omitempty"`
+ Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty"`
+ Op string `json:"op,omitempty" yaml:"op,omitempty"`
+ Value uint64 `json:"value,omitempty" yaml:"value,omitempty"`
+}
+
+// StatsOptions specify parameters to the Stats function.
+//
+// See https://goo.gl/GNmLHb for more details.
+type StatsOptions struct {
+ ID string
+ Stats chan<- *Stats
+ Stream bool
+ // A flag that enables stopping the stats operation
+ Done <-chan bool
+ // Initial connection timeout
+ Timeout time.Duration
+ // Timeout when no data is received; it's reset every time new data
+ // arrives.
+ InactivityTimeout time.Duration `qs:"-"`
+}
+
+// Stats sends container statistics for the given container to the given channel.
+//
+// This function is blocking, similar to a streaming call for logs, and should be run
+// on a separate goroutine from the caller. Note that this function will block until
+// the given container is removed, not just exited. When finished, this function
+// will close the given channel. Alternatively, the function can be stopped by
+// signaling on the Done channel.
+//
+// See https://goo.gl/GNmLHb for more details.
+func (c *Client) Stats(opts StatsOptions) (retErr error) {
+ errC := make(chan error, 1)
+ readCloser, writeCloser := io.Pipe()
+
+ defer func() {
+ close(opts.Stats)
+
+ select {
+ case err := <-errC:
+ if err != nil && retErr == nil {
+ retErr = err
+ }
+ default:
+ // No errors
+ }
+
+ if err := readCloser.Close(); err != nil && retErr == nil {
+ retErr = err
+ }
+ }()
+
+ go func() {
+ err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{
+ rawJSONStream: true,
+ useJSONDecoder: true,
+ stdout: writeCloser,
+ timeout: opts.Timeout,
+ inactivityTimeout: opts.InactivityTimeout,
+ })
+ if err != nil {
+ dockerError, ok := err.(*Error)
+ if ok {
+ if dockerError.Status == http.StatusNotFound {
+ err = &NoSuchContainer{ID: opts.ID}
+ }
+ }
+ }
+ if closeErr := writeCloser.Close(); closeErr != nil && err == nil {
+ err = closeErr
+ }
+ errC <- err
+ close(errC)
+ }()
+
+ quit := make(chan struct{})
+ defer close(quit)
+ go func() {
+ // block here waiting for the signal to stop function
+ select {
+ case <-opts.Done:
+ readCloser.Close()
+ case <-quit:
+ return
+ }
+ }()
+
+ decoder := json.NewDecoder(readCloser)
+ stats := new(Stats)
+ for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) {
+ if err != nil {
+ return err
+ }
+ opts.Stats <- stats
+ stats = new(Stats)
+ }
+ return nil
+}
+
+// KillContainerOptions represents the set of options that can be used in a
+// call to KillContainer.
+//
+// See https://goo.gl/hkS9i8 for more details.
+type KillContainerOptions struct {
+ // The ID of the container.
+ ID string `qs:"-"`
+
+ // The signal to send to the container. When omitted, Docker server
+ // will assume SIGKILL.
+ Signal Signal
+}
+
+// KillContainer sends a signal to a container, returning an error in case of
+// failure.
+//
+// See https://goo.gl/hkS9i8 for more details.
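+//
+// A minimal usage sketch (hypothetical container ID; SIGTERM is one of the
+// Signal constants defined by this package):
+//
+//	err := client.KillContainer(docker.KillContainerOptions{ID: "abc123", Signal: docker.SIGTERM})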
+func (c *Client) KillContainer(opts KillContainerOptions) error {
+ path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// RemoveContainerOptions encapsulates options to remove a container.
+//
+// See https://goo.gl/RQyX62 for more details.
+type RemoveContainerOptions struct {
+ // The ID of the container.
+ ID string `qs:"-"`
+
+ // A flag that indicates whether Docker should remove the volumes
+ // associated with the container.
+ RemoveVolumes bool `qs:"v"`
+
+ // A flag that indicates whether Docker should remove the container
+ // even if it is currently running.
+ Force bool
+}
+
+// RemoveContainer removes a container, returning an error in case of failure.
+//
+// See https://goo.gl/RQyX62 for more details.
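+//
+// A minimal usage sketch (hypothetical container ID):
+//
+//	err := client.RemoveContainer(docker.RemoveContainerOptions{ID: "abc123", RemoveVolumes: true, Force: true})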
+func (c *Client) RemoveContainer(opts RemoveContainerOptions) error {
+ path := "/containers/" + opts.ID + "?" + queryString(opts)
+ resp, err := c.do("DELETE", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// UploadToContainerOptions is the set of options that can be used when
+// uploading an archive into a container.
+//
+// See https://goo.gl/Ss97HW for more details.
+type UploadToContainerOptions struct {
+ InputStream io.Reader `json:"-" qs:"-"`
+ Path string `qs:"path"`
+ NoOverwriteDirNonDir bool `qs:"noOverwriteDirNonDir"`
+}
+
+// UploadToContainer uploads a tar archive to be extracted to a path in the
+// filesystem of the container.
+//
+// See https://goo.gl/Ss97HW for more details.
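+//
+// A minimal usage sketch (hypothetical archive path and container ID; error
+// handling elided):
+//
+//	f, _ := os.Open("files.tar") // must be a tar archive
+//	defer f.Close()
+//	err := client.UploadToContainer("abc123", docker.UploadToContainerOptions{InputStream: f, Path: "/tmp"})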
+func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error {
+ url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
+
+ return c.stream("PUT", url, streamOptions{
+ in: opts.InputStream,
+ })
+}
+
+// DownloadFromContainerOptions is the set of options that can be used when
+// downloading resources from a container.
+//
+// See https://goo.gl/KnZJDX for more details.
+type DownloadFromContainerOptions struct {
+ OutputStream io.Writer `json:"-" qs:"-"`
+ Path string `qs:"path"`
+ InactivityTimeout time.Duration `qs:"-"`
+}
+
+// DownloadFromContainer downloads a tar archive of files or folders in a container.
+//
+// See https://goo.gl/KnZJDX for more details.
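+//
+// A minimal usage sketch (hypothetical container ID and path):
+//
+//	var buf bytes.Buffer
+//	err := client.DownloadFromContainer("abc123", docker.DownloadFromContainerOptions{OutputStream: &buf, Path: "/etc/hosts"})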
+func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error {
+ url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts)
+
+ return c.stream("GET", url, streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ })
+}
+
+// CopyFromContainerOptions has been DEPRECATED; please use DownloadFromContainerOptions along with DownloadFromContainer.
+//
+// See https://goo.gl/R2jevW for more details.
+type CopyFromContainerOptions struct {
+ OutputStream io.Writer `json:"-"`
+ Container string `json:"-"`
+ Resource string
+}
+
+// CopyFromContainer has been DEPRECATED; please use DownloadFromContainerOptions along with DownloadFromContainer.
+//
+// See https://goo.gl/R2jevW for more details.
+func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ url := fmt.Sprintf("/containers/%s/copy", opts.Container)
+ resp, err := c.do("POST", url, doOptions{data: opts})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ _, err = io.Copy(opts.OutputStream, resp.Body)
+ return err
+}
+
+// WaitContainer blocks until the given container stops, returning the exit
+// code of the container's status.
+//
+// See https://goo.gl/Gc1rge for more details.
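+//
+// A minimal usage sketch (hypothetical container ID):
+//
+//	exitCode, err := client.WaitContainer("abc123")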
+func (c *Client) WaitContainer(id string) (int, error) {
+ resp, err := c.do("POST", "/containers/"+id+"/wait", doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return 0, &NoSuchContainer{ID: id}
+ }
+ return 0, err
+ }
+ defer resp.Body.Close()
+ var r struct{ StatusCode int }
+ if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
+ return 0, err
+ }
+ return r.StatusCode, nil
+}
+
+// CommitContainerOptions aggregates parameters to the CommitContainer method.
+//
+// See https://goo.gl/mqfoCw for more details.
+type CommitContainerOptions struct {
+ Container string
+ Repository string `qs:"repo"`
+ Tag string
+ Message string `qs:"comment"`
+ Author string
+ Run *Config `qs:"-"`
+}
+
+// CommitContainer creates a new image from a container's changes.
+//
+// See https://goo.gl/mqfoCw for more details.
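+//
+// A minimal usage sketch (hypothetical names):
+//
+//	image, err := client.CommitContainer(docker.CommitContainerOptions{
+//		Container:  "abc123",
+//		Repository: "myrepo/myimage",
+//		Tag:        "v1",
+//	})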
+func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) {
+ path := "/commit?" + queryString(opts)
+ resp, err := c.do("POST", path, doOptions{data: opts.Run})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var image Image
+ if err := json.NewDecoder(resp.Body).Decode(&image); err != nil {
+ return nil, err
+ }
+ return &image, nil
+}
+
+// AttachToContainerOptions is the set of options that can be used when
+// attaching to a container.
+//
+// See https://goo.gl/NKpkFk for more details.
+type AttachToContainerOptions struct {
+ Container string `qs:"-"`
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+
+ // Get container logs, sending them to OutputStream.
+ Logs bool
+
+ // Stream the response?
+ Stream bool
+
+ // Attach to stdin, and use InputStream.
+ Stdin bool
+
+ // Attach to stdout, and use OutputStream.
+ Stdout bool
+
+ // Attach to stderr, and use ErrorStream.
+ Stderr bool
+
+ // If set, after a successful connect, a sentinel will be sent and then the
+ // client will block on receive before continuing.
+ //
+ // It must be an unbuffered channel. Using a buffered channel can lead
+ // to unexpected behavior.
+ Success chan struct{}
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+}
+
+// AttachToContainer attaches to a container, using the given options.
+//
+// See https://goo.gl/NKpkFk for more details.
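+//
+// A minimal usage sketch that streams output from a hypothetical container
+// (error handling elided):
+//
+//	var stdout, stderr bytes.Buffer
+//	client.AttachToContainer(docker.AttachToContainerOptions{
+//		Container:    "abc123",
+//		OutputStream: &stdout,
+//		ErrorStream:  &stderr,
+//		Stream:       true,
+//		Stdout:       true,
+//		Stderr:       true,
+//	})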
+func (c *Client) AttachToContainer(opts AttachToContainerOptions) error {
+ cw, err := c.AttachToContainerNonBlocking(opts)
+ if err != nil {
+ return err
+ }
+ return cw.Wait()
+}
+
+// AttachToContainerNonBlocking attaches to a container, using the given options.
+// This function does not block.
+//
+// See https://goo.gl/NKpkFk for more details.
+func (c *Client) AttachToContainerNonBlocking(opts AttachToContainerOptions) (CloseWaiter, error) {
+ if opts.Container == "" {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ path := "/containers/" + opts.Container + "/attach?" + queryString(opts)
+ return c.hijack("POST", path, hijackOptions{
+ success: opts.Success,
+ setRawTerminal: opts.RawTerminal,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ })
+}
+
+// LogsOptions represents the set of options used when getting logs from a
+// container.
+//
+// See https://goo.gl/yl8PGm for more details.
+type LogsOptions struct {
+ Container string `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+ Follow bool
+ Stdout bool
+ Stderr bool
+ Since int64
+ Timestamps bool
+ Tail string
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+}
+
+// Logs gets stdout and stderr logs from the specified container.
+//
+// See https://goo.gl/yl8PGm for more details.
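+//
+// A minimal usage sketch (hypothetical container ID):
+//
+//	err := client.Logs(docker.LogsOptions{
+//		Container:    "abc123",
+//		OutputStream: os.Stdout,
+//		ErrorStream:  os.Stderr,
+//		Stdout:       true,
+//		Stderr:       true,
+//		Tail:         "10",
+//	})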
+func (c *Client) Logs(opts LogsOptions) error {
+ if opts.Container == "" {
+ return &NoSuchContainer{ID: opts.Container}
+ }
+ if opts.Tail == "" {
+ opts.Tail = "all"
+ }
+ path := "/containers/" + opts.Container + "/logs?" + queryString(opts)
+ return c.stream("GET", path, streamOptions{
+ setRawTerminal: opts.RawTerminal,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ })
+}
+
+// ResizeContainerTTY resizes the terminal to the given height and width.
+//
+// See https://goo.gl/xERhCc for more details.
+func (c *Client) ResizeContainerTTY(id string, height, width int) error {
+ params := make(url.Values)
+ params.Set("h", strconv.Itoa(height))
+ params.Set("w", strconv.Itoa(width))
+ resp, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ExportContainerOptions is the set of parameters to the ExportContainer
+// method.
+//
+// See https://goo.gl/dOkTyk for more details.
+type ExportContainerOptions struct {
+ ID string
+ OutputStream io.Writer
+ InactivityTimeout time.Duration `qs:"-"`
+}
+
+// ExportContainer exports the contents of the container given by opts.ID as a
+// tar archive, writing it to opts.OutputStream.
+//
+// See https://goo.gl/dOkTyk for more details.
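+//
+// A minimal usage sketch (hypothetical file name and container ID; error
+// handling elided):
+//
+//	f, _ := os.Create("container.tar")
+//	defer f.Close()
+//	err := client.ExportContainer(docker.ExportContainerOptions{ID: "abc123", OutputStream: f})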
+func (c *Client) ExportContainer(opts ExportContainerOptions) error {
+ if opts.ID == "" {
+ return &NoSuchContainer{ID: opts.ID}
+ }
+ url := fmt.Sprintf("/containers/%s/export", opts.ID)
+ return c.stream("GET", url, streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ })
+}
+
+// NoSuchContainer is the error returned when a given container does not exist.
+type NoSuchContainer struct {
+ ID string
+ Err error
+}
+
+func (err *NoSuchContainer) Error() string {
+ if err.Err != nil {
+ return err.Err.Error()
+ }
+ return "No such container: " + err.ID
+}
+
+// ContainerAlreadyRunning is the error returned when a given container is
+// already running.
+type ContainerAlreadyRunning struct {
+ ID string
+}
+
+func (err *ContainerAlreadyRunning) Error() string {
+ return "Container already running: " + err.ID
+}
+
+// ContainerNotRunning is the error returned when a given container is not
+// running.
+type ContainerNotRunning struct {
+ ID string
+}
+
+func (err *ContainerNotRunning) Error() string {
+ return "Container not running: " + err.ID
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/env.go b/vendor/github.com/fsouza/go-dockerclient/env.go
new file mode 100644
index 0000000000..c54b0b0e80
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/env.go
@@ -0,0 +1,168 @@
+// Copyright 2014 Docker authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the DOCKER-LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// Env represents a list of key-value pairs in the form KEY=VALUE.
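+//
+// A small illustrative sketch:
+//
+//	var env docker.Env
+//	env.Set("PATH", "/usr/bin")
+//	env.SetInt("PORT", 8080)
+//	fmt.Println(env.Get("PATH"), env.GetInt("PORT")) // prints "/usr/bin 8080"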
+type Env []string
+
+// Get returns the string value of the given key.
+func (env *Env) Get(key string) (value string) {
+ return env.Map()[key]
+}
+
+// Exists checks whether the given key is defined in the internal Env
+// representation.
+func (env *Env) Exists(key string) bool {
+ _, exists := env.Map()[key]
+ return exists
+}
+
+// GetBool returns a boolean representation of the given key. The key is false
+// whenever its value is 0, no, false, none or an empty string. Any other value
+// will be interpreted as true.
+func (env *Env) GetBool(key string) (value bool) {
+ s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
+ if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
+ return false
+ }
+ return true
+}
+
+// SetBool assigns a boolean value to the given key.
+func (env *Env) SetBool(key string, value bool) {
+ if value {
+ env.Set(key, "1")
+ } else {
+ env.Set(key, "0")
+ }
+}
+
+// GetInt returns the value of the provided key, converted to int.
+//
+// If the value cannot be represented as an integer, it returns -1.
+func (env *Env) GetInt(key string) int {
+ return int(env.GetInt64(key))
+}
+
+// SetInt assigns an integer value to the given key.
+func (env *Env) SetInt(key string, value int) {
+ env.Set(key, strconv.Itoa(value))
+}
+
+// GetInt64 returns the value of the provided key, converted to int64.
+//
+// If the value cannot be represented as an integer, it returns -1.
+func (env *Env) GetInt64(key string) int64 {
+ s := strings.Trim(env.Get(key), " \t")
+ val, err := strconv.ParseInt(s, 10, 64)
+ if err != nil {
+ return -1
+ }
+ return val
+}
+
+// SetInt64 assigns a 64-bit integer value to the given key.
+func (env *Env) SetInt64(key string, value int64) {
+ env.Set(key, strconv.FormatInt(value, 10))
+}
+
+// GetJSON unmarshals the value of the provided key into the provided iface.
+//
+// iface is a value that can be provided to the json.Unmarshal function.
+func (env *Env) GetJSON(key string, iface interface{}) error {
+ sval := env.Get(key)
+ if sval == "" {
+ return nil
+ }
+ return json.Unmarshal([]byte(sval), iface)
+}
+
+// SetJSON marshals the given value to JSON format and stores it using the
+// provided key.
+func (env *Env) SetJSON(key string, value interface{}) error {
+ sval, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ env.Set(key, string(sval))
+ return nil
+}
+
+// GetList returns a list of strings matching the provided key. It handles the
+// list as a JSON representation of a list of strings.
+//
+// If the given key holds a plain string, it will return a list
+// containing only that value.
+func (env *Env) GetList(key string) []string {
+ sval := env.Get(key)
+ if sval == "" {
+ return nil
+ }
+ var l []string
+ if err := json.Unmarshal([]byte(sval), &l); err != nil {
+ l = append(l, sval)
+ }
+ return l
+}
+
+// SetList stores the given list in the provided key, after serializing it to
+// JSON format.
+func (env *Env) SetList(key string, value []string) error {
+ return env.SetJSON(key, value)
+}
+
+// Set defines the value of a key to the given string.
+func (env *Env) Set(key, value string) {
+ *env = append(*env, key+"="+value)
+}
+
+// Decode decodes `src` as a json dictionary, and adds each decoded key-value
+// pair to the environment.
+//
+// If `src` cannot be decoded as a json dictionary, an error is returned.
+func (env *Env) Decode(src io.Reader) error {
+ m := make(map[string]interface{})
+ if err := json.NewDecoder(src).Decode(&m); err != nil {
+ return err
+ }
+ for k, v := range m {
+ env.SetAuto(k, v)
+ }
+ return nil
+}
+
+// SetAuto chooses the appropriate Set* method to call based on the type of the given value.
+func (env *Env) SetAuto(key string, value interface{}) {
+ if fval, ok := value.(float64); ok {
+ env.SetInt64(key, int64(fval))
+ } else if sval, ok := value.(string); ok {
+ env.Set(key, sval)
+ } else if val, err := json.Marshal(value); err == nil {
+ env.Set(key, string(val))
+ } else {
+ env.Set(key, fmt.Sprintf("%v", value))
+ }
+}
+
+// Map returns the map representation of the env.
+func (env *Env) Map() map[string]string {
+ if len(*env) == 0 {
+ return nil
+ }
+ m := make(map[string]string)
+ for _, kv := range *env {
+ parts := strings.SplitN(kv, "=", 2)
+ m[parts[0]] = parts[1]
+ }
+ return m
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/fsouza/go-dockerclient/event.go
new file mode 100644
index 0000000000..8868c0cf8c
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/event.go
@@ -0,0 +1,387 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// APIEvents represents events coming from the Docker API.
+// The fields in the Docker API changed in API version 1.22, and
+// events for more than images and containers are now fired off.
+// To maintain forward and backward compatibility, go-dockerclient
+// replicates the event in both the new and old format as faithfully as possible.
+//
+// For events that only exist in 1.22 and later, `Status` is filled in as
+// `"Type:Action"` instead of just `Action` to allow for older clients to
+// differentiate and not break if they rely on the pre-1.22 Status types.
+//
+// The transformEvent method can be consulted for more information about how
+// events are translated from new/old API formats
+type APIEvents struct {
+ // New API Fields in 1.22
+ Action string `json:"action,omitempty"`
+ Type string `json:"type,omitempty"`
+ Actor APIActor `json:"actor,omitempty"`
+
+ // Old API fields for < 1.22
+ Status string `json:"status,omitempty"`
+ ID string `json:"id,omitempty"`
+ From string `json:"from,omitempty"`
+
+ // Fields in both
+ Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
+}
+
+// APIActor represents an actor that accomplishes something for an event
+type APIActor struct {
+ ID string `json:"id,omitempty"`
+ Attributes map[string]string `json:"attributes,omitempty"`
+}
+
+type eventMonitoringState struct {
+ // `sync/atomic` expects the first word in an allocated struct to be 64-bit
+ // aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details.
+ lastSeen int64
+ sync.RWMutex
+ sync.WaitGroup
+ enabled bool
+ C chan *APIEvents
+ errC chan error
+ listeners []chan<- *APIEvents
+}
+
+const (
+ maxMonitorConnRetries = 5
+ retryInitialWaitTime = 10.
+)
+
+var (
+ // ErrNoListeners is the error returned when no listeners are available
+ // to receive an event.
+ ErrNoListeners = errors.New("no listeners present to receive event")
+
+ // ErrListenerAlreadyExists is the error returned when the listener already
+ // exists.
+ ErrListenerAlreadyExists = errors.New("listener already exists for docker events")
+
+ // EOFEvent is sent when the event listener receives an EOF error.
+ EOFEvent = &APIEvents{
+ Type: "EOF",
+ Status: "EOF",
+ }
+)
+
+// AddEventListener adds a new listener to container events in the Docker API.
+//
+// The parameter is a channel through which events will be sent.
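+//
+// A minimal usage sketch (the channel buffer size is an arbitrary choice):
+//
+//	events := make(chan *docker.APIEvents, 10)
+//	if err := client.AddEventListener(events); err != nil {
+//		log.Fatal(err)
+//	}
+//	for ev := range events {
+//		fmt.Println(ev.Type, ev.Action, ev.Actor.ID)
+//	}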
+func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
+ var err error
+ if !c.eventMonitor.isEnabled() {
+ err = c.eventMonitor.enableEventMonitoring(c)
+ if err != nil {
+ return err
+ }
+ }
+ err = c.eventMonitor.addListener(listener)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// RemoveEventListener removes a listener from the monitor.
+func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
+ err := c.eventMonitor.removeListener(listener)
+ if err != nil {
+ return err
+ }
+ if c.eventMonitor.listenersCount() == 0 {
+ c.eventMonitor.disableEventMonitoring()
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if listenerExists(listener, &eventState.listeners) {
+ return ErrListenerAlreadyExists
+ }
+ eventState.Add(1)
+ eventState.listeners = append(eventState.listeners, listener)
+ return nil
+}
+
+func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if listenerExists(listener, &eventState.listeners) {
+ var newListeners []chan<- *APIEvents
+ for _, l := range eventState.listeners {
+ if l != listener {
+ newListeners = append(newListeners, l)
+ }
+ }
+ eventState.listeners = newListeners
+ eventState.Add(-1)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) closeListeners() {
+ for _, l := range eventState.listeners {
+ close(l)
+ eventState.Add(-1)
+ }
+ eventState.listeners = nil
+}
+
+func (eventState *eventMonitoringState) listenersCount() int {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return len(eventState.listeners)
+}
+
+func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
+ for _, b := range *list {
+ if b == a {
+ return true
+ }
+ }
+ return false
+}
+
+func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if !eventState.enabled {
+ eventState.enabled = true
+ atomic.StoreInt64(&eventState.lastSeen, 0)
+ eventState.C = make(chan *APIEvents, 100)
+ eventState.errC = make(chan error, 1)
+ go eventState.monitorEvents(c)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) disableEventMonitoring() error {
+ eventState.Lock()
+ defer eventState.Unlock()
+
+ eventState.closeListeners()
+
+ eventState.Wait()
+
+ if eventState.enabled {
+ eventState.enabled = false
+ close(eventState.C)
+ close(eventState.errC)
+ }
+ return nil
+}
+
+func (eventState *eventMonitoringState) monitorEvents(c *Client) {
+ var err error
+ for eventState.noListeners() {
+ time.Sleep(10 * time.Millisecond)
+ }
+ if err = eventState.connectWithRetry(c); err != nil {
+ // terminate if connect failed
+ eventState.disableEventMonitoring()
+ return
+ }
+ for eventState.isEnabled() {
+ timeout := time.After(100 * time.Millisecond)
+ select {
+ case ev, ok := <-eventState.C:
+ if !ok {
+ return
+ }
+ if ev == EOFEvent {
+ eventState.disableEventMonitoring()
+ return
+ }
+ eventState.updateLastSeen(ev)
+ go eventState.sendEvent(ev)
+ case err = <-eventState.errC:
+ if err == ErrNoListeners {
+ eventState.disableEventMonitoring()
+ return
+ } else if err != nil {
+ defer func() { go eventState.monitorEvents(c) }()
+ return
+ }
+ case <-timeout:
+ continue
+ }
+ }
+}
+
+func (eventState *eventMonitoringState) connectWithRetry(c *Client) error {
+ var retries int
+ eventState.RLock()
+ eventChan := eventState.C
+ errChan := eventState.errC
+ eventState.RUnlock()
+ err := c.eventHijack(atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan)
+ for ; err != nil && retries < maxMonitorConnRetries; retries++ {
+ waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries)))
+ time.Sleep(time.Duration(waitTime) * time.Millisecond)
+ eventState.RLock()
+ eventChan = eventState.C
+ errChan = eventState.errC
+ eventState.RUnlock()
+ err = c.eventHijack(atomic.LoadInt64(&eventState.lastSeen), eventChan, errChan)
+ }
+ return err
+}
+
+func (eventState *eventMonitoringState) noListeners() bool {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return len(eventState.listeners) == 0
+}
+
+func (eventState *eventMonitoringState) isEnabled() bool {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ return eventState.enabled
+}
+
+func (eventState *eventMonitoringState) sendEvent(event *APIEvents) {
+ eventState.RLock()
+ defer eventState.RUnlock()
+ eventState.Add(1)
+ defer eventState.Done()
+ if eventState.enabled {
+ if len(eventState.listeners) == 0 {
+ eventState.errC <- ErrNoListeners
+ return
+ }
+
+ for _, listener := range eventState.listeners {
+ listener <- event
+ }
+ }
+}
+
+func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) {
+ eventState.Lock()
+ defer eventState.Unlock()
+ if atomic.LoadInt64(&eventState.lastSeen) < e.Time {
+ atomic.StoreInt64(&eventState.lastSeen, e.Time)
+ }
+}
+
+func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error {
+ uri := "/events"
+ if startTime != 0 {
+ uri += fmt.Sprintf("?since=%d", startTime)
+ }
+ protocol := c.endpointURL.Scheme
+ address := c.endpointURL.Path
+ if protocol != "unix" {
+ protocol = "tcp"
+ address = c.endpointURL.Host
+ }
+ var dial net.Conn
+ var err error
+ if c.TLSConfig == nil {
+ dial, err = c.Dialer.Dial(protocol, address)
+ } else {
+ dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
+ }
+ if err != nil {
+ return err
+ }
+ conn := httputil.NewClientConn(dial, nil)
+ req, err := http.NewRequest("GET", uri, nil)
+ if err != nil {
+ return err
+ }
+ res, err := conn.Do(req)
+ if err != nil {
+ return err
+ }
+ go func(res *http.Response, conn *httputil.ClientConn) {
+ defer conn.Close()
+ defer res.Body.Close()
+ decoder := json.NewDecoder(res.Body)
+ for {
+ var event APIEvents
+ if err = decoder.Decode(&event); err != nil {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ c.eventMonitor.RLock()
+ if c.eventMonitor.enabled && c.eventMonitor.C == eventChan {
+ // Signal that we're exiting.
+ eventChan <- EOFEvent
+ }
+ c.eventMonitor.RUnlock()
+ break
+ }
+ errChan <- err
+ }
+ if event.Time == 0 {
+ continue
+ }
+ if !c.eventMonitor.isEnabled() || c.eventMonitor.C != eventChan {
+ return
+ }
+ transformEvent(&event)
+ eventChan <- &event
+ }
+ }(res, conn)
+ return nil
+}
+
+// transformEvent takes an event and determines what version it is from
+// then populates both versions of the event
+func transformEvent(event *APIEvents) {
+ // if event version is <= 1.21 there will be no Action and no Type
+ if event.Action == "" && event.Type == "" {
+ event.Action = event.Status
+ event.Actor.ID = event.ID
+ event.Actor.Attributes = map[string]string{}
+ switch event.Status {
+ case "delete", "import", "pull", "push", "tag", "untag":
+ event.Type = "image"
+ default:
+ event.Type = "container"
+ if event.From != "" {
+ event.Actor.Attributes["image"] = event.From
+ }
+ }
+ } else {
+ if event.Status == "" {
+ if event.Type == "image" || event.Type == "container" {
+ event.Status = event.Action
+ } else {
+ // Because just the Status has been overloaded with different Types
+ // if an event is not for an image or a container, we prepend the type
+ // to avoid problems for people relying on actions being only for
+ // images and containers
+ event.Status = event.Type + ":" + event.Action
+ }
+ }
+ if event.ID == "" {
+ event.ID = event.Actor.ID
+ }
+ if event.From == "" {
+ event.From = event.Actor.Attributes["image"]
+ }
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/exec.go b/vendor/github.com/fsouza/go-dockerclient/exec.go
new file mode 100644
index 0000000000..1a16da9d62
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/exec.go
@@ -0,0 +1,202 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+)
+
+// Exec is the type representing a `docker exec` instance and containing the
+// instance ID
+type Exec struct {
+ ID string `json:"Id,omitempty" yaml:"Id,omitempty"`
+}
+
+// CreateExecOptions specify parameters to the CreateExecContainer function.
+//
+// See https://goo.gl/1KSIb7 for more details
+type CreateExecOptions struct {
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+ Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
+ Container string `json:"Container,omitempty" yaml:"Container,omitempty"`
+ User string `json:"User,omitempty" yaml:"User,omitempty"`
+}
+
+// CreateExec sets up an exec instance in the running container given by
+// opts.Container, returning the exec instance, or an error in case of failure.
+//
+// See https://goo.gl/1KSIb7 for more details
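+//
+// A minimal usage sketch (hypothetical container ID):
+//
+//	exec, err := client.CreateExec(docker.CreateExecOptions{
+//		Container:    "abc123",
+//		Cmd:          []string{"ls", "-l"},
+//		AttachStdout: true,
+//	})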
+func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
+ path := fmt.Sprintf("/containers/%s/exec", opts.Container)
+ resp, err := c.do("POST", path, doOptions{data: opts})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchContainer{ID: opts.Container}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var exec Exec
+ if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
+ return nil, err
+ }
+
+ return &exec, nil
+}
+
+// StartExecOptions specify parameters to the StartExecContainer function.
+//
+// See https://goo.gl/iQCnto for more details
+type StartExecOptions struct {
+ Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"`
+
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ ErrorStream io.Writer `qs:"-"`
+
+ // Use raw terminal? Usually true when the container contains a TTY.
+ RawTerminal bool `qs:"-"`
+
+ // If set, after a successful connect, a sentinel will be sent and then the
+ // client will block on receive before continuing.
+ //
+ // It must be an unbuffered channel. Using a buffered channel can lead
+ // to unexpected behavior.
+ Success chan struct{} `json:"-"`
+}
+
+// StartExec starts a previously set up exec instance with the given id. If opts.Detach is
+// true, it returns after starting the exec command. Otherwise, it sets up an
+// interactive session with the exec command.
+//
+// See https://goo.gl/iQCnto for more details
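+//
+// A minimal usage sketch, continuing from a CreateExec call (error handling
+// elided):
+//
+//	var out bytes.Buffer
+//	client.StartExec(exec.ID, docker.StartExecOptions{OutputStream: &out})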
+func (c *Client) StartExec(id string, opts StartExecOptions) error {
+ cw, err := c.StartExecNonBlocking(id, opts)
+ if err != nil {
+ return err
+ }
+ if cw != nil {
+ return cw.Wait()
+ }
+ return nil
+}
+
+// StartExecNonBlocking starts a previously set up exec instance with the given id. If opts.Detach is
+// true, it returns after starting the exec command. Otherwise, it sets up an
+// interactive session with the exec command.
+//
+// See https://goo.gl/iQCnto for more details
+func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWaiter, error) {
+ if id == "" {
+ return nil, &NoSuchExec{ID: id}
+ }
+
+ path := fmt.Sprintf("/exec/%s/start", id)
+
+ if opts.Detach {
+ resp, err := c.do("POST", path, doOptions{data: opts})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchExec{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ return nil, nil
+ }
+
+ return c.hijack("POST", path, hijackOptions{
+ success: opts.Success,
+ setRawTerminal: opts.RawTerminal,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ stderr: opts.ErrorStream,
+ data: opts,
+ })
+}
+
+// ResizeExecTTY resizes the tty session used by the exec command id. This API
+// is valid only if Tty was specified as part of creating and starting the exec
+// command.
+//
+// See https://goo.gl/e1JpsA for more details
+func (c *Client) ResizeExecTTY(id string, height, width int) error {
+ params := make(url.Values)
+ params.Set("h", strconv.Itoa(height))
+ params.Set("w", strconv.Itoa(width))
+
+ path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
+ resp, err := c.do("POST", path, doOptions{})
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ExecProcessConfig is a type describing the command associated with an Exec
+// instance. It's used in the ExecInspect type.
+type ExecProcessConfig struct {
+ Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"`
+ User string `json:"user,omitempty" yaml:"user,omitempty"`
+ Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"`
+ EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"`
+ Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"`
+}
+
+// ExecInspect is a type with details about an exec instance, including the
+// exit code if the command has finished running. It's returned by an API
+// call to /exec/(id)/json.
+//
+// See https://goo.gl/gPtX9R for more details
+type ExecInspect struct {
+ ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
+ Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
+ ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
+ OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"`
+ OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"`
+ ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"`
+ Container Container `json:"Container,omitempty" yaml:"Container,omitempty"`
+}
+
+// InspectExec returns low-level information about the exec command id.
+//
+// See https://goo.gl/gPtX9R for more details
+func (c *Client) InspectExec(id string) (*ExecInspect, error) {
+ path := fmt.Sprintf("/exec/%s/json", id)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchExec{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var exec ExecInspect
+ if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
+ return nil, err
+ }
+ return &exec, nil
+}
+
+// NoSuchExec is the error returned when a given exec instance does not exist.
+type NoSuchExec struct {
+ ID string
+}
+
+func (err *NoSuchExec) Error() string {
+ return "No such exec instance: " + err.ID
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/image.go b/vendor/github.com/fsouza/go-dockerclient/image.go
new file mode 100644
index 0000000000..fd51c3f928
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/image.go
@@ -0,0 +1,642 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+)
+
+// APIImages represents an image returned in the ListImages call.
+type APIImages struct {
+ ID string `json:"Id" yaml:"Id"`
+ RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"`
+ Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"`
+ VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"`
+ ParentID string `json:"ParentId,omitempty" yaml:"ParentId,omitempty"`
+ RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// RootFS represents the underlying layers used by an image
+type RootFS struct {
+ Type string `json:"Type,omitempty" yaml:"Type,omitempty"`
+ Layers []string `json:"Layers,omitempty" yaml:"Layers,omitempty"`
+}
+
+// Image is the type representing a docker image and its various properties
+type Image struct {
+ ID string `json:"Id" yaml:"Id"`
+ RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty"`
+ Parent string `json:"Parent,omitempty" yaml:"Parent,omitempty"`
+ Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty"`
+ Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"`
+ Container string `json:"Container,omitempty" yaml:"Container,omitempty"`
+ ContainerConfig Config `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty"`
+ DockerVersion string `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty"`
+ Author string `json:"Author,omitempty" yaml:"Author,omitempty"`
+ Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"`
+ Architecture string `json:"Architecture,omitempty" yaml:"Architecture,omitempty"`
+ Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"`
+ VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"`
+ RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty"`
+ RootFS *RootFS `json:"RootFS,omitempty" yaml:"RootFS,omitempty"`
+}
+
+// ImagePre012 serves the same purpose as the Image type except that it is for
+// earlier versions of the Docker API (pre-012 to be specific)
+type ImagePre012 struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ Container string `json:"container,omitempty"`
+ ContainerConfig Config `json:"container_config,omitempty"`
+ DockerVersion string `json:"docker_version,omitempty"`
+ Author string `json:"author,omitempty"`
+ Config *Config `json:"config,omitempty"`
+ Architecture string `json:"architecture,omitempty"`
+ Size int64 `json:"size,omitempty"`
+}
+
+var (
+ // ErrNoSuchImage is the error returned when the image does not exist.
+ ErrNoSuchImage = errors.New("no such image")
+
+ // ErrMissingRepo is the error returned when the remote repository is
+ // missing.
+ ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'")
+
+ // ErrMissingOutputStream is the error returned when no output stream
+ // is provided to some calls, like BuildImage.
+ ErrMissingOutputStream = errors.New("missing output stream")
+
+ // ErrMultipleContexts is the error returned when both a ContextDir and
+ // InputStream are provided in BuildImageOptions
+ ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream")
+
+ // ErrMustSpecifyNames is the error returned when the Names field on
+ // ExportImagesOptions is nil or empty
+ ErrMustSpecifyNames = errors.New("must specify at least one name to export")
+)
+
+// ListImagesOptions specify parameters to the ListImages function.
+//
+// See https://goo.gl/xBe1u3 for more details.
+type ListImagesOptions struct {
+ All bool
+ Filters map[string][]string
+ Digests bool
+ Filter string
+}
+
+// ListImages returns the list of available images in the server.
+//
+// See https://goo.gl/xBe1u3 for more details.
+func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) {
+ path := "/images/json?" + queryString(opts)
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var images []APIImages
+ if err := json.NewDecoder(resp.Body).Decode(&images); err != nil {
+ return nil, err
+ }
+ return images, nil
+}
+
+// ImageHistory represents a layer in an image's history returned by the
+// ImageHistory call.
+type ImageHistory struct {
+ ID string `json:"Id" yaml:"Id"`
+ Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty"`
+ Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"`
+ CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty"`
+ Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"`
+}
+
+// ImageHistory returns the history of the image by its name or ID.
+//
+// See https://goo.gl/8bnTId for more details.
+func (c *Client) ImageHistory(name string) ([]ImageHistory, error) {
+ resp, err := c.do("GET", "/images/"+name+"/history", doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var history []ImageHistory
+ if err := json.NewDecoder(resp.Body).Decode(&history); err != nil {
+ return nil, err
+ }
+ return history, nil
+}
+
+// RemoveImage removes an image by its name or ID.
+//
+// See https://goo.gl/V3ZWnK for more details.
+func (c *Client) RemoveImage(name string) error {
+ resp, err := c.do("DELETE", "/images/"+name, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// RemoveImageOptions presents the set of options available for removing an image
+// from a registry.
+//
+// See https://goo.gl/V3ZWnK for more details.
+type RemoveImageOptions struct {
+ Force bool `qs:"force"`
+ NoPrune bool `qs:"noprune"`
+}
+
+// RemoveImageExtended removes an image by its name or ID.
+// Extra params can be passed; see RemoveImageOptions.
+//
+// See https://goo.gl/V3ZWnK for more details.
+func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error {
+ uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts))
+ resp, err := c.do("DELETE", uri, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// InspectImage returns an image by its name or ID.
+//
+// See https://goo.gl/jHPcg6 for more details.
+func (c *Client) InspectImage(name string) (*Image, error) {
+ resp, err := c.do("GET", "/images/"+name+"/json", doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, ErrNoSuchImage
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var image Image
+
+ // if the caller elected to skip checking the server's version, assume it's the latest
+ if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion112) {
+ if err := json.NewDecoder(resp.Body).Decode(&image); err != nil {
+ return nil, err
+ }
+ } else {
+ var imagePre012 ImagePre012
+ if err := json.NewDecoder(resp.Body).Decode(&imagePre012); err != nil {
+ return nil, err
+ }
+
+ image.ID = imagePre012.ID
+ image.Parent = imagePre012.Parent
+ image.Comment = imagePre012.Comment
+ image.Created = imagePre012.Created
+ image.Container = imagePre012.Container
+ image.ContainerConfig = imagePre012.ContainerConfig
+ image.DockerVersion = imagePre012.DockerVersion
+ image.Author = imagePre012.Author
+ image.Config = imagePre012.Config
+ image.Architecture = imagePre012.Architecture
+ image.Size = imagePre012.Size
+ }
+
+ return &image, nil
+}
+
+// PushImageOptions represents options to use in the PushImage method.
+//
+// See https://goo.gl/zPtZaT for more details.
+type PushImageOptions struct {
+ // Name of the image
+ Name string
+
+ // Tag of the image
+ Tag string
+
+ // Registry server to push the image
+ Registry string
+
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+}
+
+// PushImage pushes an image to a remote registry, logging progress to opts.OutputStream.
+//
+// An empty instance of AuthConfiguration may be used for unauthenticated
+// pushes.
+//
+// See https://goo.gl/zPtZaT for more details.
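+//
+// A minimal usage sketch (image name and credentials are hypothetical):
+//
+//	auth := docker.AuthConfiguration{Username: "user", Password: "secret"}
+//	err := client.PushImage(docker.PushImageOptions{
+//		Name:         "myrepo/myimage",
+//		Tag:          "v1",
+//		OutputStream: os.Stdout,
+//	}, auth)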
+func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error {
+ if opts.Name == "" {
+ return ErrNoSuchImage
+ }
+ headers, err := headersWithAuth(auth)
+ if err != nil {
+ return err
+ }
+ name := opts.Name
+ opts.Name = ""
+ path := "/images/" + name + "/push?" + queryString(&opts)
+ return c.stream("POST", path, streamOptions{
+ setRawTerminal: true,
+ rawJSONStream: opts.RawJSONStream,
+ headers: headers,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ })
+}
+
+// PullImageOptions present the set of options available for pulling an image
+// from a registry.
+//
+// See https://goo.gl/iJkZjD for more details.
+type PullImageOptions struct {
+ Repository string `qs:"fromImage"`
+ Registry string
+ Tag string
+
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+}
+
+// PullImage pulls an image from a remote registry, logging progress to
+// opts.OutputStream.
+//
+// See https://goo.gl/iJkZjD for more details.
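+//
+// A minimal usage sketch (an empty AuthConfiguration suffices for public
+// images):
+//
+//	err := client.PullImage(docker.PullImageOptions{
+//		Repository:   "busybox",
+//		Tag:          "latest",
+//		OutputStream: os.Stdout,
+//	}, docker.AuthConfiguration{})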
+func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error {
+ if opts.Repository == "" {
+ return ErrNoSuchImage
+ }
+
+ headers, err := headersWithAuth(auth)
+ if err != nil {
+ return err
+ }
+ return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout)
+}
+
+func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool, timeout time.Duration) error {
+ path := "/images/create?" + qs
+ return c.stream("POST", path, streamOptions{
+ setRawTerminal: true,
+ headers: headers,
+ in: in,
+ stdout: w,
+ rawJSONStream: rawJSONStream,
+ inactivityTimeout: timeout,
+ })
+}
+
+// LoadImageOptions represents the options for LoadImage Docker API Call
+//
+// See https://goo.gl/JyClMX for more details.
+type LoadImageOptions struct {
+ InputStream io.Reader
+}
+
+// LoadImage imports a docker image from a tarball
+//
+// See https://goo.gl/JyClMX for more details.
+func (c *Client) LoadImage(opts LoadImageOptions) error {
+ return c.stream("POST", "/images/load", streamOptions{
+ setRawTerminal: true,
+ in: opts.InputStream,
+ })
+}
+
+// ExportImageOptions represent the options for ExportImage Docker API call.
+//
+// See https://goo.gl/le7vK8 for more details.
+type ExportImageOptions struct {
+ Name string
+ OutputStream io.Writer
+ InactivityTimeout time.Duration `qs:"-"`
+}
+
+// ExportImage exports an image (as a tar file) into the stream.
+//
+// See https://goo.gl/le7vK8 for more details.
+func (c *Client) ExportImage(opts ExportImageOptions) error {
+ return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ })
+}
+
+// ExportImagesOptions represent the options for ExportImages Docker API call
+//
+// See https://goo.gl/huC7HA for more details.
+type ExportImagesOptions struct {
+ Names []string
+ OutputStream io.Writer `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+}
+
+// ExportImages exports one or more images (as a tar file) into the stream
+//
+// See https://goo.gl/huC7HA for more details.
+func (c *Client) ExportImages(opts ExportImagesOptions) error {
+ if len(opts.Names) == 0 {
+ return ErrMustSpecifyNames
+ }
+ return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{
+ setRawTerminal: true,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ })
+}
+
+// ImportImageOptions presents the set of options available for importing
+// an image from a source file or stdin.
+//
+// See https://goo.gl/iJkZjD for more details.
+type ImportImageOptions struct {
+ Repository string `qs:"repo"`
+ Source string `qs:"fromSrc"`
+ Tag string `qs:"tag"`
+
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+}
+
+// ImportImage imports an image from a URL, a file, or stdin
+//
+// See https://goo.gl/iJkZjD for more details.
+func (c *Client) ImportImage(opts ImportImageOptions) error {
+ if opts.Repository == "" {
+ return ErrNoSuchImage
+ }
+ if opts.Source != "-" {
+ opts.InputStream = nil
+ }
+ if opts.Source != "-" && !isURL(opts.Source) {
+ f, err := os.Open(opts.Source)
+ if err != nil {
+ return err
+ }
+ opts.InputStream = f
+ opts.Source = "-"
+ }
+ return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream, opts.InactivityTimeout)
+}
+
+// BuildImageOptions presents the set of options available for building an
+// image from a tarball containing a Dockerfile.
+//
+// For more details about the Docker building process, see
+// http://goo.gl/tlPXPu.
+type BuildImageOptions struct {
+ Name string `qs:"t"`
+ Dockerfile string `qs:"dockerfile"`
+ NoCache bool `qs:"nocache"`
+ SuppressOutput bool `qs:"q"`
+ Pull bool `qs:"pull"`
+ RmTmpContainer bool `qs:"rm"`
+ ForceRmTmpContainer bool `qs:"forcerm"`
+ Memory int64 `qs:"memory"`
+ Memswap int64 `qs:"memswap"`
+ CPUShares int64 `qs:"cpushares"`
+ CPUQuota int64 `qs:"cpuquota"`
+ CPUPeriod int64 `qs:"cpuperiod"`
+ CPUSetCPUs string `qs:"cpusetcpus"`
+ InputStream io.Reader `qs:"-"`
+ OutputStream io.Writer `qs:"-"`
+ RawJSONStream bool `qs:"-"`
+ Remote string `qs:"remote"`
+ Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header
+ AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header
+ ContextDir string `qs:"-"`
+ Ulimits []ULimit `qs:"-"`
+ BuildArgs []BuildArg `qs:"-"`
+ InactivityTimeout time.Duration `qs:"-"`
+}
+
+// BuildArg represents arguments that can be passed to the image when building
+// it from a Dockerfile.
+//
+// For more details about the Docker building process, see
+// http://goo.gl/tlPXPu.
+type BuildArg struct {
+ Name string `json:"Name,omitempty" yaml:"Name,omitempty"`
+ Value string `json:"Value,omitempty" yaml:"Value,omitempty"`
+}
+
+// BuildImage builds an image from a tarball's URL or a Dockerfile in the input
+// stream.
+//
+// See https://goo.gl/xySxCe for more details.
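+//
+// A minimal usage sketch using a local context directory (paths are
+// hypothetical; the directory must contain a Dockerfile):
+//
+//	err := client.BuildImage(docker.BuildImageOptions{
+//		Name:         "myimage:latest",
+//		ContextDir:   "/path/to/context",
+//		OutputStream: os.Stdout,
+//	})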
+func (c *Client) BuildImage(opts BuildImageOptions) error {
+ if opts.OutputStream == nil {
+ return ErrMissingOutputStream
+ }
+ headers, err := headersWithAuth(opts.Auth, c.versionedAuthConfigs(opts.AuthConfigs))
+ if err != nil {
+ return err
+ }
+
+ if opts.Remote != "" && opts.Name == "" {
+ opts.Name = opts.Remote
+ }
+ if opts.InputStream != nil || opts.ContextDir != "" {
+ headers["Content-Type"] = "application/tar"
+ } else if opts.Remote == "" {
+ return ErrMissingRepo
+ }
+ if opts.ContextDir != "" {
+ if opts.InputStream != nil {
+ return ErrMultipleContexts
+ }
+ var err error
+ if opts.InputStream, err = createTarStream(opts.ContextDir, opts.Dockerfile); err != nil {
+ return err
+ }
+ }
+
+ qs := queryString(&opts)
+ if len(opts.Ulimits) > 0 {
+ if b, err := json.Marshal(opts.Ulimits); err == nil {
+ item := url.Values(map[string][]string{})
+ item.Add("ulimits", string(b))
+ qs = fmt.Sprintf("%s&%s", qs, item.Encode())
+ }
+ }
+
+ if len(opts.BuildArgs) > 0 {
+ v := make(map[string]string)
+ for _, arg := range opts.BuildArgs {
+ v[arg.Name] = arg.Value
+ }
+ if b, err := json.Marshal(v); err == nil {
+ item := url.Values(map[string][]string{})
+ item.Add("buildargs", string(b))
+ qs = fmt.Sprintf("%s&%s", qs, item.Encode())
+ }
+ }
+
+ return c.stream("POST", fmt.Sprintf("/build?%s", qs), streamOptions{
+ setRawTerminal: true,
+ rawJSONStream: opts.RawJSONStream,
+ headers: headers,
+ in: opts.InputStream,
+ stdout: opts.OutputStream,
+ inactivityTimeout: opts.InactivityTimeout,
+ })
+}
+
+func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{} {
+ if c.serverAPIVersion == nil {
+ c.checkAPIVersion()
+ }
+ if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion119) {
+ return AuthConfigurations119(authConfigs.Configs)
+ }
+ return authConfigs
+}
+
+// TagImageOptions present the set of options to tag an image.
+//
+// See https://goo.gl/98ZzkU for more details.
+type TagImageOptions struct {
+ Repo string
+ Tag string
+ Force bool
+}
+
+// TagImage adds a tag to the image identified by the given name.
+//
+// See https://goo.gl/98ZzkU for more details.
+func (c *Client) TagImage(name string, opts TagImageOptions) error {
+ if name == "" {
+ return ErrNoSuchImage
+ }
+ resp, err := c.do("POST", fmt.Sprintf("/images/"+name+"/tag?%s",
+ queryString(&opts)), doOptions{})
+
+ if err != nil {
+ return err
+ }
+
+ defer resp.Body.Close()
+
+ if resp.StatusCode == http.StatusNotFound {
+ return ErrNoSuchImage
+ }
+
+ return err
+}
+
+func isURL(u string) bool {
+ p, err := url.Parse(u)
+ if err != nil {
+ return false
+ }
+ return p.Scheme == "http" || p.Scheme == "https"
+}
+
+func headersWithAuth(auths ...interface{}) (map[string]string, error) {
+ var headers = make(map[string]string)
+
+ for _, auth := range auths {
+ switch auth.(type) {
+ case AuthConfiguration:
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(auth); err != nil {
+ return nil, err
+ }
+ headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+ case AuthConfigurations, AuthConfigurations119:
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(auth); err != nil {
+ return nil, err
+ }
+ headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes())
+ }
+ }
+
+ return headers, nil
+}
+
+// APIImageSearch reflects the result of a search on the Docker Hub.
+//
+// See https://goo.gl/AYjyrF for more details.
+type APIImageSearch struct {
+ Description string `json:"description,omitempty" yaml:"description,omitempty"`
+ IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty"`
+ IsAutomated bool `json:"is_automated,omitempty" yaml:"is_automated,omitempty"`
+ Name string `json:"name,omitempty" yaml:"name,omitempty"`
+ StarCount int `json:"star_count,omitempty" yaml:"star_count,omitempty"`
+}
+
+// SearchImages searches the Docker Hub for the given term.
+//
+// See https://goo.gl/AYjyrF for more details.
+func (c *Client) SearchImages(term string) ([]APIImageSearch, error) {
+ resp, err := c.do("GET", "/images/search?term="+term, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var searchResult []APIImageSearch
+ if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil {
+ return nil, err
+ }
+ return searchResult, nil
+}
+
+// SearchImagesEx searches the Docker Hub for the given term, using the given authentication.
+//
+// See https://goo.gl/AYjyrF for more details.
+func (c *Client) SearchImagesEx(term string, auth AuthConfiguration) ([]APIImageSearch, error) {
+ headers, err := headersWithAuth(auth)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := c.do("GET", "/images/search?term="+term, doOptions{
+ headers: headers,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ defer resp.Body.Close()
+
+ var searchResult []APIImageSearch
+ if err := json.NewDecoder(resp.Body).Decode(&searchResult); err != nil {
+ return nil, err
+ }
+
+ return searchResult, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/misc.go b/vendor/github.com/fsouza/go-dockerclient/misc.go
new file mode 100644
index 0000000000..ce9e9750b0
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/misc.go
@@ -0,0 +1,124 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "strings"
+)
+
+// Version returns version information about the docker server.
+//
+// See https://goo.gl/ND9R8L for more details.
+func (c *Client) Version() (*Env, error) {
+ resp, err := c.do("GET", "/version", doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var env Env
+ if err := env.Decode(resp.Body); err != nil {
+ return nil, err
+ }
+ return &env, nil
+}
+
+// DockerInfo contains information about the Docker server
+//
+// See https://goo.gl/bHUoz9 for more details.
+type DockerInfo struct {
+ ID string
+ Containers int
+ ContainersRunning int
+ ContainersPaused int
+ ContainersStopped int
+ Images int
+ Driver string
+ DriverStatus [][2]string
+ SystemStatus [][2]string
+ Plugins PluginsInfo
+ MemoryLimit bool
+ SwapLimit bool
+ KernelMemory bool
+ CPUCfsPeriod bool `json:"CpuCfsPeriod"`
+ CPUCfsQuota bool `json:"CpuCfsQuota"`
+ CPUShares bool
+ CPUSet bool
+ IPv4Forwarding bool
+ BridgeNfIptables bool
+ BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
+ Debug bool
+ NFd int
+ OomKillDisable bool
+ NGoroutines int
+ SystemTime string
+ ExecutionDriver string
+ LoggingDriver string
+ CgroupDriver string
+ NEventsListener int
+ KernelVersion string
+ OperatingSystem string
+ OSType string
+ Architecture string
+ IndexServerAddress string
+ NCPU int
+ MemTotal int64
+ DockerRootDir string
+ HTTPProxy string `json:"HttpProxy"`
+ HTTPSProxy string `json:"HttpsProxy"`
+ NoProxy string
+ Name string
+ Labels []string
+ ExperimentalBuild bool
+ ServerVersion string
+ ClusterStore string
+ ClusterAdvertise string
+}
+
+// PluginsInfo is a struct with the plugins registered with the docker daemon
+//
+// for more information, see: https://goo.gl/bHUoz9
+type PluginsInfo struct {
+ // List of Volume plugins registered
+ Volume []string
+ // List of Network plugins registered
+ Network []string
+ // List of Authorization plugins registered
+ Authorization []string
+}
+
+// Info returns system-wide information about the Docker server.
+//
+// See https://goo.gl/ElTHi2 for more details.
+func (c *Client) Info() (*DockerInfo, error) {
+ resp, err := c.do("GET", "/info", doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var info DockerInfo
+ if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
+ return nil, err
+ }
+ return &info, nil
+}
+
+// ParseRepositoryTag gets the name of the repository and returns it split
+// into two parts: the repository and the tag.
+//
+// Some examples:
+//
+// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest
+// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, ""
+func ParseRepositoryTag(repoTag string) (repository string, tag string) {
+ n := strings.LastIndex(repoTag, ":")
+ if n < 0 {
+ return repoTag, ""
+ }
+ if tag := repoTag[n+1:]; !strings.Contains(tag, "/") {
+ return repoTag[:n], tag
+ }
+ return repoTag, ""
+}
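
The doc comment's examples, written out as a small sketch:

```go
repo, tag := docker.ParseRepositoryTag("localhost.localdomain:5000/samalba/hipache:latest")
// repo == "localhost.localdomain:5000/samalba/hipache", tag == "latest"

repo, tag = docker.ParseRepositoryTag("localhost.localdomain:5000/samalba/hipache")
// repo == "localhost.localdomain:5000/samalba/hipache", tag == "" (no tag present)
```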
diff --git a/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/fsouza/go-dockerclient/network.go
new file mode 100644
index 0000000000..07a1a5796d
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/network.go
@@ -0,0 +1,282 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+)
+
+// ErrNetworkAlreadyExists is the error returned by CreateNetwork when the
+// network already exists.
+var ErrNetworkAlreadyExists = errors.New("network already exists")
+
+// Network represents a network.
+//
+// See https://goo.gl/6GugX3 for more details.
+type Network struct {
+ Name string
+ ID string `json:"Id"`
+ Scope string
+ Driver string
+ IPAM IPAMOptions
+ Containers map[string]Endpoint
+ Options map[string]string
+ Internal bool
+ EnableIPv6 bool `json:"EnableIPv6"`
+}
+
+// Endpoint contains network resources allocated and used for a container in a network
+//
+// See https://goo.gl/6GugX3 for more details.
+type Endpoint struct {
+ Name string
+ ID string `json:"EndpointID"`
+ MacAddress string
+ IPv4Address string
+ IPv6Address string
+}
+
+// ListNetworks returns all networks.
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) ListNetworks() ([]Network, error) {
+ resp, err := c.do("GET", "/networks", doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var networks []Network
+ if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil {
+ return nil, err
+ }
+ return networks, nil
+}
+
+// NetworkFilterOpts is an aggregation of key=value that Docker
+// uses to filter networks
+type NetworkFilterOpts map[string]map[string]bool
+
+// FilteredListNetworks returns all networks with the filters applied
+//
+// See goo.gl/zd2mx4 for more details.
+func (c *Client) FilteredListNetworks(opts NetworkFilterOpts) ([]Network, error) {
+ params := bytes.NewBuffer(nil)
+ if err := json.NewEncoder(params).Encode(&opts); err != nil {
+ return nil, err
+ }
+ path := "/networks?filters=" + params.String()
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var networks []Network
+ if err := json.NewDecoder(resp.Body).Decode(&networks); err != nil {
+ return nil, err
+ }
+ return networks, nil
+}
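
A hedged sketch of the filtered listing; the `driver` filter key follows the Docker API's network-filter format, and `client` is assumed to be a configured `*Client`:

```go
opts := docker.NetworkFilterOpts{
	"driver": {"bridge": true}, // keep only bridge-driver networks
}
networks, err := client.FilteredListNetworks(opts)
if err != nil {
	log.Fatal(err)
}
for _, n := range networks {
	fmt.Println(n.Name, n.ID, n.Scope)
}
```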
+
+// NetworkInfo returns information about a network by its ID.
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) NetworkInfo(id string) (*Network, error) {
+ path := "/networks/" + id
+ resp, err := c.do("GET", path, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, &NoSuchNetwork{ID: id}
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var network Network
+ if err := json.NewDecoder(resp.Body).Decode(&network); err != nil {
+ return nil, err
+ }
+ return &network, nil
+}
+
+// CreateNetworkOptions specify parameters to the CreateNetwork function and,
+// for now, the expected body of the "create network" HTTP request message.
+//
+// See https://goo.gl/6GugX3 for more details.
+type CreateNetworkOptions struct {
+ Name string `json:"Name"`
+ CheckDuplicate bool `json:"CheckDuplicate"`
+ Driver string `json:"Driver"`
+ IPAM IPAMOptions `json:"IPAM"`
+ Options map[string]interface{} `json:"Options"`
+ Internal bool `json:"Internal"`
+ EnableIPv6 bool `json:"EnableIPv6"`
+}
+
+// IPAMOptions controls IP Address Management when creating a network
+//
+// See https://goo.gl/T8kRVH for more details.
+type IPAMOptions struct {
+ Driver string `json:"Driver"`
+ Config []IPAMConfig `json:"Config"`
+}
+
+// IPAMConfig represents IPAM configurations
+//
+// See https://goo.gl/T8kRVH for more details.
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ IPRange string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+ AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
+}
+
+// CreateNetwork creates a new network, returning the network instance,
+// or an error in case of failure.
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) {
+ resp, err := c.do(
+ "POST",
+ "/networks/create",
+ doOptions{
+ data: opts,
+ },
+ )
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusConflict {
+ return nil, ErrNetworkAlreadyExists
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ type createNetworkResponse struct {
+ ID string
+ }
+ var (
+ network Network
+ cnr createNetworkResponse
+ )
+ if err := json.NewDecoder(resp.Body).Decode(&cnr); err != nil {
+ return nil, err
+ }
+
+ network.Name = opts.Name
+ network.ID = cnr.ID
+ network.Driver = opts.Driver
+
+ return &network, nil
+}
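
Putting the options and IPAM types together, a hedged creation sketch (the network name and CIDR ranges are illustrative):

```go
network, err := client.CreateNetwork(docker.CreateNetworkOptions{
	Name:           "backend",
	Driver:         "bridge",
	CheckDuplicate: true,
	IPAM: docker.IPAMOptions{
		Driver: "default",
		Config: []docker.IPAMConfig{
			{Subnet: "172.28.0.0/16", Gateway: "172.28.0.1"},
		},
	},
})
switch err {
case nil:
	fmt.Println("created network", network.ID)
case docker.ErrNetworkAlreadyExists:
	log.Println("network already exists")
default:
	log.Fatal(err)
}
```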
+
+// RemoveNetwork removes a network or returns an error in case of failure.
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) RemoveNetwork(id string) error {
+ resp, err := c.do("DELETE", "/networks/"+id, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchNetwork{ID: id}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// NetworkConnectionOptions specify parameters to the ConnectNetwork and
+// DisconnectNetwork functions.
+//
+// See https://goo.gl/RV7BJU for more details.
+type NetworkConnectionOptions struct {
+ Container string
+
+ // EndpointConfig is only applicable to the ConnectNetwork call
+ EndpointConfig *EndpointConfig `json:"EndpointConfig,omitempty"`
+
+ // Force is only applicable to the DisconnectNetwork call
+ Force bool
+}
+
+// EndpointConfig stores network endpoint details
+//
+// See https://goo.gl/RV7BJU for more details.
+type EndpointConfig struct {
+ IPAMConfig *EndpointIPAMConfig
+ Links []string
+ Aliases []string
+ NetworkID string
+ EndpointID string
+ Gateway string
+ IPAddress string
+ IPPrefixLen int
+ IPv6Gateway string
+ GlobalIPv6Address string
+ GlobalIPv6PrefixLen int
+ MacAddress string
+}
+
+// EndpointIPAMConfig represents IPAM configurations for an
+// endpoint
+//
+// See https://goo.gl/RV7BJU for more details.
+type EndpointIPAMConfig struct {
+ IPv4Address string `json:",omitempty"`
+ IPv6Address string `json:",omitempty"`
+}
+
+// ConnectNetwork adds a container to a network or returns an error in case of
+// failure.
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error {
+ resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{data: opts})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
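
A hedged sketch of connecting a container with an alias and a static IPv4 address (the IDs and the address are placeholders):

```go
err := client.ConnectNetwork("network-id", docker.NetworkConnectionOptions{
	Container: "container-id",
	EndpointConfig: &docker.EndpointConfig{
		Aliases: []string{"db"},
		IPAMConfig: &docker.EndpointIPAMConfig{
			IPv4Address: "172.28.0.10",
		},
	},
})
if err != nil {
	log.Fatal(err)
}
```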
+
+// DisconnectNetwork removes a container from a network or returns an error in
+// case of failure.
+//
+// See https://goo.gl/6GugX3 for more details.
+func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error {
+ resp, err := c.do("POST", "/networks/"+id+"/disconnect", doOptions{data: opts})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container}
+ }
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// NoSuchNetwork is the error returned when a given network does not exist.
+type NoSuchNetwork struct {
+ ID string
+}
+
+func (err *NoSuchNetwork) Error() string {
+ return fmt.Sprintf("No such network: %s", err.ID)
+}
+
+// NoSuchNetworkOrContainer is the error returned when a given network or
+// container does not exist.
+type NoSuchNetworkOrContainer struct {
+ NetworkID string
+ ContainerID string
+}
+
+func (err *NoSuchNetworkOrContainer) Error() string {
+ return fmt.Sprintf("No such network (%s) or container (%s)", err.NetworkID, err.ContainerID)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/signal.go b/vendor/github.com/fsouza/go-dockerclient/signal.go
new file mode 100644
index 0000000000..16aa00388f
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/signal.go
@@ -0,0 +1,49 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+// Signal represents a signal that can be sent to the container in a
+// KillContainer call.
+type Signal int
+
+// These values represent all signals available on Linux, where containers will
+// be running.
+const (
+ SIGABRT = Signal(0x6)
+ SIGALRM = Signal(0xe)
+ SIGBUS = Signal(0x7)
+ SIGCHLD = Signal(0x11)
+ SIGCLD = Signal(0x11)
+ SIGCONT = Signal(0x12)
+ SIGFPE = Signal(0x8)
+ SIGHUP = Signal(0x1)
+ SIGILL = Signal(0x4)
+ SIGINT = Signal(0x2)
+ SIGIO = Signal(0x1d)
+ SIGIOT = Signal(0x6)
+ SIGKILL = Signal(0x9)
+ SIGPIPE = Signal(0xd)
+ SIGPOLL = Signal(0x1d)
+ SIGPROF = Signal(0x1b)
+ SIGPWR = Signal(0x1e)
+ SIGQUIT = Signal(0x3)
+ SIGSEGV = Signal(0xb)
+ SIGSTKFLT = Signal(0x10)
+ SIGSTOP = Signal(0x13)
+ SIGSYS = Signal(0x1f)
+ SIGTERM = Signal(0xf)
+ SIGTRAP = Signal(0x5)
+ SIGTSTP = Signal(0x14)
+ SIGTTIN = Signal(0x15)
+ SIGTTOU = Signal(0x16)
+ SIGUNUSED = Signal(0x1f)
+ SIGURG = Signal(0x17)
+ SIGUSR1 = Signal(0xa)
+ SIGUSR2 = Signal(0xc)
+ SIGVTALRM = Signal(0x1a)
+ SIGWINCH = Signal(0x1c)
+ SIGXCPU = Signal(0x18)
+ SIGXFSZ = Signal(0x19)
+)
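
These constants are typically passed to the KillContainer call defined elsewhere in this package; a hedged sketch (the container ID is a placeholder):

```go
err := client.KillContainer(docker.KillContainerOptions{
	ID:     "container-id",
	Signal: docker.SIGTERM, // graceful stop; use docker.SIGKILL to force
})
if err != nil {
	log.Fatal(err)
}
```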
diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go
new file mode 100644
index 0000000000..11e51b496a
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/tar.go
@@ -0,0 +1,117 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/fileutils"
+)
+
+func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
+ excludes, err := parseDockerignore(srcPath)
+ if err != nil {
+ return nil, err
+ }
+
+ includes := []string{"."}
+
+ // If .dockerignore mentions .dockerignore or the Dockerfile
+ // then make sure we send both files over to the daemon
+ // because Dockerfile is, obviously, needed no matter what, and
+ // .dockerignore is needed to know if either one needs to be
+// removed. The daemon will remove them for us, if needed, after it
+ // parses the Dockerfile.
+ //
+ // https://github.com/docker/docker/issues/8330
+ //
+ forceIncludeFiles := []string{".dockerignore", dockerfilePath}
+
+ for _, includeFile := range forceIncludeFiles {
+ if includeFile == "" {
+ continue
+ }
+ keepThem, err := fileutils.Matches(includeFile, excludes)
+ if err != nil {
+ return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err)
+ }
+ if keepThem {
+ includes = append(includes, includeFile)
+ }
+ }
+
+ if err := validateContextDirectory(srcPath, excludes); err != nil {
+ return nil, err
+ }
+ tarOpts := &archive.TarOptions{
+ ExcludePatterns: excludes,
+ IncludeFiles: includes,
+ Compression: archive.Uncompressed,
+ NoLchown: true,
+ }
+ return archive.TarWithOptions(srcPath, tarOpts)
+}
+
+// validateContextDirectory checks if all the contents of the directory
+// can be read and returns an error if some files can't be read.
+// Symlinks which point to non-existing files don't trigger an error
+func validateContextDirectory(srcPath string, excludes []string) error {
+ return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
+ // skip this directory/file if it's not in the path, it won't get added to the context
+ if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil {
+ return err
+ } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
+ return err
+ } else if skip {
+ if f.IsDir() {
+ return filepath.SkipDir
+ }
+ return nil
+ }
+
+ if err != nil {
+ if os.IsPermission(err) {
+ return fmt.Errorf("can't stat '%s'", filePath)
+ }
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ // skip checking if symlinks point to non-existing files; such symlinks can be useful.
+ // also skip named pipes, because they hang on open
+ if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
+ return nil
+ }
+
+ if !f.IsDir() {
+ currentFile, err := os.Open(filePath)
+ if err != nil && os.IsPermission(err) {
+ return fmt.Errorf("no permission to read from '%s'", filePath)
+ }
+ currentFile.Close()
+ }
+ return nil
+ })
+}
+
+func parseDockerignore(root string) ([]string, error) {
+ var excludes []string
+ ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
+ if err != nil && !os.IsNotExist(err) {
+ return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
+ }
+ excludes = strings.Split(string(ignore), "\n")
+
+ return excludes, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/tls.go b/vendor/github.com/fsouza/go-dockerclient/tls.go
new file mode 100644
index 0000000000..bb5790b5f0
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/tls.go
@@ -0,0 +1,118 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// The content is borrowed from Docker's own source code to provide a simple
+// TLS-based dialer
+
+package docker
+
+import (
+ "crypto/tls"
+ "errors"
+ "net"
+ "strings"
+ "time"
+)
+
+type tlsClientCon struct {
+ *tls.Conn
+ rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+ // The standard tls.Conn doesn't provide a CloseWrite() method, so we call it
+ // on the underlying connection instead.
+ if cwc, ok := c.rawConn.(interface {
+ CloseWrite() error
+ }); ok {
+ return cwc.CloseWrite()
+ }
+ return nil
+}
+
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+ // We want the Timeout and Deadline values from dialer to cover the
+ // whole process: TCP connection and TLS handshake. This means that we
+ // also need to start our own timers now.
+ timeout := dialer.Timeout
+
+ if !dialer.Deadline.IsZero() {
+ deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ if timeout == 0 || deadlineTimeout < timeout {
+ timeout = deadlineTimeout
+ }
+ }
+
+ var errChannel chan error
+
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- errors.New("tls: dial timed out")
+ })
+ }
+
+ rawConn, err := dialer.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ hostname := addr[:colonPos]
+
+ // If no ServerName is set, infer the ServerName
+ // from the hostname we're connecting to.
+ if config.ServerName == "" {
+ // Make a copy to avoid polluting argument or default.
+ config = copyTLSConfig(config)
+ config.ServerName = hostname
+ }
+
+ conn := tls.Client(rawConn, config)
+
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+
+ err = <-errChannel
+ }
+
+ if err != nil {
+ rawConn.Close()
+ return nil, err
+ }
+
+ // This is where we differ from the standard crypto/tls package: we return a
+ // wrapper which holds both the TLS and the raw connections.
+ return &tlsClientCon{conn, rawConn}, nil
+}
+
+// this exists to silence an error message from go vet
+func copyTLSConfig(cfg *tls.Config) *tls.Config {
+ return &tls.Config{
+ Certificates: cfg.Certificates,
+ CipherSuites: cfg.CipherSuites,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ ClientSessionCache: cfg.ClientSessionCache,
+ CurvePreferences: cfg.CurvePreferences,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ MaxVersion: cfg.MaxVersion,
+ MinVersion: cfg.MinVersion,
+ NameToCertificate: cfg.NameToCertificate,
+ NextProtos: cfg.NextProtos,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ Rand: cfg.Rand,
+ RootCAs: cfg.RootCAs,
+ ServerName: cfg.ServerName,
+ SessionTicketKey: cfg.SessionTicketKey,
+ SessionTicketsDisabled: cfg.SessionTicketsDisabled,
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/volume.go b/vendor/github.com/fsouza/go-dockerclient/volume.go
new file mode 100644
index 0000000000..5fe8ee3d63
--- /dev/null
+++ b/vendor/github.com/fsouza/go-dockerclient/volume.go
@@ -0,0 +1,128 @@
+// Copyright 2015 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package docker
+
+import (
+ "encoding/json"
+ "errors"
+ "net/http"
+)
+
+var (
+ // ErrNoSuchVolume is the error returned when the volume does not exist.
+ ErrNoSuchVolume = errors.New("no such volume")
+
+ // ErrVolumeInUse is the error returned when the volume requested to be removed is still in use.
+ ErrVolumeInUse = errors.New("volume in use and cannot be removed")
+)
+
+// Volume represents a volume.
+//
+// See https://goo.gl/FZA4BK for more details.
+type Volume struct {
+ Name string `json:"Name" yaml:"Name"`
+ Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"`
+ Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// ListVolumesOptions specify parameters to the ListVolumes function.
+//
+// See https://goo.gl/FZA4BK for more details.
+type ListVolumesOptions struct {
+ Filters map[string][]string
+}
+
+// ListVolumes returns a list of available volumes in the server.
+//
+// See https://goo.gl/FZA4BK for more details.
+func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) {
+ resp, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ m := make(map[string]interface{})
+ if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
+ return nil, err
+ }
+ var volumes []Volume
+ volumesJSON, ok := m["Volumes"]
+ if !ok {
+ return volumes, nil
+ }
+ data, err := json.Marshal(volumesJSON)
+ if err != nil {
+ return nil, err
+ }
+ if err := json.Unmarshal(data, &volumes); err != nil {
+ return nil, err
+ }
+ return volumes, nil
+}
+
+// CreateVolumeOptions specify parameters to the CreateVolume function.
+//
+// See https://goo.gl/pBUbZ9 for more details.
+type CreateVolumeOptions struct {
+ Name string
+ Driver string
+ DriverOpts map[string]string
+}
+
+// CreateVolume creates a volume on the server.
+//
+// See https://goo.gl/pBUbZ9 for more details.
+func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) {
+ resp, err := c.do("POST", "/volumes/create", doOptions{data: opts})
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var volume Volume
+ if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil {
+ return nil, err
+ }
+ return &volume, nil
+}
+
+// InspectVolume returns a volume by its name.
+//
+// See https://goo.gl/0g9A6i for more details.
+func (c *Client) InspectVolume(name string) (*Volume, error) {
+ resp, err := c.do("GET", "/volumes/"+name, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
+ return nil, ErrNoSuchVolume
+ }
+ return nil, err
+ }
+ defer resp.Body.Close()
+ var volume Volume
+ if err := json.NewDecoder(resp.Body).Decode(&volume); err != nil {
+ return nil, err
+ }
+ return &volume, nil
+}
+
+// RemoveVolume removes a volume by its name.
+//
+// See https://goo.gl/79GNQz for more details.
+func (c *Client) RemoveVolume(name string) error {
+ resp, err := c.do("DELETE", "/volumes/"+name, doOptions{})
+ if err != nil {
+ if e, ok := err.(*Error); ok {
+ if e.Status == http.StatusNotFound {
+ return ErrNoSuchVolume
+ }
+ if e.Status == http.StatusConflict {
+ return ErrVolumeInUse
+ }
+ }
+ // Propagate unexpected errors instead of swallowing them.
+ return err
+ }
+ defer resp.Body.Close()
+ return nil
+}
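
Tying the volume calls together, a hedged create/inspect/remove sketch (the volume name is illustrative; `client` as in the earlier sketches):

```go
vol, err := client.CreateVolume(docker.CreateVolumeOptions{
	Name:   "appdata",
	Driver: "local",
})
if err != nil {
	log.Fatal(err)
}
vol, err = client.InspectVolume(vol.Name)
if err != nil {
	log.Fatal(err)
}
fmt.Println("mounted at:", vol.Mountpoint)

switch err := client.RemoveVolume(vol.Name); err {
case nil:
	// removed
case docker.ErrVolumeInUse:
	log.Println("volume still in use")
case docker.ErrNoSuchVolume:
	log.Println("volume already gone")
default:
	log.Fatal(err)
}
```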
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
new file mode 100644
index 0000000000..466ac86abc
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -0,0 +1,54 @@
+# This is the official list of Go-MySQL-Driver authors for copyright purposes.
+
+# If you are submitting a patch, please add your name or the name of the
+# organization which holds the copyright to this list in alphabetical order.
+
+# Names should be added to this file as
+# Name <email address>
+# The email address is not required for organizations.
+# Please keep the list sorted.
+
+
+# Individual Persons
+
+Aaron Hopkins
+Arne Hormann
+Carlos Nieto
+Chris Moos
+Daniel Nichter
+Daniël van Eeden
+DisposaBoy
+Frederick Mayle
+Gustavo Kristic
+Hanno Braun
+Henri Yandell
+Hirotaka Yamamoto
+INADA Naoki
+James Harr
+Jian Zhen
+Joshua Prunier
+Julien Lefevre
+Julien Schmidt
+Kamil Dziedzic
+Kevin Malachowski
+Lennart Rudolph
+Leonardo YongUk Kim
+Luca Looz
+Lucas Liu
+Luke Scott
+Michael Woolnough
+Nicola Peduzzi
+Paul Bonser
+Runrioter Wung
+Soroush Pour
+Stan Putrya
+Stanley Gunawan
+Xiaobing Jiang
+Xiuming Chen
+Zhenye Xie
+
+# Organizations
+
+Barracuda Networks, Inc.
+Google Inc.
+Stripe Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
new file mode 100644
index 0000000000..617ad80fc5
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -0,0 +1,114 @@
+## HEAD
+
+Changes:
+
+ - Go 1.1 is no longer supported
+ - Use decimals fields in MySQL to format time types (#249)
+ - Buffer optimizations (#269)
+ - TLS ServerName defaults to the host (#283)
+ - Refactoring (#400, #410, #437)
+ - Adjusted documentation for second generation CloudSQL (#485)
+
+New Features:
+
+ - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
+ - Support for returning table alias on Columns() (#289, #359, #382)
+ - Placeholder interpolation, which can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Support for uint64 parameters with high bit set (#332, #345)
+ - Cleartext authentication plugin support (#327)
+ - Exported ParseDSN function and the Config struct (#403, #419, #429)
+ - Read / Write timeouts (#401)
+ - Support for JSON field type (#414)
+ - Support for multi-statements and multi-results (#411, #431)
+ - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
+
+Bugfixes:
+
+ - Fixed handling of queries without columns and rows (#255)
+ - Fixed a panic when SetKeepAlive() failed (#298)
+ - Handle ERR packets while reading rows (#321)
+ - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
+ - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
+ - Actually zero out bytes in handshake response (#378)
+ - Fixed race condition in registering LOAD DATA INFILE handler (#383)
+ - Fixed tests with MySQL 5.7.9+ (#380)
+ - QueryUnescape TLS config names (#397)
+ - Fixed "broken pipe" error by writing to closed socket (#390)
+ - Fixed LOAD LOCAL DATA INFILE buffering (#424)
+ - Fixed parsing of floats into float64 when placeholders are used (#434)
+ - Fixed DSN tests with Go 1.7+ (#459)
+ - Handle ERR packets while waiting for EOF (#473)
+
+
+## Version 1.2 (2014-06-03)
+
+Changes:
+
+ - We switched back to a "rolling release". `go get` installs the current master branch again
+ - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
+ - Exported errors to allow easy checking from application code
+ - Enabled TCP Keepalives on TCP connections
+ - Optimized INFILE handling (better buffer size calculation, lazy init, ...)
+ - The DSN parser also checks for a missing separating slash
+ - Faster binary date / datetime to string formatting
+ - Also exported the MySQLWarning type
+ - mysqlConn.Close returns the first error encountered instead of ignoring all errors
+ - writePacket() automatically writes the packet size to the header
+ - readPacket() uses an iterative approach instead of the recursive approach to merge split packets
+
+New Features:
+
+ - `RegisterDial` allows the usage of a custom dial function to establish the network connection
+ - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
+ - Logging of critical errors is configurable with `SetLogger`
+ - Google CloudSQL support
+
+Bugfixes:
+
+ - Allow more than 32 parameters in prepared statements
+ - Various old_password fixes
+ - Fixed TestConcurrent test to pass Go's race detection
+ - Fixed appendLengthEncodedInteger for large numbers
+ - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
+
+
+## Version 1.1 (2013-11-02)
+
+Changes:
+
+ - Go-MySQL-Driver now requires Go 1.1
+ - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
+ - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
+ - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
+ - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
+ - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
+ - Optimized the buffer for reading
+ - stmt.Query now caches column metadata
+ - New Logo
+ - Changed the copyright header to include all contributors
+ - Improved the LOAD INFILE documentation
+ - The driver struct is now exported to make the driver directly accessible
+ - Refactored the driver tests
+ - Added more benchmarks and moved all to a separate file
+ - Other small refactoring
+
+New Features:
+
+ - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
+ - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
+ - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
+
+Bugfixes:
+
+ - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
+ - Convert to DB timezone when inserting `time.Time`
+ - Split packets (more than 16MB) are now merged correctly
+ - Fixed false positive `io.EOF` errors when the data was fully read
+ - Avoid panics on reuse of closed connections
+ - Fixed empty string producing false nil values
+ - Fixed sign byte for positive TIME fields
+
+
+## Version 1.0 (2013-05-14)
+
+Initial Release
diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
new file mode 100644
index 0000000000..8fe16bcb49
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+# Contributing Guidelines
+
+## Reporting Issues
+
+Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
+
+## Contributing Code
+
+By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file.
+Don't forget to add yourself to the AUTHORS file.
+
+### Code Review
+
+Everyone is invited to review and comment on pull requests.
+If it looks fine to you, comment with "LGTM" (Looks good to me).
+
+If changes are required, notify the reviewers with "PTAL" (Please take another look) after committing the fixes.
+
+Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".
+
+## Development Ideas
+
+If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.
diff --git a/vendor/github.com/go-sql-driver/mysql/ISSUE_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000000..d9771f1ddc
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/ISSUE_TEMPLATE.md
@@ -0,0 +1,21 @@
+### Issue description
+Tell us what should happen and what happens instead
+
+### Example code
+```go
+If possible, please enter some example code here to reproduce the issue.
+```
+
+### Error log
+```
+If you have an error log, please paste it here.
+```
+
+### Configuration
+*Driver version (or git SHA):*
+
+*Go version:* run `go version` in your console
+
+*Server version:* E.g. MySQL 5.6, MariaDB 10.0.20
+
+*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10
diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/go-sql-driver/mysql/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000000..6f5c7ebeb7
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,9 @@
+### Description
+Please explain the changes you made here.
+
+### Checklist
+- [ ] Code compiles correctly
+- [ ] Created tests which fail without the change (if possible)
+- [ ] All tests passing
+- [ ] Extended the README / documentation, if necessary
+- [ ] Added myself / the copyright holder to the AUTHORS file
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
new file mode 100644
index 0000000000..a110cf1d31
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -0,0 +1,430 @@
+# Go-MySQL-Driver
+
+A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package
+
+![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
+
+**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases)
+
+[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql)
+
+---------------------------------------
+ * [Features](#features)
+ * [Requirements](#requirements)
+ * [Installation](#installation)
+ * [Usage](#usage)
+ * [DSN (Data Source Name)](#dsn-data-source-name)
+ * [Password](#password)
+ * [Protocol](#protocol)
+ * [Address](#address)
+ * [Parameters](#parameters)
+ * [Examples](#examples)
+ * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
+ * [time.Time support](#timetime-support)
+ * [Unicode support](#unicode-support)
+ * [Testing / Development](#testing--development)
+ * [License](#license)
+
+---------------------------------------
+
+## Features
+ * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
+ * Native Go implementation. No C-bindings, just pure Go
+ * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
+ * Automatic handling of broken connections
+ * Automatic Connection Pooling *(by database/sql package)*
+ * Supports queries larger than 16MB
+ * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support.
+ * Intelligent `LONG DATA` handling in prepared statements
+ * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
+ * Optional `time.Time` parsing
+ * Optional placeholder interpolation
+
+## Requirements
+ * Go 1.2 or higher
+ * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+---------------------------------------
+
+## Installation
+Simply install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell:
+```bash
+$ go get github.com/go-sql-driver/mysql
+```
+Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`.
+
+## Usage
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; the full [`database/sql`](http://golang.org/pkg/database/sql) API is then available.
+
+Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
+```go
+import "database/sql"
+import _ "github.com/go-sql-driver/mysql"
+
+db, err := sql.Open("mysql", "user:password@/dbname")
+```
+
+[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
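
A slightly fuller, hedged sketch (the table and column names are illustrative); placeholders are bound via a server-side prepared statement unless `interpolateParams=true` is set (see below):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var name string
	err = db.QueryRow("SELECT name FROM users WHERE id = ?", 1).Scan(&name)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("name:", name)
}
```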
+
+
+### DSN (Data Source Name)
+
+The Data Source Name has a common format, as used e.g. by [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without a type prefix (optional parts are marked by square brackets):
+```
+[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]
+```
+
+A DSN in its fullest form:
+```
+username:password@protocol(address)/dbname?param=value
+```
+
+Except for the database name, all values are optional. So the minimal DSN is:
+```
+/dbname
+```
+
+If you do not want to preselect a database, leave `dbname` empty:
+```
+/
+```
+This has the same effect as an empty DSN string:
+```
+
+```
+
+Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
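
For example, a hedged sketch building the DSN from a struct rather than by string concatenation (field values are placeholders, and the commented output is what this configuration should yield):

```go
cfg := mysql.Config{
	User:   "user",
	Passwd: "password",
	Net:    "tcp",
	Addr:   "localhost:3306",
	DBName: "dbname",
}
// Should yield "user:password@tcp(localhost:3306)/dbname".
db, err := sql.Open("mysql", cfg.FormatDSN())
```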
+
+#### Password
+Passwords can consist of any character. Escaping is **not** necessary.
+
+#### Protocol
+See [net.Dial](http://golang.org/pkg/net/#Dial) for more information about which networks are available.
+In general, you should use a Unix domain socket if available and TCP otherwise for best performance.
+
+#### Address
+For TCP and UDP networks, addresses have the form `host:port`.
+If `host` is a literal IPv6 address, it must be enclosed in square brackets.
+The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
+
+For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
+
+#### Parameters
+*Parameters are case-sensitive!*
+
+Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
+
+##### `allowAllFiles`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
+[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+
+##### `allowCleartextPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
+
+##### `allowOldPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
+
+##### `charset`
+
+```
+Type: string
+Valid Values: <name>
+Default: none
+```
+
+Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables, for example, support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+
+Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
+Unless you need the fallback behavior, please use `collation` instead.
+
+##### `collation`
+
+```
+Type: string
+Valid Values: <name>
+Default: utf8_general_ci
+```
+
+Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
+
+A list of valid collations for a server is retrievable with `SHOW COLLATION`.
+
+##### `clientFoundRows`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
+
+##### `columnsWithAlias`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
+
+```
+SELECT u.id FROM users as u
+```
+
+will return `u.id` instead of just `id` if `columnsWithAlias=true`.
+
+##### `interpolateParams`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of round trips, since with `interpolateParams=false` the driver has to prepare a statement, execute it with the given parameters, and close the statement again.
+
+*This cannot be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
+
+##### `loc`
+
+```
+Type: string
+Valid Values: <escaped name>
+Default: UTC
+```
+
+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details.
+
+Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
+
+Please keep in mind that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+
+##### `maxAllowedPacket`
+```
+Type: decimal number
+Default: 0
+```
+
+Max packet size allowed in bytes. Use `maxAllowedPacket=0` to automatically fetch the `max_allowed_packet` variable from server.
+
+##### `multiStatements`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement.
+
+##### `parseTime`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`
+
+
+##### `readTimeout`
+
+```
+Type: decimal number
+Default: 0
+```
+
+I/O read timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+##### `strict`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`strict=true` enables a driver-side strict mode in which MySQL warnings are treated as errors. This mode should not be used in production as it may lead to data corruption in certain situations.
+
+A server-side strict mode, which is safe for production use, can be set via the [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html) system variable.
+
+By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes.
+
+##### `timeout`
+
+```
+Type: decimal number
+Default: OS default
+```
+
+*Driver* side connection timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
+
+##### `tls`
+
+```
+Type: bool / string
+Valid Values: true, false, skip-verify, <name>
+Default: false
+```
+
+`tls=true` enables a TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
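+
+A sketch of registering a custom TLS config (the CA certificate path is a placeholder, not a real file):
+```go
+package main
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"database/sql"
+	"io/ioutil"
+	"log"
+
+	"github.com/go-sql-driver/mysql"
+)
+
+func main() {
+	rootCertPool := x509.NewCertPool()
+	pem, err := ioutil.ReadFile("/path/ca-cert.pem") // placeholder path
+	if err != nil {
+		log.Fatal(err)
+	}
+	if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+		log.Fatal("failed to append PEM")
+	}
+	// Register the config under a name, then reference it with tls=custom in the DSN.
+	if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool}); err != nil {
+		log.Fatal(err)
+	}
+	db, err := sql.Open("mysql", "user@tcp(localhost:3306)/dbname?tls=custom")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+}
+```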
+
+##### `writeTimeout`
+
+```
+Type: decimal number
+Default: 0
+```
+
+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### System Variables
+
+All other parameters are interpreted as system variables:
+ * `autocommit`: `"SET autocommit=<value>"`
+ * [`time_zone`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `"SET time_zone=<value>"`
+ * [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation=<value>"`
+ * `param`: `"SET <param>=<value>"`
+
+*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!*
+
+#### Examples
+```
+user@unix(/path/to/socket)/dbname
+```
+
+```
+root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
+```
+
+```
+user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
+```
+
+Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
+```
+user:password@/dbname?sql_mode=TRADITIONAL
+```
+
+TCP via IPv6:
+```
+user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
+```
+
+TCP on a remote host, e.g. Amazon RDS:
+```
+id:password@tcp(your-amazonaws-uri.com:3306)/dbname
+```
+
+Google Cloud SQL on App Engine (First Generation MySQL Server):
+```
+user@cloudsql(project-id:instance-name)/dbname
+```
+
+Google Cloud SQL on App Engine (Second Generation MySQL Server):
+```
+user@cloudsql(project-id:regionname:instance-name)/dbname
+```
+
+TCP using default port (3306) on localhost:
+```
+user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
+```
+
+Use the default protocol (tcp) and host (localhost:3306):
+```
+user:password@/dbname
+```
+
+No Database preselected:
+```
+user:password@/
+```
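+
+Any of the DSNs above can be passed to `sql.Open`. A minimal usage sketch (credentials are placeholders):
+```go
+package main
+
+import (
+	"database/sql"
+	"log"
+
+	_ "github.com/go-sql-driver/mysql"
+)
+
+func main() {
+	db, err := sql.Open("mysql", "user:password@/dbname")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	// sql.Open does not establish a connection; Ping verifies the DSN works.
+	if err := db.Ping(); err != nil {
+		log.Fatal(err)
+	}
+}
+```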
+
+### `LOAD DATA LOCAL INFILE` support
+For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
+```go
+import "github.com/go-sql-driver/mysql"
+```
+
+Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+
+To use an `io.Reader`, a handler function which returns an `io.Reader` or `io.ReadCloser` must be registered with `mysql.RegisterReaderHandler(name, handler)`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when a handler is no longer needed.
+
+See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
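+
+A sketch of the whitelist flow (file path and table name are hypothetical):
+```go
+package main
+
+import (
+	"database/sql"
+	"log"
+
+	"github.com/go-sql-driver/mysql"
+)
+
+func main() {
+	// Whitelist the file before referencing it in the query.
+	mysql.RegisterLocalFile("/home/gopher/data.csv")
+
+	db, err := sql.Open("mysql", "user:password@/dbname")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	if _, err := db.Exec("LOAD DATA LOCAL INFILE '/home/gopher/data.csv' INTO TABLE foo"); err != nil {
+		log.Fatal(err)
+	}
+}
+```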
+
+
+### `time.Time` support
+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte`, which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
+
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+
+**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
+
+Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
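+
+A sketch of scanning into `time.Time` (the `events` table and `created_at` column are hypothetical):
+```go
+package main
+
+import (
+	"database/sql"
+	"log"
+	"time"
+
+	_ "github.com/go-sql-driver/mysql"
+)
+
+func main() {
+	db, err := sql.Open("mysql", "user:password@/dbname?parseTime=true&loc=Local")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	var created time.Time
+	if err := db.QueryRow("SELECT created_at FROM events WHERE id = ?", 1).Scan(&created); err != nil {
+		log.Fatal(err)
+	}
+	log.Println(created.In(time.UTC))
+}
+```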
+
+
+### Unicode support
+Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default.
+
+Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+
+Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is no longer necessary. The [`collation`](#collation) parameter should be preferred to set a collation / charset other than the default.
+
+See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
+
+
+## Testing / Development
+To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
+
+Go-MySQL-Driver is not feature-complete yet. Your help is greatly appreciated.
+If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
+
+See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
+
+---------------------------------------
+
+## License
+Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
+
+Mozilla summarizes the license scope as follows:
+> MPL: The copyleft applies to any files containing MPLed code.
+
+
+That means:
+ * You can **use** the **unchanged** source code both privately and commercially
+ * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
+ * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
+
+Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license.
+
+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
+
+![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
+
diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/appengine.go
new file mode 100644
index 0000000000..565614eef7
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/appengine.go
@@ -0,0 +1,19 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build appengine
+
+package mysql
+
+import (
+ "appengine/cloudsql"
+)
+
+func init() {
+ RegisterDial("cloudsql", cloudsql.Dial)
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
new file mode 100644
index 0000000000..2001feacd3
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/buffer.go
@@ -0,0 +1,147 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "io"
+ "net"
+ "time"
+)
+
+const defaultBufSize = 4096
+
+// A buffer which is used for both reading and writing.
+// This is possible since communication on each connection is synchronous.
+// In other words, we can't write and read simultaneously on the same connection.
+// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
+// Also highly optimized for this particular use case.
+type buffer struct {
+ buf []byte
+ nc net.Conn
+ idx int
+ length int
+ timeout time.Duration
+}
+
+func newBuffer(nc net.Conn) buffer {
+ var b [defaultBufSize]byte
+ return buffer{
+ buf: b[:],
+ nc: nc,
+ }
+}
+
+// fill reads into the buffer until at least _need_ bytes are in it
+func (b *buffer) fill(need int) error {
+ n := b.length
+
+ // move existing data to the beginning
+ if n > 0 && b.idx > 0 {
+ copy(b.buf[0:n], b.buf[b.idx:])
+ }
+
+ // grow buffer if necessary
+ // TODO: let the buffer shrink again at some point
+ // Maybe keep the org buf slice and swap back?
+ if need > len(b.buf) {
+ // Round up to the next multiple of the default size
+ newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+
+ b.idx = 0
+
+ for {
+ if b.timeout > 0 {
+ if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
+ return err
+ }
+ }
+
+ nn, err := b.nc.Read(b.buf[n:])
+ n += nn
+
+ switch err {
+ case nil:
+ if n < need {
+ continue
+ }
+ b.length = n
+ return nil
+
+ case io.EOF:
+ if n >= need {
+ b.length = n
+ return nil
+ }
+ return io.ErrUnexpectedEOF
+
+ default:
+ return err
+ }
+ }
+}
+
+// returns next N bytes from buffer.
+// The returned slice is only guaranteed to be valid until the next read
+func (b *buffer) readNext(need int) ([]byte, error) {
+ if b.length < need {
+ // refill
+ if err := b.fill(need); err != nil {
+ return nil, err
+ }
+ }
+
+ offset := b.idx
+ b.idx += need
+ b.length -= need
+ return b.buf[offset:b.idx], nil
+}
+
+// returns a buffer with the requested size.
+// If possible, a slice from the existing buffer is returned.
+// Otherwise a bigger buffer is made.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeBuffer(length int) []byte {
+ if b.length > 0 {
+ return nil
+ }
+
+ // test (cheap) general case first
+ if length <= defaultBufSize || length <= cap(b.buf) {
+ return b.buf[:length]
+ }
+
+ if length < maxPacketSize {
+ b.buf = make([]byte, length)
+ return b.buf
+ }
+ return make([]byte, length)
+}
+
+// shortcut which can be used if the requested buffer is guaranteed to be
+// smaller than defaultBufSize
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeSmallBuffer(length int) []byte {
+ if b.length == 0 {
+ return b.buf[:length]
+ }
+ return nil
+}
+
+// takeCompleteBuffer returns the complete existing buffer.
+// This can be used if the necessary buffer size is unknown.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeCompleteBuffer() []byte {
+ if b.length == 0 {
+ return b.buf
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
new file mode 100644
index 0000000000..82079cfb93
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -0,0 +1,250 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const defaultCollation = "utf8_general_ci"
+
+// A list of available collations mapped to the internal ID.
+// To update this map use the following MySQL query:
+// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS
+var collations = map[string]byte{
+ "big5_chinese_ci": 1,
+ "latin2_czech_cs": 2,
+ "dec8_swedish_ci": 3,
+ "cp850_general_ci": 4,
+ "latin1_german1_ci": 5,
+ "hp8_english_ci": 6,
+ "koi8r_general_ci": 7,
+ "latin1_swedish_ci": 8,
+ "latin2_general_ci": 9,
+ "swe7_swedish_ci": 10,
+ "ascii_general_ci": 11,
+ "ujis_japanese_ci": 12,
+ "sjis_japanese_ci": 13,
+ "cp1251_bulgarian_ci": 14,
+ "latin1_danish_ci": 15,
+ "hebrew_general_ci": 16,
+ "tis620_thai_ci": 18,
+ "euckr_korean_ci": 19,
+ "latin7_estonian_cs": 20,
+ "latin2_hungarian_ci": 21,
+ "koi8u_general_ci": 22,
+ "cp1251_ukrainian_ci": 23,
+ "gb2312_chinese_ci": 24,
+ "greek_general_ci": 25,
+ "cp1250_general_ci": 26,
+ "latin2_croatian_ci": 27,
+ "gbk_chinese_ci": 28,
+ "cp1257_lithuanian_ci": 29,
+ "latin5_turkish_ci": 30,
+ "latin1_german2_ci": 31,
+ "armscii8_general_ci": 32,
+ "utf8_general_ci": 33,
+ "cp1250_czech_cs": 34,
+ "ucs2_general_ci": 35,
+ "cp866_general_ci": 36,
+ "keybcs2_general_ci": 37,
+ "macce_general_ci": 38,
+ "macroman_general_ci": 39,
+ "cp852_general_ci": 40,
+ "latin7_general_ci": 41,
+ "latin7_general_cs": 42,
+ "macce_bin": 43,
+ "cp1250_croatian_ci": 44,
+ "utf8mb4_general_ci": 45,
+ "utf8mb4_bin": 46,
+ "latin1_bin": 47,
+ "latin1_general_ci": 48,
+ "latin1_general_cs": 49,
+ "cp1251_bin": 50,
+ "cp1251_general_ci": 51,
+ "cp1251_general_cs": 52,
+ "macroman_bin": 53,
+ "utf16_general_ci": 54,
+ "utf16_bin": 55,
+ "utf16le_general_ci": 56,
+ "cp1256_general_ci": 57,
+ "cp1257_bin": 58,
+ "cp1257_general_ci": 59,
+ "utf32_general_ci": 60,
+ "utf32_bin": 61,
+ "utf16le_bin": 62,
+ "binary": 63,
+ "armscii8_bin": 64,
+ "ascii_bin": 65,
+ "cp1250_bin": 66,
+ "cp1256_bin": 67,
+ "cp866_bin": 68,
+ "dec8_bin": 69,
+ "greek_bin": 70,
+ "hebrew_bin": 71,
+ "hp8_bin": 72,
+ "keybcs2_bin": 73,
+ "koi8r_bin": 74,
+ "koi8u_bin": 75,
+ "latin2_bin": 77,
+ "latin5_bin": 78,
+ "latin7_bin": 79,
+ "cp850_bin": 80,
+ "cp852_bin": 81,
+ "swe7_bin": 82,
+ "utf8_bin": 83,
+ "big5_bin": 84,
+ "euckr_bin": 85,
+ "gb2312_bin": 86,
+ "gbk_bin": 87,
+ "sjis_bin": 88,
+ "tis620_bin": 89,
+ "ucs2_bin": 90,
+ "ujis_bin": 91,
+ "geostd8_general_ci": 92,
+ "geostd8_bin": 93,
+ "latin1_spanish_ci": 94,
+ "cp932_japanese_ci": 95,
+ "cp932_bin": 96,
+ "eucjpms_japanese_ci": 97,
+ "eucjpms_bin": 98,
+ "cp1250_polish_ci": 99,
+ "utf16_unicode_ci": 101,
+ "utf16_icelandic_ci": 102,
+ "utf16_latvian_ci": 103,
+ "utf16_romanian_ci": 104,
+ "utf16_slovenian_ci": 105,
+ "utf16_polish_ci": 106,
+ "utf16_estonian_ci": 107,
+ "utf16_spanish_ci": 108,
+ "utf16_swedish_ci": 109,
+ "utf16_turkish_ci": 110,
+ "utf16_czech_ci": 111,
+ "utf16_danish_ci": 112,
+ "utf16_lithuanian_ci": 113,
+ "utf16_slovak_ci": 114,
+ "utf16_spanish2_ci": 115,
+ "utf16_roman_ci": 116,
+ "utf16_persian_ci": 117,
+ "utf16_esperanto_ci": 118,
+ "utf16_hungarian_ci": 119,
+ "utf16_sinhala_ci": 120,
+ "utf16_german2_ci": 121,
+ "utf16_croatian_ci": 122,
+ "utf16_unicode_520_ci": 123,
+ "utf16_vietnamese_ci": 124,
+ "ucs2_unicode_ci": 128,
+ "ucs2_icelandic_ci": 129,
+ "ucs2_latvian_ci": 130,
+ "ucs2_romanian_ci": 131,
+ "ucs2_slovenian_ci": 132,
+ "ucs2_polish_ci": 133,
+ "ucs2_estonian_ci": 134,
+ "ucs2_spanish_ci": 135,
+ "ucs2_swedish_ci": 136,
+ "ucs2_turkish_ci": 137,
+ "ucs2_czech_ci": 138,
+ "ucs2_danish_ci": 139,
+ "ucs2_lithuanian_ci": 140,
+ "ucs2_slovak_ci": 141,
+ "ucs2_spanish2_ci": 142,
+ "ucs2_roman_ci": 143,
+ "ucs2_persian_ci": 144,
+ "ucs2_esperanto_ci": 145,
+ "ucs2_hungarian_ci": 146,
+ "ucs2_sinhala_ci": 147,
+ "ucs2_german2_ci": 148,
+ "ucs2_croatian_ci": 149,
+ "ucs2_unicode_520_ci": 150,
+ "ucs2_vietnamese_ci": 151,
+ "ucs2_general_mysql500_ci": 159,
+ "utf32_unicode_ci": 160,
+ "utf32_icelandic_ci": 161,
+ "utf32_latvian_ci": 162,
+ "utf32_romanian_ci": 163,
+ "utf32_slovenian_ci": 164,
+ "utf32_polish_ci": 165,
+ "utf32_estonian_ci": 166,
+ "utf32_spanish_ci": 167,
+ "utf32_swedish_ci": 168,
+ "utf32_turkish_ci": 169,
+ "utf32_czech_ci": 170,
+ "utf32_danish_ci": 171,
+ "utf32_lithuanian_ci": 172,
+ "utf32_slovak_ci": 173,
+ "utf32_spanish2_ci": 174,
+ "utf32_roman_ci": 175,
+ "utf32_persian_ci": 176,
+ "utf32_esperanto_ci": 177,
+ "utf32_hungarian_ci": 178,
+ "utf32_sinhala_ci": 179,
+ "utf32_german2_ci": 180,
+ "utf32_croatian_ci": 181,
+ "utf32_unicode_520_ci": 182,
+ "utf32_vietnamese_ci": 183,
+ "utf8_unicode_ci": 192,
+ "utf8_icelandic_ci": 193,
+ "utf8_latvian_ci": 194,
+ "utf8_romanian_ci": 195,
+ "utf8_slovenian_ci": 196,
+ "utf8_polish_ci": 197,
+ "utf8_estonian_ci": 198,
+ "utf8_spanish_ci": 199,
+ "utf8_swedish_ci": 200,
+ "utf8_turkish_ci": 201,
+ "utf8_czech_ci": 202,
+ "utf8_danish_ci": 203,
+ "utf8_lithuanian_ci": 204,
+ "utf8_slovak_ci": 205,
+ "utf8_spanish2_ci": 206,
+ "utf8_roman_ci": 207,
+ "utf8_persian_ci": 208,
+ "utf8_esperanto_ci": 209,
+ "utf8_hungarian_ci": 210,
+ "utf8_sinhala_ci": 211,
+ "utf8_german2_ci": 212,
+ "utf8_croatian_ci": 213,
+ "utf8_unicode_520_ci": 214,
+ "utf8_vietnamese_ci": 215,
+ "utf8_general_mysql500_ci": 223,
+ "utf8mb4_unicode_ci": 224,
+ "utf8mb4_icelandic_ci": 225,
+ "utf8mb4_latvian_ci": 226,
+ "utf8mb4_romanian_ci": 227,
+ "utf8mb4_slovenian_ci": 228,
+ "utf8mb4_polish_ci": 229,
+ "utf8mb4_estonian_ci": 230,
+ "utf8mb4_spanish_ci": 231,
+ "utf8mb4_swedish_ci": 232,
+ "utf8mb4_turkish_ci": 233,
+ "utf8mb4_czech_ci": 234,
+ "utf8mb4_danish_ci": 235,
+ "utf8mb4_lithuanian_ci": 236,
+ "utf8mb4_slovak_ci": 237,
+ "utf8mb4_spanish2_ci": 238,
+ "utf8mb4_roman_ci": 239,
+ "utf8mb4_persian_ci": 240,
+ "utf8mb4_esperanto_ci": 241,
+ "utf8mb4_hungarian_ci": 242,
+ "utf8mb4_sinhala_ci": 243,
+ "utf8mb4_german2_ci": 244,
+ "utf8mb4_croatian_ci": 245,
+ "utf8mb4_unicode_520_ci": 246,
+ "utf8mb4_vietnamese_ci": 247,
+}
+
+// A blacklist of collations which are unsafe to use with interpolated parameters.
+// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
+var unsafeCollations = map[string]bool{
+ "big5_chinese_ci": true,
+ "sjis_japanese_ci": true,
+ "gbk_chinese_ci": true,
+ "big5_bin": true,
+ "gb2312_bin": true,
+ "gbk_bin": true,
+ "sjis_bin": true,
+ "cp932_japanese_ci": true,
+ "cp932_bin": true,
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
new file mode 100644
index 0000000000..d82c728f3b
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -0,0 +1,377 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type mysqlConn struct {
+ buf buffer
+ netConn net.Conn
+ affectedRows uint64
+ insertId uint64
+ cfg *Config
+ maxAllowedPacket int
+ maxWriteSize int
+ writeTimeout time.Duration
+ flags clientFlag
+ status statusFlag
+ sequence uint8
+ parseTime bool
+ strict bool
+}
+
+// Handles parameters set in DSN after the connection is established
+func (mc *mysqlConn) handleParams() (err error) {
+ for param, val := range mc.cfg.Params {
+ switch param {
+ // Charset
+ case "charset":
+ charsets := strings.Split(val, ",")
+ for i := range charsets {
+ // ignore errors here - a charset may not exist
+ err = mc.exec("SET NAMES " + charsets[i])
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ return
+ }
+
+ // System Vars
+ default:
+			err = mc.exec("SET " + param + "=" + val)
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func (mc *mysqlConn) Begin() (driver.Tx, error) {
+ if mc.netConn == nil {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ err := mc.exec("START TRANSACTION")
+ if err == nil {
+ return &mysqlTx{mc}, err
+ }
+
+ return nil, err
+}
+
+func (mc *mysqlConn) Close() (err error) {
+ // Makes Close idempotent
+ if mc.netConn != nil {
+ err = mc.writeCommandPacket(comQuit)
+ }
+
+ mc.cleanup()
+
+ return
+}
+
+// Closes the network connection and unsets internal variables. Do not call this
+// function after successful authentication; call Close instead. This function
+// is called before auth or on auth failure because MySQL will have already
+// closed the network connection.
+func (mc *mysqlConn) cleanup() {
+ // Makes cleanup idempotent
+ if mc.netConn != nil {
+ if err := mc.netConn.Close(); err != nil {
+ errLog.Print(err)
+ }
+ mc.netConn = nil
+ }
+ mc.cfg = nil
+ mc.buf.nc = nil
+}
+
+func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
+ if mc.netConn == nil {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comStmtPrepare, query)
+ if err != nil {
+ return nil, err
+ }
+
+ stmt := &mysqlStmt{
+ mc: mc,
+ }
+
+ // Read Result
+ columnCount, err := stmt.readPrepareResultPacket()
+ if err == nil {
+ if stmt.paramCount > 0 {
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if columnCount > 0 {
+ err = mc.readUntilEOF()
+ }
+ }
+
+ return stmt, err
+}
+
+func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
+	// The number of ? placeholders must equal len(args)
+ if strings.Count(query, "?") != len(args) {
+ return "", driver.ErrSkip
+ }
+
+ buf := mc.buf.takeCompleteBuffer()
+ if buf == nil {
+		// cannot take the buffer; something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return "", driver.ErrBadConn
+ }
+ buf = buf[:0]
+ argPos := 0
+
+ for i := 0; i < len(query); i++ {
+ q := strings.IndexByte(query[i:], '?')
+ if q == -1 {
+ buf = append(buf, query[i:]...)
+ break
+ }
+ buf = append(buf, query[i:i+q]...)
+ i += q
+
+ arg := args[argPos]
+ argPos++
+
+ if arg == nil {
+ buf = append(buf, "NULL"...)
+ continue
+ }
+
+ switch v := arg.(type) {
+ case int64:
+ buf = strconv.AppendInt(buf, v, 10)
+ case float64:
+ buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
+ case bool:
+ if v {
+ buf = append(buf, '1')
+ } else {
+ buf = append(buf, '0')
+ }
+ case time.Time:
+ if v.IsZero() {
+ buf = append(buf, "'0000-00-00'"...)
+ } else {
+ v := v.In(mc.cfg.Loc)
+				v = v.Add(time.Nanosecond * 500) // round to the nearest microsecond
+ year := v.Year()
+ year100 := year / 100
+ year1 := year % 100
+ month := v.Month()
+ day := v.Day()
+ hour := v.Hour()
+ minute := v.Minute()
+ second := v.Second()
+ micro := v.Nanosecond() / 1000
+
+ buf = append(buf, []byte{
+ '\'',
+ digits10[year100], digits01[year100],
+ digits10[year1], digits01[year1],
+ '-',
+ digits10[month], digits01[month],
+ '-',
+ digits10[day], digits01[day],
+ ' ',
+ digits10[hour], digits01[hour],
+ ':',
+ digits10[minute], digits01[minute],
+ ':',
+ digits10[second], digits01[second],
+ }...)
+
+ if micro != 0 {
+ micro10000 := micro / 10000
+ micro100 := micro / 100 % 100
+ micro1 := micro % 100
+ buf = append(buf, []byte{
+ '.',
+ digits10[micro10000], digits01[micro10000],
+ digits10[micro100], digits01[micro100],
+ digits10[micro1], digits01[micro1],
+ }...)
+ }
+ buf = append(buf, '\'')
+ }
+ case []byte:
+ if v == nil {
+ buf = append(buf, "NULL"...)
+ } else {
+ buf = append(buf, "_binary'"...)
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeBytesBackslash(buf, v)
+ } else {
+ buf = escapeBytesQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ }
+ case string:
+ buf = append(buf, '\'')
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeStringBackslash(buf, v)
+ } else {
+ buf = escapeStringQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ default:
+ return "", driver.ErrSkip
+ }
+
+ if len(buf)+4 > mc.maxAllowedPacket {
+ return "", driver.ErrSkip
+ }
+ }
+ if argPos != len(args) {
+ return "", driver.ErrSkip
+ }
+ return string(buf), nil
+}
+
+func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
+ if mc.netConn == nil {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ args = nil
+ }
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ err := mc.exec(query)
+ if err == nil {
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, err
+ }
+ return nil, err
+}
+
+// Internal function to execute commands
+func (mc *mysqlConn) exec(query string) error {
+ // Send command
+ err := mc.writeCommandPacketStr(comQuery, query)
+ if err != nil {
+ return err
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err == nil && resLen > 0 {
+ if err = mc.readUntilEOF(); err != nil {
+ return err
+ }
+
+ err = mc.readUntilEOF()
+ }
+
+ return err
+}
+
+func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+ if mc.netConn == nil {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+		// try client-side prepare to reduce roundtrips
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ args = nil
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comQuery, query)
+ if err == nil {
+ // Read Result
+ var resLen int
+ resLen, err = mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+
+ if resLen == 0 {
+ // no columns, no more data
+ return emptyRows{}, nil
+ }
+ // Columns
+ rows.columns, err = mc.readColumns(resLen)
+ return rows, err
+ }
+ }
+ return nil, err
+}
+
+// Gets the value of the given MySQL System Variable
+// The returned byte slice is only valid until the next read
+func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
+ return nil, err
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+ rows.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
+
+ if resLen > 0 {
+ // Columns
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ dest := make([]driver.Value, resLen)
+ if err = rows.readRow(dest); err == nil {
+ return dest[0].([]byte), mc.readUntilEOF()
+ }
+ }
+ return nil, err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
new file mode 100644
index 0000000000..88cfff3fd8
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -0,0 +1,163 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const (
+ minProtocolVersion byte = 10
+ maxPacketSize = 1<<24 - 1
+ timeFormat = "2006-01-02 15:04:05.999999"
+)
+
+// MySQL constants documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+const (
+ iOK byte = 0x00
+ iLocalInFile byte = 0xfb
+ iEOF byte = 0xfe
+ iERR byte = 0xff
+)
+
+// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
+type clientFlag uint32
+
+const (
+ clientLongPassword clientFlag = 1 << iota
+ clientFoundRows
+ clientLongFlag
+ clientConnectWithDB
+ clientNoSchema
+ clientCompress
+ clientODBC
+ clientLocalFiles
+ clientIgnoreSpace
+ clientProtocol41
+ clientInteractive
+ clientSSL
+ clientIgnoreSIGPIPE
+ clientTransactions
+ clientReserved
+ clientSecureConn
+ clientMultiStatements
+ clientMultiResults
+ clientPSMultiResults
+ clientPluginAuth
+ clientConnectAttrs
+ clientPluginAuthLenEncClientData
+ clientCanHandleExpiredPasswords
+ clientSessionTrack
+ clientDeprecateEOF
+)
+
+const (
+ comQuit byte = iota + 1
+ comInitDB
+ comQuery
+ comFieldList
+ comCreateDB
+ comDropDB
+ comRefresh
+ comShutdown
+ comStatistics
+ comProcessInfo
+ comConnect
+ comProcessKill
+ comDebug
+ comPing
+ comTime
+ comDelayedInsert
+ comChangeUser
+ comBinlogDump
+ comTableDump
+ comConnectOut
+ comRegisterSlave
+ comStmtPrepare
+ comStmtExecute
+ comStmtSendLongData
+ comStmtClose
+ comStmtReset
+ comSetOption
+ comStmtFetch
+)
+
+// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
+const (
+ fieldTypeDecimal byte = iota
+ fieldTypeTiny
+ fieldTypeShort
+ fieldTypeLong
+ fieldTypeFloat
+ fieldTypeDouble
+ fieldTypeNULL
+ fieldTypeTimestamp
+ fieldTypeLongLong
+ fieldTypeInt24
+ fieldTypeDate
+ fieldTypeTime
+ fieldTypeDateTime
+ fieldTypeYear
+ fieldTypeNewDate
+ fieldTypeVarChar
+ fieldTypeBit
+)
+const (
+ fieldTypeJSON byte = iota + 0xf5
+ fieldTypeNewDecimal
+ fieldTypeEnum
+ fieldTypeSet
+ fieldTypeTinyBLOB
+ fieldTypeMediumBLOB
+ fieldTypeLongBLOB
+ fieldTypeBLOB
+ fieldTypeVarString
+ fieldTypeString
+ fieldTypeGeometry
+)
+
+type fieldFlag uint16
+
+const (
+ flagNotNULL fieldFlag = 1 << iota
+ flagPriKey
+ flagUniqueKey
+ flagMultipleKey
+ flagBLOB
+ flagUnsigned
+ flagZeroFill
+ flagBinary
+ flagEnum
+ flagAutoIncrement
+ flagTimestamp
+ flagSet
+ flagUnknown1
+ flagUnknown2
+ flagUnknown3
+ flagUnknown4
+)
+
+// http://dev.mysql.com/doc/internals/en/status-flags.html
+type statusFlag uint16
+
+const (
+ statusInTrans statusFlag = 1 << iota
+ statusInAutocommit
+ statusReserved // Not in documentation
+ statusMoreResultsExists
+ statusNoGoodIndexUsed
+ statusNoIndexUsed
+ statusCursorExists
+ statusLastRowSent
+ statusDbDropped
+ statusNoBackslashEscapes
+ statusMetadataChanged
+ statusQueryWasSlow
+ statusPsOutParams
+ statusInTransReadonly
+ statusSessionStateChanged
+)
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
new file mode 100644
index 0000000000..562ddeffbe
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -0,0 +1,171 @@
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// Package mysql provides a MySQL driver for Go's database/sql package
+//
+// The driver should be used via the database/sql package:
+//
+// import "database/sql"
+// import _ "github.com/go-sql-driver/mysql"
+//
+// db, err := sql.Open("mysql", "user:password@/dbname")
+//
+// See https://github.com/go-sql-driver/mysql#usage for details
+package mysql
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "net"
+)
+
+// MySQLDriver is exported to make the driver directly accessible.
+// In general the driver is used via the database/sql package.
+type MySQLDriver struct{}
+
+// DialFunc is a function which can be used to establish the network connection.
+// Custom dial functions must be registered with RegisterDial
+type DialFunc func(addr string) (net.Conn, error)
+
+var dials map[string]DialFunc
+
+// RegisterDial registers a custom dial function. It can then be used by the
+// network address mynet(addr), where mynet is the registered new network.
+// addr is passed as a parameter to the dial function.
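+//
+// A usage sketch (the "mynet" network name is illustrative):
+//
+//	mysql.RegisterDial("mynet", func(addr string) (net.Conn, error) {
+//		return net.Dial("tcp", addr)
+//	})
+//
+// A DSN can then use it as the protocol: user@mynet(127.0.0.1:3306)/dbname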
+func RegisterDial(net string, dial DialFunc) {
+ if dials == nil {
+ dials = make(map[string]DialFunc)
+ }
+ dials[net] = dial
+}
+
+// Open new Connection.
+// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
+// the DSN string is formatted
+func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
+ var err error
+
+ // New mysqlConn
+ mc := &mysqlConn{
+ maxAllowedPacket: maxPacketSize,
+ maxWriteSize: maxPacketSize - 1,
+ }
+ mc.cfg, err = ParseDSN(dsn)
+ if err != nil {
+ return nil, err
+ }
+ mc.parseTime = mc.cfg.ParseTime
+ mc.strict = mc.cfg.Strict
+
+ // Connect to Server
+ if dial, ok := dials[mc.cfg.Net]; ok {
+ mc.netConn, err = dial(mc.cfg.Addr)
+ } else {
+ nd := net.Dialer{Timeout: mc.cfg.Timeout}
+ mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Enable TCP Keepalives on TCP connections
+ if tc, ok := mc.netConn.(*net.TCPConn); ok {
+ if err := tc.SetKeepAlive(true); err != nil {
+ // Don't send COM_QUIT before handshake.
+ mc.netConn.Close()
+ mc.netConn = nil
+ return nil, err
+ }
+ }
+
+ mc.buf = newBuffer(mc.netConn)
+
+ // Set I/O timeouts
+ mc.buf.timeout = mc.cfg.ReadTimeout
+ mc.writeTimeout = mc.cfg.WriteTimeout
+
+ // Reading Handshake Initialization Packet
+ cipher, err := mc.readInitPacket()
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ // Send Client Authentication Packet
+ if err = mc.writeAuthPacket(cipher); err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ // Handle response to auth packet, switch methods if possible
+ if err = handleAuthResult(mc, cipher); err != nil {
+ // Authentication failed and MySQL has already closed the connection
+ // (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
+ // Do not send COM_QUIT, just cleanup and return the error.
+ mc.cleanup()
+ return nil, err
+ }
+
+ if mc.cfg.MaxAllowedPacket > 0 {
+ mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
+ } else {
+ // Get max allowed packet size
+ maxap, err := mc.getSystemVar("max_allowed_packet")
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+ mc.maxAllowedPacket = stringToInt(maxap) - 1
+ }
+ if mc.maxAllowedPacket < maxPacketSize {
+ mc.maxWriteSize = mc.maxAllowedPacket
+ }
+
+ // Handle DSN Params
+ err = mc.handleParams()
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+
+ return mc, nil
+}
+
+func handleAuthResult(mc *mysqlConn, cipher []byte) error {
+ // Read Result Packet
+ err := mc.readResultOK()
+ if err == nil {
+ return nil // auth successful
+ }
+
+ if mc.cfg == nil {
+ return err // auth failed and retry not possible
+ }
+
+ // Retry auth if configured to do so.
+ if mc.cfg.AllowOldPasswords && err == ErrOldPassword {
+ // Retry with old authentication method. Note: there are edge cases
+ // where this should work but doesn't; this is currently "wontfix":
+ // https://github.com/go-sql-driver/mysql/issues/184
+ if err = mc.writeOldAuthPacket(cipher); err != nil {
+ return err
+ }
+ err = mc.readResultOK()
+ } else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword {
+ // Retry with clear text password for
+ // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
+ // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
+ if err = mc.writeClearAuthPacket(); err != nil {
+ return err
+ }
+ err = mc.readResultOK()
+ }
+ return err
+}
+
+func init() {
+ sql.Register("mysql", &MySQLDriver{})
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
new file mode 100644
index 0000000000..896be9ef53
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -0,0 +1,530 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
+ errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
+ errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
+ errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
+)
+
+// Config is a configuration parsed from a DSN string
+type Config struct {
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network type
+ Addr string // Network address (requires Net)
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ Collation string // Connection collation
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ TLSConfig string // TLS configuration name
+ tls *tls.Config // TLS configuration
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+
+ AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
+ AllowCleartextPasswords bool // Allows the cleartext client side plugin
+ AllowOldPasswords bool // Allows the old insecure password method
+ ClientFoundRows bool // Return number of matching rows instead of rows changed
+ ColumnsWithAlias bool // Prepend table alias to column names
+ InterpolateParams bool // Interpolate placeholders into query string
+ MultiStatements bool // Allow multiple statements in one query
+ ParseTime bool // Parse time values to time.Time
+ Strict bool // Return warnings as errors
+}
+
+// FormatDSN formats the given Config into a DSN string which can be passed to
+// the driver.
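+//
+// A sketch of the round trip (values are placeholders):
+//
+//	cfg := &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname"}
+//	dsn := cfg.FormatDSN() // "user:password@tcp(127.0.0.1:3306)/dbname"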
+func (cfg *Config) FormatDSN() string {
+ var buf bytes.Buffer
+
+ // [username[:password]@]
+ if len(cfg.User) > 0 {
+ buf.WriteString(cfg.User)
+ if len(cfg.Passwd) > 0 {
+ buf.WriteByte(':')
+ buf.WriteString(cfg.Passwd)
+ }
+ buf.WriteByte('@')
+ }
+
+ // [protocol[(address)]]
+ if len(cfg.Net) > 0 {
+ buf.WriteString(cfg.Net)
+ if len(cfg.Addr) > 0 {
+ buf.WriteByte('(')
+ buf.WriteString(cfg.Addr)
+ buf.WriteByte(')')
+ }
+ }
+
+ // /dbname
+ buf.WriteByte('/')
+ buf.WriteString(cfg.DBName)
+
+ // [?param1=value1&...¶mN=valueN]
+ hasParam := false
+
+ if cfg.AllowAllFiles {
+ hasParam = true
+ buf.WriteString("?allowAllFiles=true")
+ }
+
+ if cfg.AllowCleartextPasswords {
+ if hasParam {
+ buf.WriteString("&allowCleartextPasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowCleartextPasswords=true")
+ }
+ }
+
+ if cfg.AllowOldPasswords {
+ if hasParam {
+ buf.WriteString("&allowOldPasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowOldPasswords=true")
+ }
+ }
+
+ if cfg.ClientFoundRows {
+ if hasParam {
+ buf.WriteString("&clientFoundRows=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?clientFoundRows=true")
+ }
+ }
+
+ if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ if hasParam {
+ buf.WriteString("&collation=")
+ } else {
+ hasParam = true
+ buf.WriteString("?collation=")
+ }
+ buf.WriteString(col)
+ }
+
+ if cfg.ColumnsWithAlias {
+ if hasParam {
+ buf.WriteString("&columnsWithAlias=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?columnsWithAlias=true")
+ }
+ }
+
+ if cfg.InterpolateParams {
+ if hasParam {
+ buf.WriteString("&interpolateParams=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?interpolateParams=true")
+ }
+ }
+
+ if cfg.Loc != time.UTC && cfg.Loc != nil {
+ if hasParam {
+ buf.WriteString("&loc=")
+ } else {
+ hasParam = true
+ buf.WriteString("?loc=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.Loc.String()))
+ }
+
+ if cfg.MultiStatements {
+ if hasParam {
+ buf.WriteString("&multiStatements=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?multiStatements=true")
+ }
+ }
+
+ if cfg.ParseTime {
+ if hasParam {
+ buf.WriteString("&parseTime=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?parseTime=true")
+ }
+ }
+
+ if cfg.ReadTimeout > 0 {
+ if hasParam {
+ buf.WriteString("&readTimeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?readTimeout=")
+ }
+ buf.WriteString(cfg.ReadTimeout.String())
+ }
+
+ if cfg.Strict {
+ if hasParam {
+ buf.WriteString("&strict=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?strict=true")
+ }
+ }
+
+ if cfg.Timeout > 0 {
+ if hasParam {
+ buf.WriteString("&timeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?timeout=")
+ }
+ buf.WriteString(cfg.Timeout.String())
+ }
+
+ if len(cfg.TLSConfig) > 0 {
+ if hasParam {
+ buf.WriteString("&tls=")
+ } else {
+ hasParam = true
+ buf.WriteString("?tls=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.TLSConfig))
+ }
+
+ if cfg.WriteTimeout > 0 {
+ if hasParam {
+ buf.WriteString("&writeTimeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?writeTimeout=")
+ }
+ buf.WriteString(cfg.WriteTimeout.String())
+ }
+
+ if cfg.MaxAllowedPacket > 0 {
+ if hasParam {
+ buf.WriteString("&maxAllowedPacket=")
+ } else {
+ hasParam = true
+ buf.WriteString("?maxAllowedPacket=")
+ }
+ buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
+
+ }
+
+ // other params
+ if cfg.Params != nil {
+ for param, value := range cfg.Params {
+ if hasParam {
+ buf.WriteByte('&')
+ } else {
+ hasParam = true
+ buf.WriteByte('?')
+ }
+
+ buf.WriteString(param)
+ buf.WriteByte('=')
+ buf.WriteString(url.QueryEscape(value))
+ }
+ }
+
+ return buf.String()
+}
+
+// ParseDSN parses the DSN string to a Config
+func ParseDSN(dsn string) (cfg *Config, err error) {
+ // New config with some default values
+ cfg = &Config{
+ Loc: time.UTC,
+ Collation: defaultCollation,
+ }
+
+ // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
+ // Find the last '/' (since the password or the net addr might contain a '/')
+ foundSlash := false
+ for i := len(dsn) - 1; i >= 0; i-- {
+ if dsn[i] == '/' {
+ foundSlash = true
+ var j, k int
+
+ // left part is empty if i <= 0
+ if i > 0 {
+ // [username[:password]@][protocol[(address)]]
+ // Find the last '@' in dsn[:i]
+ for j = i; j >= 0; j-- {
+ if dsn[j] == '@' {
+ // username[:password]
+ // Find the first ':' in dsn[:j]
+ for k = 0; k < j; k++ {
+ if dsn[k] == ':' {
+ cfg.Passwd = dsn[k+1 : j]
+ break
+ }
+ }
+ cfg.User = dsn[:k]
+
+ break
+ }
+ }
+
+ // [protocol[(address)]]
+ // Find the first '(' in dsn[j+1:i]
+ for k = j + 1; k < i; k++ {
+ if dsn[k] == '(' {
+ // dsn[i-1] must be == ')' if an address is specified
+ if dsn[i-1] != ')' {
+ if strings.ContainsRune(dsn[k+1:i], ')') {
+ return nil, errInvalidDSNUnescaped
+ }
+ return nil, errInvalidDSNAddr
+ }
+ cfg.Addr = dsn[k+1 : i-1]
+ break
+ }
+ }
+ cfg.Net = dsn[j+1 : k]
+ }
+
+ // dbname[?param1=value1&...¶mN=valueN]
+ // Find the first '?' in dsn[i+1:]
+ for j = i + 1; j < len(dsn); j++ {
+ if dsn[j] == '?' {
+ if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
+ return
+ }
+ break
+ }
+ }
+ cfg.DBName = dsn[i+1 : j]
+
+ break
+ }
+ }
+
+ if !foundSlash && len(dsn) > 0 {
+ return nil, errInvalidDSNNoSlash
+ }
+
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return nil, errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return nil, errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+
+ }
+
+ return
+}
+
+// parseDSNParams parses the DSN "query string"
+// Values must be url.QueryEscape'ed
+func parseDSNParams(cfg *Config, params string) (err error) {
+ for _, v := range strings.Split(params, "&") {
+ param := strings.SplitN(v, "=", 2)
+ if len(param) != 2 {
+ continue
+ }
+
+ // cfg params
+ switch value := param[1]; param[0] {
+
+ // Disable INFILE whitelist / enable all files
+ case "allowAllFiles":
+ var isBool bool
+ cfg.AllowAllFiles, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use cleartext authentication mode (MySQL 5.5.10+)
+ case "allowCleartextPasswords":
+ var isBool bool
+ cfg.AllowCleartextPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use old authentication mode (pre MySQL 4.1)
+ case "allowOldPasswords":
+ var isBool bool
+ cfg.AllowOldPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Switch "rowsAffected" mode
+ case "clientFoundRows":
+ var isBool bool
+ cfg.ClientFoundRows, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Collation
+ case "collation":
+ cfg.Collation = value
+
+ case "columnsWithAlias":
+ var isBool bool
+ cfg.ColumnsWithAlias, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Compression
+ case "compress":
+ return errors.New("compression not implemented yet")
+
+ // Enable client side placeholder substitution
+ case "interpolateParams":
+ var isBool bool
+ cfg.InterpolateParams, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Time Location
+ case "loc":
+ if value, err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ cfg.Loc, err = time.LoadLocation(value)
+ if err != nil {
+ return
+ }
+
+ // multiple statements in one query
+ case "multiStatements":
+ var isBool bool
+ cfg.MultiStatements, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // time.Time parsing
+ case "parseTime":
+ var isBool bool
+ cfg.ParseTime, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // I/O read Timeout
+ case "readTimeout":
+ cfg.ReadTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // Strict mode
+ case "strict":
+ var isBool bool
+ cfg.Strict, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Dial Timeout
+ case "timeout":
+ cfg.Timeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // TLS-Encryption
+ case "tls":
+ boolValue, isBool := readBool(value)
+ if isBool {
+ if boolValue {
+ cfg.TLSConfig = "true"
+ cfg.tls = &tls.Config{}
+ } else {
+ cfg.TLSConfig = "false"
+ }
+ } else if vl := strings.ToLower(value); vl == "skip-verify" {
+ cfg.TLSConfig = vl
+ cfg.tls = &tls.Config{InsecureSkipVerify: true}
+ } else {
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for TLS config name: %v", err)
+ }
+
+ if tlsConfig, ok := tlsConfigRegister[name]; ok {
+ if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ tlsConfig.ServerName = host
+ }
+ }
+
+ cfg.TLSConfig = name
+ cfg.tls = tlsConfig
+ } else {
+ return errors.New("invalid value / unknown config name: " + name)
+ }
+ }
+
+ // I/O write Timeout
+ case "writeTimeout":
+ cfg.WriteTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+ case "maxAllowedPacket":
+ cfg.MaxAllowedPacket, err = strconv.Atoi(value)
+ if err != nil {
+ return
+ }
+ default:
+ // lazy init
+ if cfg.Params == nil {
+ cfg.Params = make(map[string]string)
+ }
+
+ if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
new file mode 100644
index 0000000000..1543a80546
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -0,0 +1,131 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "os"
+)
+
+// Various errors the driver might return. Can change between driver versions.
+var (
+ ErrInvalidConn = errors.New("invalid connection")
+ ErrMalformPkt = errors.New("malformed packet")
+ ErrNoTLS = errors.New("TLS requested but server does not support TLS")
+ ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
+ ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
+ ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
+ ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
+ ErrPktSync = errors.New("commands out of sync. You can't run this command now")
+ ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
+ ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
+ ErrBusyBuffer = errors.New("busy buffer")
+)
+
+var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
+
+// Logger is used to log critical error messages.
+type Logger interface {
+ Print(v ...interface{})
+}
+
+// SetLogger is used to set the logger for critical errors.
+// The initial logger writes to os.Stderr.
+func SetLogger(logger Logger) error {
+ if logger == nil {
+ return errors.New("logger is nil")
+ }
+ errLog = logger
+ return nil
+}
+
+// MySQLError is an error type which represents a single MySQL error
+type MySQLError struct {
+ Number uint16
+ Message string
+}
+
+func (me *MySQLError) Error() string {
+ return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
+}
+
+// MySQLWarnings is an error type which represents a group of one or more MySQL
+// warnings
+type MySQLWarnings []MySQLWarning
+
+func (mws MySQLWarnings) Error() string {
+ var msg string
+ for i, warning := range mws {
+ if i > 0 {
+ msg += "\r\n"
+ }
+ msg += fmt.Sprintf(
+ "%s %s: %s",
+ warning.Level,
+ warning.Code,
+ warning.Message,
+ )
+ }
+ return msg
+}
+
+// MySQLWarning is an error type which represents a single MySQL warning.
+// Warnings are returned in groups only. See MySQLWarnings
+type MySQLWarning struct {
+ Level string
+ Code string
+ Message string
+}
+
+func (mc *mysqlConn) getWarnings() (err error) {
+ rows, err := mc.Query("SHOW WARNINGS", nil)
+ if err != nil {
+ return
+ }
+
+ var warnings = MySQLWarnings{}
+ var values = make([]driver.Value, 3)
+
+ for {
+ err = rows.Next(values)
+ switch err {
+ case nil:
+ warning := MySQLWarning{}
+
+ if raw, ok := values[0].([]byte); ok {
+ warning.Level = string(raw)
+ } else {
+ warning.Level = fmt.Sprintf("%s", values[0])
+ }
+ if raw, ok := values[1].([]byte); ok {
+ warning.Code = string(raw)
+ } else {
+ warning.Code = fmt.Sprintf("%s", values[1])
+ }
+ if raw, ok := values[2].([]byte); ok {
+ warning.Message = string(raw)
+ } else {
+				warning.Message = fmt.Sprintf("%s", values[2])
+ }
+
+ warnings = append(warnings, warning)
+
+ case io.EOF:
+ return warnings
+
+ default:
+ rows.Close()
+ return
+ }
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
new file mode 100644
index 0000000000..0f975bbc26
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -0,0 +1,181 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+)
+
+var (
+ fileRegister map[string]bool
+ fileRegisterLock sync.RWMutex
+ readerRegister map[string]func() io.Reader
+ readerRegisterLock sync.RWMutex
+)
+
+// RegisterLocalFile adds the given file to the file whitelist,
+// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>".
+// Alternatively you can allow the use of all local files with
+// the DSN parameter 'allowAllFiles=true'
+//
+// filePath := "/home/gopher/data.csv"
+// mysql.RegisterLocalFile(filePath)
+// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
+// if err != nil {
+// ...
+//
+func RegisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ // lazy map init
+ if fileRegister == nil {
+ fileRegister = make(map[string]bool)
+ }
+
+ fileRegister[strings.Trim(filePath, `"`)] = true
+ fileRegisterLock.Unlock()
+}
+
+// DeregisterLocalFile removes the given filepath from the whitelist.
+func DeregisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ delete(fileRegister, strings.Trim(filePath, `"`))
+ fileRegisterLock.Unlock()
+}
+
+// RegisterReaderHandler registers a handler function which is used
+// to receive a io.Reader.
+// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>".
+// If the handler returns a io.ReadCloser Close() is called when the
+// request is finished.
+//
+// mysql.RegisterReaderHandler("data", func() io.Reader {
+// var csvReader io.Reader // Some Reader that returns CSV data
+// ... // Open Reader here
+// return csvReader
+// })
+// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
+// if err != nil {
+// ...
+//
+func RegisterReaderHandler(name string, handler func() io.Reader) {
+ readerRegisterLock.Lock()
+ // lazy map init
+ if readerRegister == nil {
+ readerRegister = make(map[string]func() io.Reader)
+ }
+
+ readerRegister[name] = handler
+ readerRegisterLock.Unlock()
+}
+
+// DeregisterReaderHandler removes the ReaderHandler function with
+// the given name from the registry.
+func DeregisterReaderHandler(name string) {
+ readerRegisterLock.Lock()
+ delete(readerRegister, name)
+ readerRegisterLock.Unlock()
+}
+
+func deferredClose(err *error, closer io.Closer) {
+ closeErr := closer.Close()
+ if *err == nil {
+ *err = closeErr
+ }
+}
+
+func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+ var rdr io.Reader
+ var data []byte
+ packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
+ if mc.maxWriteSize < packetSize {
+ packetSize = mc.maxWriteSize
+ }
+
+ if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
+		// The server might return an absolute path. See issue #355.
+ name = name[idx+8:]
+
+ readerRegisterLock.RLock()
+ handler, inMap := readerRegister[name]
+ readerRegisterLock.RUnlock()
+
+ if inMap {
+ rdr = handler()
+ if rdr != nil {
+ if cl, ok := rdr.(io.Closer); ok {
+ defer deferredClose(&err, cl)
+ }
+ } else {
+				err = fmt.Errorf("Reader '%s' is <nil>", name)
+ }
+ } else {
+ err = fmt.Errorf("Reader '%s' is not registered", name)
+ }
+ } else { // File
+ name = strings.Trim(name, `"`)
+ fileRegisterLock.RLock()
+ fr := fileRegister[name]
+ fileRegisterLock.RUnlock()
+ if mc.cfg.AllowAllFiles || fr {
+ var file *os.File
+ var fi os.FileInfo
+
+ if file, err = os.Open(name); err == nil {
+ defer deferredClose(&err, file)
+
+ // get file size
+ if fi, err = file.Stat(); err == nil {
+ rdr = file
+ if fileSize := int(fi.Size()); fileSize < packetSize {
+ packetSize = fileSize
+ }
+ }
+ }
+ } else {
+ err = fmt.Errorf("local file '%s' is not registered", name)
+ }
+ }
+
+ // send content packets
+ if err == nil {
+ data := make([]byte, 4+packetSize)
+ var n int
+ for err == nil {
+ n, err = rdr.Read(data[4:])
+ if n > 0 {
+ if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+ return ioErr
+ }
+ }
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ }
+
+ // send empty packet (termination)
+ if data == nil {
+ data = make([]byte, 4)
+ }
+ if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+ return ioErr
+ }
+
+ // read OK packet
+ if err == nil {
+ return mc.readResultOK()
+ }
+
+ mc.readPacket()
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
new file mode 100644
index 0000000000..f06752b02e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -0,0 +1,1246 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/tls"
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "time"
+)
+
+// Packets documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+// Read packet to buffer 'data'
+func (mc *mysqlConn) readPacket() ([]byte, error) {
+ var payload []byte
+ for {
+ // Read packet header
+ data, err := mc.buf.readNext(4)
+ if err != nil {
+ errLog.Print(err)
+ mc.Close()
+ return nil, driver.ErrBadConn
+ }
+
+ // Packet Length [24 bit]
+ pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
+
+ if pktLen < 1 {
+ errLog.Print(ErrMalformPkt)
+ mc.Close()
+ return nil, driver.ErrBadConn
+ }
+
+ // Check Packet Sync [8 bit]
+ if data[3] != mc.sequence {
+ if data[3] > mc.sequence {
+ return nil, ErrPktSyncMul
+ }
+ return nil, ErrPktSync
+ }
+ mc.sequence++
+
+ // Read packet body [pktLen bytes]
+ data, err = mc.buf.readNext(pktLen)
+ if err != nil {
+ errLog.Print(err)
+ mc.Close()
+ return nil, driver.ErrBadConn
+ }
+
+ isLastPacket := (pktLen < maxPacketSize)
+
+ // Zero allocations for non-splitting packets
+ if isLastPacket && payload == nil {
+ return data, nil
+ }
+
+ payload = append(payload, data...)
+
+ if isLastPacket {
+ return payload, nil
+ }
+ }
+}
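+
+// Illustrative framing example (values assumed, not from the docs above):
+// a 300-byte payload with sequence number 2 is read as the 4-byte header
+//
+// 0x2c 0x01 0x00 0x02 // length 300 as 24-bit little-endian, then sequence
+//
+// followed by the 300 payload bytes. Payloads of maxPacketSize or more
+// arrive split across several such packets and are reassembled above.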
+
+// Write packet buffer 'data'
+func (mc *mysqlConn) writePacket(data []byte) error {
+ pktLen := len(data) - 4
+
+ if pktLen > mc.maxAllowedPacket {
+ return ErrPktTooLarge
+ }
+
+ for {
+ var size int
+ if pktLen >= maxPacketSize {
+ data[0] = 0xff
+ data[1] = 0xff
+ data[2] = 0xff
+ size = maxPacketSize
+ } else {
+ data[0] = byte(pktLen)
+ data[1] = byte(pktLen >> 8)
+ data[2] = byte(pktLen >> 16)
+ size = pktLen
+ }
+ data[3] = mc.sequence
+
+ // Write packet
+ if mc.writeTimeout > 0 {
+ if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
+ return err
+ }
+ }
+
+ n, err := mc.netConn.Write(data[:4+size])
+ if err == nil && n == 4+size {
+ mc.sequence++
+ if size != maxPacketSize {
+ return nil
+ }
+ pktLen -= size
+ data = data[size:]
+ continue
+ }
+
+ // Handle error
+ if err == nil { // n != len(data)
+ errLog.Print(ErrMalformPkt)
+ } else {
+ errLog.Print(err)
+ }
+ return driver.ErrBadConn
+ }
+}
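+
+// Splitting example (values assumed): a payload of exactly
+// 2*maxPacketSize bytes is written as two chunks of length 0xffffff
+// followed by one zero-length packet, three packets in total; the
+// trailing empty packet marks the end of the payload for the receiver.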
+
+/******************************************************************************
+* Initialisation Process *
+******************************************************************************/
+
+// Handshake Initialization Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
+func (mc *mysqlConn) readInitPacket() ([]byte, error) {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ if data[0] == iERR {
+ return nil, mc.handleErrorPacket(data)
+ }
+
+ // protocol version [1 byte]
+ if data[0] < minProtocolVersion {
+ return nil, fmt.Errorf(
+ "unsupported protocol version %d. Version %d or higher is required",
+ data[0],
+ minProtocolVersion,
+ )
+ }
+
+ // server version [null terminated string]
+ // connection id [4 bytes]
+ pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
+
+ // first part of the password cipher [8 bytes]
+ cipher := data[pos : pos+8]
+
+ // (filler) always 0x00 [1 byte]
+ pos += 8 + 1
+
+ // capability flags (lower 2 bytes) [2 bytes]
+ mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ if mc.flags&clientProtocol41 == 0 {
+ return nil, ErrOldProtocol
+ }
+ if mc.flags&clientSSL == 0 && mc.cfg.tls != nil {
+ return nil, ErrNoTLS
+ }
+ pos += 2
+
+ if len(data) > pos {
+ // character set [1 byte]
+ // status flags [2 bytes]
+ // capability flags (upper 2 bytes) [2 bytes]
+ // length of auth-plugin-data [1 byte]
+ // reserved (all [00]) [10 bytes]
+ pos += 1 + 2 + 2 + 1 + 10
+
+ // second part of the password cipher [minimum 13 bytes],
+ // where len=MAX(13, length of auth-plugin-data - 8)
+ //
+ // The web documentation is ambiguous about the length. However,
+ // according to mysql-5.7/sql/auth/sql_authentication.cc line 538,
+ // the 13th byte is "\0 byte, terminating the second part of
+ // a scramble". So the second part of the password cipher is
+ // a NULL terminated string that's at least 13 bytes with the
+ // last byte being NULL.
+ //
+ // The official Python library uses the fixed length 12
+ // which seems to work but technically could have a hidden bug.
+ cipher = append(cipher, data[pos:pos+12]...)
+
+ // TODO: Verify string termination
+ // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
+ // \NUL otherwise
+ //
+ //if data[len(data)-1] == 0 {
+ // return
+ //}
+ //return ErrMalformPkt
+
+ // make a memory safe copy of the cipher slice
+ var b [20]byte
+ copy(b[:], cipher)
+ return b[:], nil
+ }
+
+ // make a memory safe copy of the cipher slice
+ var b [8]byte
+ copy(b[:], cipher)
+ return b[:], nil
+}
+
+// Client Authentication Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
+func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
+ // Adjust client flags based on server support
+ clientFlags := clientProtocol41 |
+ clientSecureConn |
+ clientLongPassword |
+ clientTransactions |
+ clientLocalFiles |
+ clientPluginAuth |
+ clientMultiResults |
+ mc.flags&clientLongFlag
+
+ if mc.cfg.ClientFoundRows {
+ clientFlags |= clientFoundRows
+ }
+
+ // To enable TLS / SSL
+ if mc.cfg.tls != nil {
+ clientFlags |= clientSSL
+ }
+
+ if mc.cfg.MultiStatements {
+ clientFlags |= clientMultiStatements
+ }
+
+ // User Password
+ scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
+
+ pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + 1 + len(scrambleBuff) + 21 + 1
+
+ // To specify a db name
+ if n := len(mc.cfg.DBName); n > 0 {
+ clientFlags |= clientConnectWithDB
+ pktLen += n + 1
+ }
+
+ // Calculate packet length and get buffer with that size
+ data := mc.buf.takeSmallBuffer(pktLen + 4)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return driver.ErrBadConn
+ }
+
+ // ClientFlags [32 bit]
+ data[4] = byte(clientFlags)
+ data[5] = byte(clientFlags >> 8)
+ data[6] = byte(clientFlags >> 16)
+ data[7] = byte(clientFlags >> 24)
+
+ // MaxPacketSize [32 bit] (none)
+ data[8] = 0x00
+ data[9] = 0x00
+ data[10] = 0x00
+ data[11] = 0x00
+
+ // Charset [1 byte]
+ var found bool
+ data[12], found = collations[mc.cfg.Collation]
+ if !found {
+ // Note the possibility of false negatives:
+ // this error can be triggered even though the collation is valid if
+ // the collations map does not contain an entry the server supports.
+ return errors.New("unknown collation")
+ }
+
+ // SSL Connection Request Packet
+ // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
+ if mc.cfg.tls != nil {
+ // Send TLS / SSL request packet
+ if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil {
+ return err
+ }
+
+ // Switch to TLS
+ tlsConn := tls.Client(mc.netConn, mc.cfg.tls)
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ mc.netConn = tlsConn
+ mc.buf.nc = tlsConn
+ }
+
+ // Filler [23 bytes] (all 0x00)
+ pos := 13
+ for ; pos < 13+23; pos++ {
+ data[pos] = 0
+ }
+
+ // User [null terminated string]
+ if len(mc.cfg.User) > 0 {
+ pos += copy(data[pos:], mc.cfg.User)
+ }
+ data[pos] = 0x00
+ pos++
+
+ // ScrambleBuffer [length encoded integer]
+ data[pos] = byte(len(scrambleBuff))
+ pos += 1 + copy(data[pos+1:], scrambleBuff)
+
+ // Databasename [null terminated string]
+ if len(mc.cfg.DBName) > 0 {
+ pos += copy(data[pos:], mc.cfg.DBName)
+ data[pos] = 0x00
+ pos++
+ }
+
+ // Assume native client during response
+ pos += copy(data[pos:], "mysql_native_password")
+ data[pos] = 0x00
+
+ // Send Auth packet
+ return mc.writePacket(data)
+}
+
+// Client old authentication packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error {
+ // User password
+ scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.Passwd))
+
+ // Calculate the packet length and add a tailing 0
+ pktLen := len(scrambleBuff) + 1
+ data := mc.buf.takeSmallBuffer(4 + pktLen)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return driver.ErrBadConn
+ }
+
+ // Add the scrambled password [null terminated string]
+ copy(data[4:], scrambleBuff)
+ data[4+pktLen-1] = 0x00
+
+ return mc.writePacket(data)
+}
+
+// Client clear text authentication packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeClearAuthPacket() error {
+ // Calculate the packet length and add a tailing 0
+ pktLen := len(mc.cfg.Passwd) + 1
+ data := mc.buf.takeSmallBuffer(4 + pktLen)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return driver.ErrBadConn
+ }
+
+ // Add the clear password [null terminated string]
+ copy(data[4:], mc.cfg.Passwd)
+ data[4+pktLen-1] = 0x00
+
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Command Packets *
+******************************************************************************/
+
+func (mc *mysqlConn) writeCommandPacket(command byte) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data := mc.buf.takeSmallBuffer(4 + 1)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return driver.ErrBadConn
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ pktLen := 1 + len(arg)
+ data := mc.buf.takeBuffer(pktLen + 4)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return driver.ErrBadConn
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg
+ copy(data[5:], arg)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data := mc.buf.takeSmallBuffer(4 + 1 + 4)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return driver.ErrBadConn
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg [32 bit]
+ data[5] = byte(arg)
+ data[6] = byte(arg >> 8)
+ data[7] = byte(arg >> 16)
+ data[8] = byte(arg >> 24)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Result Packets *
+******************************************************************************/
+
+// Returns an error if the packet is not a 'Result OK'-Packet
+func (mc *mysqlConn) readResultOK() error {
+ data, err := mc.readPacket()
+ if err == nil {
+ // packet indicator
+ switch data[0] {
+
+ case iOK:
+ return mc.handleOkPacket(data)
+
+ case iEOF:
+ if len(data) > 1 {
+ plugin := string(data[1:bytes.IndexByte(data, 0x00)])
+ if plugin == "mysql_old_password" {
+ // using old_passwords
+ return ErrOldPassword
+ } else if plugin == "mysql_clear_password" {
+ // using clear text password
+ return ErrCleartextPassword
+ } else {
+ return ErrUnknownPlugin
+ }
+ } else {
+ return ErrOldPassword
+ }
+
+ default: // Error otherwise
+ return mc.handleErrorPacket(data)
+ }
+ }
+ return err
+}
+
+// Result Set Header Packet
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
+func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
+ data, err := mc.readPacket()
+ if err == nil {
+ switch data[0] {
+
+ case iOK:
+ return 0, mc.handleOkPacket(data)
+
+ case iERR:
+ return 0, mc.handleErrorPacket(data)
+
+ case iLocalInFile:
+ return 0, mc.handleInFileRequest(string(data[1:]))
+ }
+
+ // column count
+ num, _, n := readLengthEncodedInteger(data)
+ if n == len(data) {
+ return int(num), nil
+ }
+
+ return 0, ErrMalformPkt
+ }
+ return 0, err
+}
+
+// Error Packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet
+func (mc *mysqlConn) handleErrorPacket(data []byte) error {
+ if data[0] != iERR {
+ return ErrMalformPkt
+ }
+
+ // 0xff [1 byte]
+
+ // Error Number [16 bit uint]
+ errno := binary.LittleEndian.Uint16(data[1:3])
+
+ pos := 3
+
+ // SQL State [optional: # + 5bytes string]
+ if data[3] == 0x23 {
+ //sqlstate := string(data[4 : 4+5])
+ pos = 9
+ }
+
+ // Error Message [string]
+ return &MySQLError{
+ Number: errno,
+ Message: string(data[pos:]),
+ }
+}
+
+func readStatus(b []byte) statusFlag {
+ return statusFlag(b[0]) | statusFlag(b[1])<<8
+}
+
+// Ok Packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
+func (mc *mysqlConn) handleOkPacket(data []byte) error {
+ var n, m int
+
+ // 0x00 [1 byte]
+
+ // Affected rows [Length Coded Binary]
+ mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+
+ // Insert id [Length Coded Binary]
+ mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+ // server_status [2 bytes]
+ mc.status = readStatus(data[1+n+m : 1+n+m+2])
+ if err := mc.discardResults(); err != nil {
+ return err
+ }
+
+ // warning count [2 bytes]
+ if !mc.strict {
+ return nil
+ }
+
+ pos := 1 + n + m + 2
+ if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 {
+ return mc.getWarnings()
+ }
+ return nil
+}
+
+// Read Packets as Field Packets until EOF-Packet or an Error appears
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41
+func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
+ columns := make([]mysqlField, count)
+
+ for i := 0; ; i++ {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && (len(data) == 5 || len(data) == 1) {
+ if i == count {
+ return columns, nil
+ }
+ return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
+ }
+
+ // Catalog
+ pos, err := skipLengthEncodedString(data)
+ if err != nil {
+ return nil, err
+ }
+
+ // Database [len coded string]
+ n, err := skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Table [len coded string]
+ if mc.cfg.ColumnsWithAlias {
+ tableName, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ columns[i].tableName = string(tableName)
+ } else {
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ }
+
+ // Original table [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Name [len coded string]
+ name, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ columns[i].name = string(name)
+ pos += n
+
+ // Original name [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+
+ // Filler [uint8]
+ // Charset [charset, collation uint8]
+ // Length [uint32]
+ pos += n + 1 + 2 + 4
+
+ // Field type [uint8]
+ columns[i].fieldType = data[pos]
+ pos++
+
+ // Flags [uint16]
+ columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ pos += 2
+
+ // Decimals [uint8]
+ columns[i].decimals = data[pos]
+ //pos++
+
+ // Default value [len coded binary]
+ //if pos < len(data) {
+ // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:])
+ //}
+ }
+}
+
+// Read Packets as text-encoded rows until an EOF-Packet or an Error appears
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow
+func (rows *textRows) readRow(dest []driver.Value) error {
+ mc := rows.mc
+
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ // server_status [2 bytes]
+ rows.mc.status = readStatus(data[3:])
+ if err := rows.mc.discardResults(); err != nil {
+ return err
+ }
+ rows.mc = nil
+ return io.EOF
+ }
+ if data[0] == iERR {
+ rows.mc = nil
+ return mc.handleErrorPacket(data)
+ }
+
+ // RowSet Packet
+ var n int
+ var isNull bool
+ pos := 0
+
+ for i := range dest {
+ // Read bytes and convert to string
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+ if err == nil {
+ if !isNull {
+ if !mc.parseTime {
+ continue
+ } else {
+ switch rows.columns[i].fieldType {
+ case fieldTypeTimestamp, fieldTypeDateTime,
+ fieldTypeDate, fieldTypeNewDate:
+ dest[i], err = parseDateTime(
+ string(dest[i].([]byte)),
+ mc.cfg.Loc,
+ )
+ if err == nil {
+ continue
+ }
+ default:
+ continue
+ }
+ }
+
+ } else {
+ dest[i] = nil
+ continue
+ }
+ }
+ return err // err != nil
+ }
+
+ return nil
+}
+
+// Reads Packets until an EOF-Packet or an Error appears
+func (mc *mysqlConn) readUntilEOF() error {
+ for {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ switch data[0] {
+ case iERR:
+ return mc.handleErrorPacket(data)
+ case iEOF:
+ if len(data) == 5 {
+ mc.status = readStatus(data[3:])
+ }
+ return nil
+ }
+ }
+}
+
+/******************************************************************************
+* Prepared Statements *
+******************************************************************************/
+
+// Prepare Result Packets
+// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html
+func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
+ data, err := stmt.mc.readPacket()
+ if err == nil {
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ return 0, stmt.mc.handleErrorPacket(data)
+ }
+
+ // statement id [4 bytes]
+ stmt.id = binary.LittleEndian.Uint32(data[1:5])
+
+ // Column count [16 bit uint]
+ columnCount := binary.LittleEndian.Uint16(data[5:7])
+
+ // Param count [16 bit uint]
+ stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9]))
+
+ // Reserved [8 bit]
+
+ // Warning count [16 bit uint]
+ if !stmt.mc.strict {
+ return columnCount, nil
+ }
+
+ // Check for warnings count > 0, only available in MySQL > 4.1
+ if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 {
+ return columnCount, stmt.mc.getWarnings()
+ }
+ return columnCount, nil
+ }
+ return 0, err
+}
+
+// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
+func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
+ maxLen := stmt.mc.maxAllowedPacket - 1
+ pktLen := maxLen
+
+ // Between the packet header (bytes 0-3) and the data, the packet contains:
+ // 1 byte command
+ // 4 bytes stmtID
+ // 2 bytes paramID
+ const dataOffset = 1 + 4 + 2
+
+ // Can not use the write buffer since
+ // a) the buffer is too small
+ // b) it is in use
+ data := make([]byte, 4+1+4+2+len(arg))
+
+ copy(data[4+dataOffset:], arg)
+
+ for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset {
+ if dataOffset+argLen < maxLen {
+ pktLen = dataOffset + argLen
+ }
+
+ stmt.mc.sequence = 0
+ // Add command byte [1 byte]
+ data[4] = comStmtSendLongData
+
+ // Add stmtID [32 bit]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // Add paramID [16 bit]
+ data[9] = byte(paramID)
+ data[10] = byte(paramID >> 8)
+
+ // Send CMD packet
+ err := stmt.mc.writePacket(data[:4+pktLen])
+ if err == nil {
+ data = data[pktLen-dataOffset:]
+ continue
+ }
+ return err
+
+ }
+
+ // Reset Packet Sequence
+ stmt.mc.sequence = 0
+ return nil
+}
+
+// Execute Prepared Statement
+// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html
+func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
+ if len(args) != stmt.paramCount {
+ return fmt.Errorf(
+ "argument count mismatch (got: %d; has: %d)",
+ len(args),
+ stmt.paramCount,
+ )
+ }
+
+ const minPktLen = 4 + 1 + 4 + 1 + 4
+ mc := stmt.mc
+
+ // Reset packet-sequence
+ mc.sequence = 0
+
+ var data []byte
+
+ if len(args) == 0 {
+ data = mc.buf.takeBuffer(minPktLen)
+ } else {
+ data = mc.buf.takeCompleteBuffer()
+ }
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return driver.ErrBadConn
+ }
+
+ // command [1 byte]
+ data[4] = comStmtExecute
+
+ // statement_id [4 bytes]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
+ data[9] = 0x00
+
+ // iteration_count (uint32(1)) [4 bytes]
+ data[10] = 0x01
+ data[11] = 0x00
+ data[12] = 0x00
+ data[13] = 0x00
+
+ if len(args) > 0 {
+ pos := minPktLen
+
+ var nullMask []byte
+ if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) {
+ // The buffer has to be extended, but we don't know by how much,
+ // so we rely on append once all data with known sizes has been written.
+ // We stop here because the potentially large number of columns
+ // makes the required allocation size hard to guess.
+ tmp := make([]byte, pos+maskLen+typesLen)
+ copy(tmp[:pos], data[:pos])
+ data = tmp
+ nullMask = data[pos : pos+maskLen]
+ pos += maskLen
+ } else {
+ nullMask = data[pos : pos+maskLen]
+ for i := 0; i < maskLen; i++ {
+ nullMask[i] = 0
+ }
+ pos += maskLen
+ }
+
+ // newParameterBoundFlag 1 [1 byte]
+ data[pos] = 0x01
+ pos++
+
+ // type of each parameter [len(args)*2 bytes]
+ paramTypes := data[pos:]
+ pos += len(args) * 2
+
+ // value of each parameter [n bytes]
+ paramValues := data[pos:pos]
+ valuesCap := cap(paramValues)
+
+ for i, arg := range args {
+ // build NULL-bitmap
+ if arg == nil {
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = fieldTypeNULL
+ paramTypes[i+i+1] = 0x00
+ continue
+ }
+
+ // cache types and values
+ switch v := arg.(type) {
+ case int64:
+ paramTypes[i+i] = fieldTypeLongLong
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ uint64(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(uint64(v))...,
+ )
+ }
+
+ case float64:
+ paramTypes[i+i] = fieldTypeDouble
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ math.Float64bits(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(math.Float64bits(v))...,
+ )
+ }
+
+ case bool:
+ paramTypes[i+i] = fieldTypeTiny
+ paramTypes[i+i+1] = 0x00
+
+ if v {
+ paramValues = append(paramValues, 0x01)
+ } else {
+ paramValues = append(paramValues, 0x00)
+ }
+
+ case []byte:
+ // Common case (non-nil value) first
+ if v != nil {
+ paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, v); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+
+ // Handle []byte(nil) as a NULL value
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = fieldTypeNULL
+ paramTypes[i+i+1] = 0x00
+
+ case string:
+ paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, []byte(v)); err != nil {
+ return err
+ }
+ }
+
+ case time.Time:
+ paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i+1] = 0x00
+
+ var val []byte
+ if v.IsZero() {
+ val = []byte("0000-00-00")
+ } else {
+ val = []byte(v.In(mc.cfg.Loc).Format(timeFormat))
+ }
+
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(val)),
+ )
+ paramValues = append(paramValues, val...)
+
+ default:
+ return fmt.Errorf("can not convert type: %T", arg)
+ }
+ }
+
+ // Check if param values exceeded the available buffer
+ // In that case we must build the data packet with the new values buffer
+ if valuesCap != cap(paramValues) {
+ data = append(data[:pos], paramValues...)
+ mc.buf.buf = data
+ }
+
+ pos += len(paramValues)
+ data = data[:pos]
+ }
+
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) discardResults() error {
+ for mc.status&statusMoreResultsExists != 0 {
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ } else {
+ mc.status &^= statusMoreResultsExists
+ }
+ }
+ return nil
+}
+
+// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
+func (rows *binaryRows) readRow(dest []driver.Value) error {
+ data, err := rows.mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ rows.mc.status = readStatus(data[3:])
+ if err := rows.mc.discardResults(); err != nil {
+ return err
+ }
+ rows.mc = nil
+ return io.EOF
+ }
+ mc := rows.mc
+ rows.mc = nil
+
+ // Error otherwise
+ return mc.handleErrorPacket(data)
+ }
+
+ // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
+ pos := 1 + (len(dest)+7+2)>>3
+ nullMask := data[1:pos]
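+ // Worked example (assumed column count): with 3 columns,
+ // pos = 1 + (3+7+2)>>3 = 2, so the bitmap is the single byte data[1];
+ // column i is NULL when bit (i+2) of that byte is set, because the
+ // binary protocol reserves the two lowest bits of the bitmap.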
+
+ for i := range dest {
+ // Field is NULL
+ // (byte >> bit-pos) % 2 == 1
+ if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 {
+ dest[i] = nil
+ continue
+ }
+
+ // Convert to byte-coded string
+ switch rows.columns[i].fieldType {
+ case fieldTypeNULL:
+ dest[i] = nil
+ continue
+
+ // Numeric Types
+ case fieldTypeTiny:
+ if rows.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(data[pos])
+ } else {
+ dest[i] = int64(int8(data[pos]))
+ }
+ pos++
+ continue
+
+ case fieldTypeShort, fieldTypeYear:
+ if rows.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ } else {
+ dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
+ }
+ pos += 2
+ continue
+
+ case fieldTypeInt24, fieldTypeLong:
+ if rows.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ } else {
+ dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ }
+ pos += 4
+ continue
+
+ case fieldTypeLongLong:
+ if rows.columns[i].flags&flagUnsigned != 0 {
+ val := binary.LittleEndian.Uint64(data[pos : pos+8])
+ if val > math.MaxInt64 {
+ dest[i] = uint64ToString(val)
+ } else {
+ dest[i] = int64(val)
+ }
+ } else {
+ dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ }
+ pos += 8
+ continue
+
+ case fieldTypeFloat:
+ dest[i] = float32(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ pos += 4
+ continue
+
+ case fieldTypeDouble:
+ dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ pos += 8
+ continue
+
+ // Length coded Binary Strings
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
+ var isNull bool
+ var n int
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+ if err == nil {
+ if !isNull {
+ continue
+ } else {
+ dest[i] = nil
+ continue
+ }
+ }
+ return err
+
+ case
+ fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
+ fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal]
+ fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
+
+ num, isNull, n := readLengthEncodedInteger(data[pos:])
+ pos += n
+
+ switch {
+ case isNull:
+ dest[i] = nil
+ continue
+ case rows.columns[i].fieldType == fieldTypeTime:
+ // database/sql does not support an equivalent to TIME, return a string
+ var dstlen uint8
+ switch decimals := rows.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 8
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 8 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.columns[i].decimals,
+ )
+ }
+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true)
+ case rows.mc.parseTime:
+ dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
+ default:
+ var dstlen uint8
+ if rows.columns[i].fieldType == fieldTypeDate {
+ dstlen = 10
+ } else {
+ switch decimals := rows.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 19
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 19 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.columns[i].decimals,
+ )
+ }
+ }
+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false)
+ }
+
+ if err == nil {
+ pos += int(num)
+ continue
+ } else {
+ return err
+ }
+
+ // Please report if this happens!
+ default:
+ return fmt.Errorf("unknown field type %d", rows.columns[i].fieldType)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
new file mode 100644
index 0000000000..c6438d0347
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/result.go
@@ -0,0 +1,22 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlResult struct {
+ affectedRows int64
+ insertId int64
+}
+
+func (res *mysqlResult) LastInsertId() (int64, error) {
+ return res.insertId, nil
+}
+
+func (res *mysqlResult) RowsAffected() (int64, error) {
+ return res.affectedRows, nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
new file mode 100644
index 0000000000..c08255eeec
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -0,0 +1,112 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "io"
+)
+
+type mysqlField struct {
+ tableName string
+ name string
+ flags fieldFlag
+ fieldType byte
+ decimals byte
+}
+
+type mysqlRows struct {
+ mc *mysqlConn
+ columns []mysqlField
+}
+
+type binaryRows struct {
+ mysqlRows
+}
+
+type textRows struct {
+ mysqlRows
+}
+
+type emptyRows struct{}
+
+func (rows *mysqlRows) Columns() []string {
+ columns := make([]string, len(rows.columns))
+ if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
+ for i := range columns {
+ if tableName := rows.columns[i].tableName; len(tableName) > 0 {
+ columns[i] = tableName + "." + rows.columns[i].name
+ } else {
+ columns[i] = rows.columns[i].name
+ }
+ }
+ } else {
+ for i := range columns {
+ columns[i] = rows.columns[i].name
+ }
+ }
+ return columns
+}
+
+func (rows *mysqlRows) Close() error {
+ mc := rows.mc
+ if mc == nil {
+ return nil
+ }
+ if mc.netConn == nil {
+ return ErrInvalidConn
+ }
+
+ // Remove unread packets from stream
+ err := mc.readUntilEOF()
+ if err == nil {
+ if err = mc.discardResults(); err != nil {
+ return err
+ }
+ }
+
+ rows.mc = nil
+ return err
+}
+
+func (rows *binaryRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if mc.netConn == nil {
+ return ErrInvalidConn
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
+
+func (rows *textRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if mc.netConn == nil {
+ return ErrInvalidConn
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
+
+func (rows emptyRows) Columns() []string {
+ return nil
+}
+
+func (rows emptyRows) Close() error {
+ return nil
+}
+
+func (rows emptyRows) Next(dest []driver.Value) error {
+ return io.EOF
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
new file mode 100644
index 0000000000..ead9a6bf47
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -0,0 +1,150 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+type mysqlStmt struct {
+ mc *mysqlConn
+ id uint32
+ paramCount int
+ columns []mysqlField // cached from the first query
+}
+
+func (stmt *mysqlStmt) Close() error {
+ if stmt.mc == nil || stmt.mc.netConn == nil {
+ errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
+ stmt.mc = nil
+ return err
+}
+
+func (stmt *mysqlStmt) NumInput() int {
+ return stmt.paramCount
+}
+
+func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
+ return converter{}
+}
+
+func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
+ if stmt.mc.netConn == nil {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, err
+ }
+
+ mc := stmt.mc
+
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err == nil {
+ if resLen > 0 {
+ // Columns
+ err = mc.readUntilEOF()
+ if err != nil {
+ return nil, err
+ }
+
+ // Rows
+ err = mc.readUntilEOF()
+ }
+ if err == nil {
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, nil
+ }
+ }
+
+ return nil, err
+}
+
+func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
+ if stmt.mc.netConn == nil {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, err
+ }
+
+ mc := stmt.mc
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ rows := new(binaryRows)
+
+ if resLen > 0 {
+ rows.mc = mc
+ // Columns
+ // If not cached, read them and cache them
+ if stmt.columns == nil {
+ rows.columns, err = mc.readColumns(resLen)
+ stmt.columns = rows.columns
+ } else {
+ rows.columns = stmt.columns
+ err = mc.readUntilEOF()
+ }
+ }
+
+ return rows, err
+}
+
+type converter struct{}
+
+func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+ if driver.IsValue(v) {
+ return v, nil
+ }
+
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Ptr:
+ // indirect pointers
+ if rv.IsNil() {
+ return nil, nil
+ }
+ return c.ConvertValue(rv.Elem().Interface())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return int64(rv.Uint()), nil
+ case reflect.Uint64:
+ u64 := rv.Uint()
+ if u64 >= 1<<63 {
+ return strconv.FormatUint(u64, 10), nil
+ }
+ return int64(u64), nil
+ case reflect.Float32, reflect.Float64:
+ return rv.Float(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
new file mode 100644
index 0000000000..33c749b35c
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -0,0 +1,31 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlTx struct {
+ mc *mysqlConn
+}
+
+func (tx *mysqlTx) Commit() (err error) {
+ if tx.mc == nil || tx.mc.netConn == nil {
+ return ErrInvalidConn
+ }
+ err = tx.mc.exec("COMMIT")
+ tx.mc = nil
+ return
+}
+
+func (tx *mysqlTx) Rollback() (err error) {
+ if tx.mc == nil || tx.mc.netConn == nil {
+ return ErrInvalidConn
+ }
+ err = tx.mc.exec("ROLLBACK")
+ tx.mc = nil
+ return
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
new file mode 100644
index 0000000000..d523b7ffdf
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -0,0 +1,740 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/sha1"
+ "crypto/tls"
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+)
+
+var (
+ tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
+)
+
+// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
+// Use the key as a value in the DSN where tls=value.
+//
+// rootCertPool := x509.NewCertPool()
+// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+// log.Fatal("Failed to append PEM.")
+// }
+// clientCert := make([]tls.Certificate, 0, 1)
+// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// clientCert = append(clientCert, certs)
+// mysql.RegisterTLSConfig("custom", &tls.Config{
+// RootCAs: rootCertPool,
+// Certificates: clientCert,
+// })
+// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
+//
+func RegisterTLSConfig(key string, config *tls.Config) error {
+ if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" {
+ return fmt.Errorf("key '%s' is reserved", key)
+ }
+
+ if tlsConfigRegister == nil {
+ tlsConfigRegister = make(map[string]*tls.Config)
+ }
+
+ tlsConfigRegister[key] = config
+ return nil
+}
+
+// DeregisterTLSConfig removes the tls.Config associated with key.
+func DeregisterTLSConfig(key string) {
+ if tlsConfigRegister != nil {
+ delete(tlsConfigRegister, key)
+ }
+}
+
+// Returns the bool value of the input.
+// The 2nd return value indicates if the input was a valid bool value
+func readBool(input string) (value bool, valid bool) {
+ switch input {
+ case "1", "true", "TRUE", "True":
+ return true, true
+ case "0", "false", "FALSE", "False":
+ return false, true
+ }
+
+ // Not a valid bool value
+ return
+}
+
+/******************************************************************************
+* Authentication *
+******************************************************************************/
+
+// Encrypt password using 4.1+ method
+func scramblePassword(scramble, password []byte) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // stage1Hash = SHA1(password)
+ crypt := sha1.New()
+ crypt.Write(password)
+ stage1 := crypt.Sum(nil)
+
+ // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
+ // inner Hash
+ crypt.Reset()
+ crypt.Write(stage1)
+ hash := crypt.Sum(nil)
+
+ // outer Hash
+ crypt.Reset()
+ crypt.Write(scramble)
+ crypt.Write(hash)
+ scramble = crypt.Sum(nil)
+
+ // token = scrambleHash XOR stage1Hash
+ for i := range scramble {
+ scramble[i] ^= stage1[i]
+ }
+ return scramble
+}
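+
+// The transformation above, written as one formula (the standard
+// mysql_native_password scrambling):
+//
+// token = SHA1(scramble + SHA1(SHA1(password))) XOR SHA1(password)
+//
+// The server XORs the token with SHA1(scramble + stored_hash) to recover
+// SHA1(password) and verifies that its SHA1 equals the stored hash.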
+
+// Encrypt password using pre 4.1 (old password) method
+// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
+type myRnd struct {
+ seed1, seed2 uint32
+}
+
+const myRndMaxVal = 0x3FFFFFFF
+
+// Pseudo random number generator
+func newMyRnd(seed1, seed2 uint32) *myRnd {
+ return &myRnd{
+ seed1: seed1 % myRndMaxVal,
+ seed2: seed2 % myRndMaxVal,
+ }
+}
+
+// Tested to be equivalent to MariaDB's floating point variant
+// http://play.golang.org/p/QHvhd4qved
+// http://play.golang.org/p/RG0q4ElWDx
+func (r *myRnd) NextByte() byte {
+ r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
+ r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
+
+ return byte(uint64(r.seed1) * 31 / myRndMaxVal)
+}
+
+// Generate binary hash from byte string using insecure pre 4.1 method
+func pwHash(password []byte) (result [2]uint32) {
+ var add uint32 = 7
+ var tmp uint32
+
+ result[0] = 1345345333
+ result[1] = 0x12345671
+
+ for _, c := range password {
+ // skip spaces and tabs in password
+ if c == ' ' || c == '\t' {
+ continue
+ }
+
+ tmp = uint32(c)
+ result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
+ result[1] += (result[1] << 8) ^ result[0]
+ add += tmp
+ }
+
+ // Remove sign bit (1<<31)-1)
+ result[0] &= 0x7FFFFFFF
+ result[1] &= 0x7FFFFFFF
+
+ return
+}
+
+// Encrypt password using insecure pre 4.1 method
+func scrambleOldPassword(scramble, password []byte) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ scramble = scramble[:8]
+
+ hashPw := pwHash(password)
+ hashSc := pwHash(scramble)
+
+ r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
+
+ var out [8]byte
+ for i := range out {
+ out[i] = r.NextByte() + 64
+ }
+
+ mask := r.NextByte()
+ for i := range out {
+ out[i] ^= mask
+ }
+
+ return out[:]
+}
+
+/******************************************************************************
+* Time related utils *
+******************************************************************************/
+
+// NullTime represents a time.Time that may be NULL.
+// NullTime implements the Scanner interface so
+// it can be used as a scan destination:
+//
+// var nt NullTime
+// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
+// ...
+// if nt.Valid {
+// // use nt.Time
+// } else {
+// // NULL value
+// }
+//
+// This NullTime implementation is not driver-specific
+type NullTime struct {
+ Time time.Time
+ Valid bool // Valid is true if Time is not NULL
+}
+
+// Scan implements the Scanner interface.
+// The value type must be time.Time or string / []byte (formatted time-string),
+// otherwise Scan fails.
+func (nt *NullTime) Scan(value interface{}) (err error) {
+ if value == nil {
+ nt.Time, nt.Valid = time.Time{}, false
+ return
+ }
+
+ switch v := value.(type) {
+ case time.Time:
+ nt.Time, nt.Valid = v, true
+ return
+ case []byte:
+ nt.Time, err = parseDateTime(string(v), time.UTC)
+ nt.Valid = (err == nil)
+ return
+ case string:
+ nt.Time, err = parseDateTime(v, time.UTC)
+ nt.Valid = (err == nil)
+ return
+ }
+
+ nt.Valid = false
+ return fmt.Errorf("Can't convert %T to time.Time", value)
+}
+
+// Value implements the driver Valuer interface.
+func (nt NullTime) Value() (driver.Value, error) {
+ if !nt.Valid {
+ return nil, nil
+ }
+ return nt.Time, nil
+}
+
+func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
+ base := "0000-00-00 00:00:00.0000000"
+ switch len(str) {
+ case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
+ if str == base[:len(str)] {
+ return
+ }
+ t, err = time.Parse(timeFormat[:len(str)], str)
+ default:
+ err = fmt.Errorf("invalid time string: %s", str)
+ return
+ }
+
+ // Adjust location
+ if err == nil && loc != time.UTC {
+ y, mo, d := t.Date()
+ h, mi, s := t.Clock()
+ t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil
+ }
+
+ return
+}
+
+func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
+ switch num {
+ case 0:
+ return time.Time{}, nil
+ case 4:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ 0, 0, 0, 0,
+ loc,
+ ), nil
+ case 7:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ 0,
+ loc,
+ ), nil
+ case 11:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
+ loc,
+ ), nil
+ }
+ return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
+}
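+
+// Decoding example (assumed payload): a 7-byte DATETIME body
+//
+// 0xe0 0x07 0x0c 0x19 0x17 0x3b 0x3b
+//
+// decodes to 2016-12-25 23:59:59 in loc (year 0x07e0 = 2016,
+// month 12, day 25, hour 23, minute 59, second 59).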
+
+// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
+// if the DATE or DATETIME has the zero value.
+// It must never be changed.
+// The current behavior depends on database/sql copying the result.
+var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
+
+const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
+
+func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) {
+ // length expects the deterministic length of the zero value;
+ // space for negative times and 100+ hours is added automatically if needed
+ if len(src) == 0 {
+ if justTime {
+ return zeroDateTime[11 : 11+length], nil
+ }
+ return zeroDateTime[:length], nil
+ }
+ var dst []byte // return value
+ var pt, p1, p2, p3 byte // current digit pair
+ var zOffs byte // offset of value in zeroDateTime
+ if justTime {
+ switch length {
+ case
+ 8, // time (can be up to 10 when negative and 100+ hours)
+ 10, 11, 12, 13, 14, 15: // time with fractional seconds
+ default:
+ return nil, fmt.Errorf("illegal TIME length %d", length)
+ }
+ switch len(src) {
+ case 8, 12:
+ default:
+ return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
+ }
+ // +2 to enable negative time and 100+ hours
+ dst = make([]byte, 0, length+2)
+ if src[0] == 1 {
+ dst = append(dst, '-')
+ }
+ if src[1] != 0 {
+ hour := uint16(src[1])*24 + uint16(src[5])
+ pt = byte(hour / 100)
+ p1 = byte(hour - 100*uint16(pt))
+ dst = append(dst, digits01[pt])
+ } else {
+ p1 = src[5]
+ }
+ zOffs = 11
+ src = src[6:]
+ } else {
+ switch length {
+ case 10, 19, 21, 22, 23, 24, 25, 26:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s length %d", t, length)
+ }
+ switch len(src) {
+ case 4, 7, 11:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
+ }
+ dst = make([]byte, 0, length)
+ // start with the date
+ year := binary.LittleEndian.Uint16(src[:2])
+ pt = byte(year / 100)
+ p1 = byte(year - 100*uint16(pt))
+ p2, p3 = src[2], src[3]
+ dst = append(dst,
+ digits10[pt], digits01[pt],
+ digits10[p1], digits01[p1], '-',
+ digits10[p2], digits01[p2], '-',
+ digits10[p3], digits01[p3],
+ )
+ if length == 10 {
+ return dst, nil
+ }
+ if len(src) == 4 {
+ return append(dst, zeroDateTime[10:length]...), nil
+ }
+ dst = append(dst, ' ')
+ p1 = src[4] // hour
+ src = src[5:]
+ }
+ // p1 is 2-digit hour, src is after hour
+ p2, p3 = src[0], src[1]
+ dst = append(dst,
+ digits10[p1], digits01[p1], ':',
+ digits10[p2], digits01[p2], ':',
+ digits10[p3], digits01[p3],
+ )
+ if length <= byte(len(dst)) {
+ return dst, nil
+ }
+ src = src[2:]
+ if len(src) == 0 {
+ return append(dst, zeroDateTime[19:zOffs+length]...), nil
+ }
+ microsecs := binary.LittleEndian.Uint32(src[:4])
+ p1 = byte(microsecs / 10000)
+ microsecs -= 10000 * uint32(p1)
+ p2 = byte(microsecs / 100)
+ microsecs -= 100 * uint32(p2)
+ p3 = byte(microsecs)
+ switch decimals := zOffs + length - 20; decimals {
+ default:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3], digits01[p3],
+ ), nil
+ case 1:
+ return append(dst, '.',
+ digits10[p1],
+ ), nil
+ case 2:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ ), nil
+ case 3:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2],
+ ), nil
+ case 4:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ ), nil
+ case 5:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3],
+ ), nil
+ }
+}
+
+/******************************************************************************
+* Convert from and to bytes *
+******************************************************************************/
+
+func uint64ToBytes(n uint64) []byte {
+ return []byte{
+ byte(n),
+ byte(n >> 8),
+ byte(n >> 16),
+ byte(n >> 24),
+ byte(n >> 32),
+ byte(n >> 40),
+ byte(n >> 48),
+ byte(n >> 56),
+ }
+}
+
+func uint64ToString(n uint64) []byte {
+ var a [20]byte
+ i := 20
+
+ // U+0030 = 0
+ // ...
+ // U+0039 = 9
+
+ var q uint64
+ for n >= 10 {
+ i--
+ q = n / 10
+ a[i] = uint8(n-q*10) + 0x30
+ n = q
+ }
+
+ i--
+ a[i] = uint8(n) + 0x30
+
+ return a[i:]
+}
+
+// treats string value as unsigned integer representation
+func stringToInt(b []byte) int {
+ val := 0
+ for i := range b {
+ val *= 10
+ val += int(b[i] - 0x30)
+ }
+ return val
+}
+
+// returns the string read as a byte slice, whether the value is NULL,
+// the number of bytes read and an error, in case the string is longer than
+// the input slice
+func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
+ // Get length
+ num, isNull, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return b[n:n], isNull, n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return b[n-int(num) : n], false, n, nil
+ }
+ return nil, false, n, io.EOF
+}
+
+// returns the number of bytes skipped and an error, in case the string is
+// longer than the input slice
+func skipLengthEncodedString(b []byte) (int, error) {
+ // Get length
+ num, _, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return n, nil
+ }
+ return n, io.EOF
+}
+
+// returns the number read, whether the value is NULL and the number of bytes read
+func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
+ // See issue #349
+ if len(b) == 0 {
+ return 0, true, 1
+ }
+ switch b[0] {
+
+ // 251: NULL
+ case 0xfb:
+ return 0, true, 1
+
+ // 252: value of following 2
+ case 0xfc:
+ return uint64(b[1]) | uint64(b[2])<<8, false, 3
+
+ // 253: value of following 3
+ case 0xfd:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
+
+ // 254: value of following 8
+ case 0xfe:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
+ uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+ uint64(b[7])<<48 | uint64(b[8])<<56,
+ false, 9
+ }
+
+ // 0-250: value of first byte
+ return uint64(b[0]), false, 1
+}
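+
+// Decoding example (illustrative): the bytes 0xfc 0x10 0x27 decode to
+//
+// num = 0x2710 = 10000, isNull = false, n = 3
+//
+// while the single byte 0xfb decodes to NULL (num = 0, isNull = true, n = 1).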
+
+// encodes a uint64 value and appends it to the given bytes slice
+func appendLengthEncodedInteger(b []byte, n uint64) []byte {
+ switch {
+ case n <= 250:
+ return append(b, byte(n))
+
+ case n <= 0xffff:
+ return append(b, 0xfc, byte(n), byte(n>>8))
+
+ case n <= 0xffffff:
+ return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
+ }
+ return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
+ byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+}
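+
+// Encoding example (illustrative), the inverse of the decoder above:
+//
+// appendLengthEncodedInteger(nil, 10000) // -> []byte{0xfc, 0x10, 0x27}
+// appendLengthEncodedInteger(nil, 250) // -> []byte{0xfa}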
+
+// reserveBuffer checks cap(buf) and expands the buffer to len(buf) + appendSize.
+// If cap(buf) is not enough, it reallocates a new buffer.
+func reserveBuffer(buf []byte, appendSize int) []byte {
+ newSize := len(buf) + appendSize
+ if cap(buf) < newSize {
+ // Grow buffer exponentially
+ newBuf := make([]byte, len(buf)*2+appendSize)
+ copy(newBuf, buf)
+ buf = newBuf
+ }
+ return buf[:newSize]
+}
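+
+// Growth example (assumed sizes): for buf with len 10 and cap 12,
+// reserveBuffer(buf, 8) needs size 18 > 12, so it allocates a new
+// backing array of 10*2+8 = 28 bytes, copies the existing 10 bytes,
+// and returns a slice with len 18 and cap 28.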
+
+// escapeBytesBackslash escapes []byte with backslashes (\)
+// This escapes the contents of a string (provided as []byte) by adding backslashes before special
+// characters, and turning others into specific escape sequences, such as
+// turning newlines into \n and null bytes into \0.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
+func escapeBytesBackslash(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ switch c {
+ case '\x00':
+ buf[pos] = '\\'
+ buf[pos+1] = '0'
+ pos += 2
+ case '\n':
+ buf[pos] = '\\'
+ buf[pos+1] = 'n'
+ pos += 2
+ case '\r':
+ buf[pos] = '\\'
+ buf[pos+1] = 'r'
+ pos += 2
+ case '\x1a':
+ buf[pos] = '\\'
+ buf[pos+1] = 'Z'
+ pos += 2
+ case '\'':
+ buf[pos] = '\\'
+ buf[pos+1] = '\''
+ pos += 2
+ case '"':
+ buf[pos] = '\\'
+ buf[pos+1] = '"'
+ pos += 2
+ case '\\':
+ buf[pos] = '\\'
+ buf[pos+1] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
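+
+// Escaping example (illustrative): escapeBytesBackslash(nil, []byte("don't"))
+// returns []byte(`don\'t`), and a raw newline byte in the input becomes
+// the two characters '\\' and 'n' in the output.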
+
+// escapeStringBackslash is similar to escapeBytesBackslash but for string.
+func escapeStringBackslash(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ switch c {
+ case '\x00':
+ buf[pos] = '\\'
+ buf[pos+1] = '0'
+ pos += 2
+ case '\n':
+ buf[pos] = '\\'
+ buf[pos+1] = 'n'
+ pos += 2
+ case '\r':
+ buf[pos] = '\\'
+ buf[pos+1] = 'r'
+ pos += 2
+ case '\x1a':
+ buf[pos] = '\\'
+ buf[pos+1] = 'Z'
+ pos += 2
+ case '\'':
+ buf[pos] = '\\'
+ buf[pos+1] = '\''
+ pos += 2
+ case '"':
+ buf[pos] = '\\'
+ buf[pos+1] = '"'
+ pos += 2
+ case '\\':
+ buf[pos] = '\\'
+ buf[pos+1] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
+// This escapes the contents of a string by doubling up any apostrophes that
+// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
+// effect on the server.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
+func escapeBytesQuotes(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ if c == '\'' {
+ buf[pos] = '\''
+ buf[pos+1] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
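+
+// Quoting example (illustrative): escapeBytesQuotes(nil, []byte("it's"))
+// returns []byte("it''s"); backslashes pass through untouched, which is
+// exactly what NO_BACKSLASH_ESCAPES mode requires.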
+
+// escapeStringQuotes is similar to escapeBytesQuotes but for string.
+func escapeStringQuotes(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ if c == '\'' {
+ buf[pos] = '\''
+ buf[pos+1] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000000..1b1b1921ef
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 0000000000..e2e0651a93
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
+ make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000000..e392575b35
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,229 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
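+
+// Illustrative usage (a sketch; pb.Person stands for any generated message
+// type with a *string Name field):
+//
+//	dst := &pb.Person{Name: proto.String("Alice")}
+//	proto.Merge(dst, &pb.Person{Name: proto.String("Bob")}) // dst.Name is now "Bob"
+//	dup := proto.Clone(dst).(*pb.Person)                    // deep copy, independent of dst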
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := extendable(in.Addr().Interface()); ok {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000000..aa207298f9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,970 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero for both if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
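+
+// For example, 300 (0b100101100) splits into the 7-bit groups 0101100 and
+// 0000010; the low group is emitted first with the continuation bit set on
+// every byte but the last, so the encoding is {0xAC, 0x02} and DecodeVarint
+// returns (300, 2).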
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
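+ // Each unrolled step below adds the raw byte, continuation bit and all,
+ // shifted into place, then subtracts that bit back out once a further
+ // byte follows; this saves masking off the high bit on every byte.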
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+ // x -= 0x80 << 63 // Always zero.
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
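+
+// Zigzag interleaves signed values so that small magnitudes stay small as
+// varints: 0, -1, 1, -2, 2 map to 0, 1, 2, 3, 4. A decoded varint of 3, for
+// example, becomes (3 >> 1) ^ -(3 & 1) = 1 ^ -1 = -2.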
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // TODO: check whether more callers could use alloc=false.
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// skipAndSave skips the next item in the buffer; the item's tag and wire
+// type have already been decoded and are passed in as arguments.
+// If the protocol buffer has extensions and the field matches, the skipped
+// data is added as an extension. Otherwise, if the XXX_unrecognized field
+// exists, the skipped data is appended there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// skip skips the next item in the buffer; its tag and wire type have
+// already been decoded and are passed in as arguments.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
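+
+// A typical call site looks like this (a sketch; pb.Person stands for any
+// generated message type):
+//
+//	msg := &pb.Person{}
+//	if err := proto.Unmarshal(data, msg); err != nil {
+//		// handle malformed or truncated input
+//	}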
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ if required > 0 {
+ // Not enough information to determine the exact field.
+ // (See below.)
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ extmap := e.extensionsWrite()
+ ext := extmap[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ extmap[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
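+
+// The key decoded at the top of the loop above packs the field number and
+// wire type into a single varint: key = tag<<3 | wire. Field 1 with wire
+// type 0 (varint), for example, is the single byte 0x08; field 2 with wire
+// type 2 (length-delimited) is 0x12.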
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // tagcode for key and value properties are always a single byte
+ // because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() {
+ keyelem = reflect.Zero(p.mtype.Key())
+ }
+ if !valelem.IsValid() {
+ valelem = reflect.Zero(p.mtype.Elem())
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
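+
+// On the wire, a map entry is simply a length-delimited message with the
+// key in field 1 and the value in field 2: a map[int32]string entry
+// {1: "a"}, for example, has the body {0x08, 0x01, 0x12, 0x01, 'a'}.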
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000000..2b30f84626
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1362 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// maxMarshalSize is the largest allowed size of an encoded protobuf,
+// since C++ and Java use signed int32s for the size.
+const maxMarshalSize = 1<<31 - 1
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
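+
+// The size grows by one byte per 7 bits of magnitude: values up to 127 take
+// one byte, values up to 16383 take two, and the full 64-bit range tops out
+// at the 10-byte maxVarintBytes.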
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
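+
+// Zigzag keeps small negative numbers short: EncodeZigzag64 maps -1 (all
+// ones as a uint64) to (x<<1) ^ (arithmetic x>>63) = 1, a single varint
+// byte, where the plain two's-complement varint of -1 would take all ten
+// bytes.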
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
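+
+// For example, EncodeStringBytes("hi") appends {0x02, 'h', 'i'}: a varint
+// byte count followed by the raw bytes, so sizeStringBytes("hi") is 3.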
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
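+
+// A typical call site looks like this (a sketch; msg is any generated
+// message value):
+//
+//	data, err := proto.Marshal(msg)
+//	if err != nil {
+//		// e.g. an unset required field, or a message encoding to >2GB
+//	}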
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ p.buf = append(p.buf, data...)
+ return err
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ (stats).Encode++ // Parens are to work around a goimports bug.
+ }
+
+ if len(p.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ (stats).Size++ // Parens are to work around a goimports bug.
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
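+
+// Packed encoding writes the tag once, then a byte count, then the raw
+// payload: three bools pack to {tag, 0x03, 0x01, 0x00, 0x01}, whereas the
+// unpacked form above repeats the tag before every element.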
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of byte slices ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ exts := structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionsMap(*exts); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+ exts := structPointer_Extensions(base, p.field)
+
+ v, mu := exts.extensionsRead()
+ if v == nil {
+ return nil
+ }
+
+ mu.Lock()
+ defer mu.Unlock()
+ if err := encodeExtensionsMap(v); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := structPointer_ExtMap(base, p.field)
+ return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+ v := structPointer_Extensions(base, p.field)
+ return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+	map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
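+// An illustrative sketch (not upstream code) of the equivalence described
+// above, hand-encoding one map<int32, string> entry: the entry is a
+// length-delimited sub-message holding the key as field 1 and the value as
+// field 2.
+func mapEntryWireSketch(fieldNum int, key int32, val string) []byte {
+	entry := NewBuffer(nil)
+	entry.EncodeVarint(uint64(1<<3 | WireVarint)) // key_type key = 1
+	entry.EncodeVarint(uint64(key))               // sign-extends negative keys
+	entry.EncodeVarint(uint64(2<<3 | WireBytes)) // value_type value = 2
+	entry.EncodeStringBytes(val)
+
+	out := NewBuffer(nil)
+	out.EncodeVarint(uint64(fieldNum<<3 | WireBytes))
+	out.EncodeRawBytes(entry.buf) // length-prefixed entry bytes
+	return out.buf
+}
+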
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns new addressable reflect.Values for the map's key
+// and value types, along with structPointers to them suitable for passing to
+// an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
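+// A minimal sketch (illustrative, not upstream code) of the double
+// indirection built above: the element encoders expect the address of a *K
+// field, so we wrap an addressable K in an addressable *K and take its
+// address.
+func doubleIndirectSketch(k int32) interface{} {
+	kcopy := reflect.New(reflect.TypeOf(k)).Elem() // addressable int32
+	kcopy.Set(reflect.ValueOf(k))
+	kptr := reflect.New(reflect.PtrTo(kcopy.Type())).Elem() // addressable *int32
+	kptr.Set(kcopy.Addr())
+	return kptr.Addr().Interface() // **int32, the shape toStructPointer expects
+}
+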
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ if len(o.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err == ErrNil {
+ return errOneofHasNil
+ } else if err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(o.buf)+len(v) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
+
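+// A small sketch (illustrative, not upstream code) of the shift arithmetic
+// above: x is how far the message must move so that the varint length exactly
+// fills the reserved gap. For example, a 300-byte message needs a two-byte
+// varint, so x = 2-4 = -2 and the message slides two bytes left.
+func lenPrefixShiftSketch(msgLen int) int {
+	const reserved = 4 // enc_len_thing reserves four bytes up front
+	return sizeVarint(uint64(msgLen)) - reserved
+}
+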
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000000..2ed1cf5966
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal. Note a "bytes" field,
+ although represented by []byte, is not a repeated field and the
+ rule for the scalar fields described above applies.
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Two map fields are equal iff their lengths are the same,
+ and they contain the same set of elements. Zero-length map
+ fields are equal.
+  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
+
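+// The sketch below (illustrative only; real messages come from the protocol
+// compiler) exercises Equal with a hand-written minimal message type.
+type sketchMsg struct {
+	Label *string `protobuf:"bytes,1,opt,name=label"`
+}
+
+func (m *sketchMsg) Reset()         { *m = sketchMsg{} }
+func (m *sketchMsg) String() string { return "sketchMsg" } // generated code would use CompactTextString
+func (*sketchMsg) ProtoMessage()    {}
+
+func equalSketch() bool {
+	a := &sketchMsg{Label: String("hello")}
+	b := &sketchMsg{Label: String("hello")}
+	return Equal(a, b) // true: same type, corresponding fields equal
+}
+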
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000000..eaad218312
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,587 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, bool) {
+ if ep, ok := p.(extendableProto); ok {
+ return ep, ok
+ }
+ if ep, ok := p.(extendableProtoV1); ok {
+ return extensionAdapter{ep}, ok
+ }
+ return nil, false
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
+
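+// A usage sketch (illustrative, not upstream code): hold the returned locker
+// for the duration of any read of the map, mirroring extensionsSize below.
+func lockedExtensionsRangeSketch(e *XXX_InternalExtensions, visit func(int32, Extension)) {
+	m, mu := e.extensionsRead()
+	if m == nil {
+		return // no extensions have ever been written
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	for id, ext := range m {
+		visit(id, ext)
+	}
+}
+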
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+ Filename string // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ epb, ok := extendable(base)
+ if !ok {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensions encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensions(e *XXX_InternalExtensions) error {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return nil // fast path
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return encodeExtensionsMap(m)
+}
+
+// encodeExtensionsMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionsMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func extensionsSize(e *XXX_InternalExtensions) (n int) {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return extensionsMapSize(m)
+}
+
+func extensionsMapSize(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ epb, ok := extendable(pb)
+ if !ok {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok = extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+	// We do not need to return a Ptr; we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non-int32 reflect.Value
+		// directly, set it through SetInt as an int64.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate a "field" to store the pointer/slice itself; the
+ // pointer/slice will be stored here. We pass
+ // the address of this field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ epb, ok := extendable(pb)
+ if !ok {
+ return errors.New("proto: not an extendable proto")
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
+
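+// A round-trip sketch (illustrative, not upstream code; in practice the
+// descriptor is a generated E_* variable):
+func setThenGetSketch(pb Message, desc *ExtensionDesc, value interface{}) (interface{}, error) {
+	if err := SetExtension(pb, desc, value); err != nil {
+		return nil, err
+	}
+	// GetExtension returns a value of the descriptor's ExtensionType.
+	return GetExtension(pb, desc)
+}
+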
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000000..ac4ddbc075
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,898 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+  method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
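+// A reuse sketch (illustrative, not upstream code): Reset between marshals
+// amortizes allocations; copy Bytes before the next Reset, since it aliases
+// the internal buffer.
+func bufferReuseSketch(msgs []Message) ([][]byte, error) {
+	b := NewBuffer(nil)
+	out := make([][]byte, 0, len(msgs))
+	for _, m := range msgs {
+		b.Reset()
+		if err := b.Marshal(m); err != nil {
+			return nil, err
+		}
+		enc := make([]byte, len(b.Bytes()))
+		copy(enc, b.Bytes())
+		out = append(out, enc)
+	}
+	return out, nil
+}
+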
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
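+// A usage sketch (illustrative, not upstream code), using the FOO enum from
+// the package example: both the symbolic and the numeric JSON forms decode
+// to 17.
+func unmarshalFOOSketch(data []byte) (int32, error) {
+	fooValue := map[string]int32{"X": 17} // mirrors the generated FOO_value map
+	return UnmarshalJSONEnum(fooValue, data, "example.FOO")
+}
+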
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
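+
+// Typical use (illustrative): dump a freshly marshaled message.
+//
+//	data, _ := Marshal(msg)
+//	NewBuffer(nil).DebugPrint("msg", data)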
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
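+
+// Typical use (illustrative; Msg is a hypothetical generated type with a
+// proto-declared default on one of its optional scalar fields):
+//
+//	m := &Msg{}
+//	SetDefaults(m) // unset scalar fields with defaults are now populated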
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+	// defaults maps a protocol buffer struct type to a defaultMessage
+	// recording which fields carry proto-declared defaults and their values.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
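+
+// Usage sketch (illustrative; mv is a reflect.Value of map kind):
+//
+//	keys := mv.MapKeys()
+//	sort.Sort(mapKeys(keys)) // keys now in deterministic order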
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000000..fd982decd6
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,311 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
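+//
+// For example (illustrative), one item with type_id 12345 and a 3-byte
+// message payload encodes on the wire as:
+//
+//	0b             // field 1, start group
+//	10 b9 60       // field 2, varint 12345
+//	1a 03 xx xx xx // field 3, length-delimited message bytes
+//	0c             // field 1, end group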
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
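+// Usage sketch (illustrative; myMsg is a value of a hypothetical type whose
+// generated code provides a MessageTypeId method):
+//
+//	ms := new(messageSet)
+//	err := ms.Marshal(myMsg)  // stores myMsg's encoding under its type ID
+//	ok := ms.Has(myMsg)       // true once stored
+//	err = ms.Unmarshal(myMsg) // decodes the stored bytes back into myMsg
+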
+// Support for the message_set_wire_format message option.
+
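+// skipVarint returns buf with its leading varint removed. It assumes buf
+// begins with a well-formed varint; an empty or truncated buffer causes an
+// index panic.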
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ if err := encodeExtensions(exts); err != nil {
+ return nil, err
+ }
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ if err := encodeExtensionsMap(exts); err != nil {
+ return nil, err
+ }
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ if i > 0 {
+ b.WriteByte(',')
+ }
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
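+
+// Generated code typically calls this from an init function, e.g.
+// (hypothetical message type and field number):
+//
+//	func init() {
+//		RegisterMessageSetType((*MyExtension)(nil), 12345, "my.package.MyExtension")
+//	}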
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000000..fb512e2e16
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,484 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+	case reflect.Int64:
+		elem.SetInt(int64(x))
+	case reflect.Uint64:
+		elem.SetUint(x)
+ case reflect.Float64:
+ elem.SetFloat(float64(math.Float64frombits(x)))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+	case reflect.Uint64:
+		return elem.Uint()
+	case reflect.Float64:
+		return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000000..6b5567d47c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,270 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
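+
+// For example (illustrative): given a reflect.Value v holding a
+// *struct{ A int32; S *string }, the S field is reached by offset arithmetic:
+//
+//	p := toStructPointer(v)          // base address of the struct
+//	sf := v.Type().Elem().Field(1)   // S's reflect.StructField
+//	f := toField(&sf)                // its byte offset within the struct
+//	sp := structPointer_String(p, f) // **string at struct base + offset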
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Extensions returns the address of the XXX_InternalExtensions field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000000..ec2289c005
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,872 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
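+
+// For example (illustrative):
+//
+//	var tm tagMap
+//	tm.put(3, 0)        // tag 3 < tagMapFastLimit: stored in the fastTags slice
+//	tm.put(100000, 1)   // large tag: stored in the slowTags map
+//	fi, ok := tm.get(3) // fi == 0, ok == true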
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
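+
+// For the example tag above, "bytes,49,opt,name=foo,def=hello!" parses to
+// (illustrative):
+//
+//	p.Wire = "bytes" // p.WireType = WireBytes
+//	p.Tag = 49
+//	p.Optional = true
+//	p.OrigName = "foo"
+//	p.HasDefault, p.Default = true, "hello!"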
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.dec = (*Buffer).dec_slice_byte
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ } else {
+ p.enc = (*Buffer).enc_slice_byte
+ p.size = size_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
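+ // For example (sketch): a map[string]int32 value gets *int32 coders,
+ // while map[string][]byte and map-of-message-pointer values keep their
+ // own types, since []byte and message pointers already have direct coders.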
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for ; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
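+ // Worked example (sketch): for Tag=5 with WireBytes (2), x = 5<<3|2 = 42,
+ // which fits in one byte, so tagcode is [0x2a]. For Tag=16 with
+ // WireVarint (0), x = 128: the loop emits 0x80 (low seven bits plus a
+ // continuation bit), then 0x01, so tagcode is [0x80, 0x01].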
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
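+
+// The slow path above is the classic double-checked pattern: on a cache
+// miss the write lock is taken and getPropertiesLocked re-checks the map,
+// so concurrent first-time callers for the same type do not both build
+// (and race to install) the StructProperties.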
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
+ reflect.PtrTo(t).Implements(extendableProtoV1Type)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_InternalExtensions" { // special case
+ p.enc = (*Buffer).enc_exts
+ p.dec = nil // not needed
+ p.size = size_exts
+ } else if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ } else if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
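+
+// Usage sketch (with a hypothetical registered enum "example.Color"):
+// if m := EnumValueMap("example.Color"); m != nil {
+//     red := m["RED"] // int32 value registered for the name "RED"
+// }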
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
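+
+// Usage sketch (hypothetical generated type *Example, registered as
+// "pkg.Example" via RegisterType((*Example)(nil), "pkg.Example")):
+// t := MessageType("pkg.Example") // reflect.Type of *Example, or nil
+// n := MessageName(&Example{})    // "pkg.Example", or "" if unregistered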
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000000..965876bf03
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,854 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
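+
+// In compact mode, WriteByte converts '\n' to ' ' and Write joins
+// newline-separated fragments with single spaces, keeping output on one
+// line; otherwise, whenever a line has just been completed, the next write
+// first emits 2*ind spaces of indentation via writeIndent.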
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func requiresQuotes(u string) bool {
+ // When the type URL contains any character outside [0-9A-Za-z./_], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
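+
+// For example: "type.googleapis.com/google.protobuf.Duration" needs no
+// quoting (only letters, digits, '.', and '/'), while a URL containing a
+// space, '-', or '%' must be quoted.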
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv's value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
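+
+// Rendered form (sketch, for a hypothetical registered type pkg.M):
+// [type.googleapis.com/pkg.M]: <
+//   ...fields of the unmarshaled pkg.M...
+// >
+// In compact mode this becomes "[type.googleapis.com/pkg.M]:<...> ".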
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking on them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, ok := extendable(pv.Interface()); ok {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
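+
+// For example (sketch): writeString(w, "a\"b\né") emits "a\"b\n\303\251"
+// between double quotes: the quote and newline take backslash escapes, and
+// the two UTF-8 bytes of 'é' (0xC3, 0xA9) are written as the octal escapes
+// \303 and \251 because they are not printable ASCII.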
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
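+
+// For example (sketch): the 3 unknown bytes 0x20 0x96 0x01 (field number 4,
+// wire type varint, value 150) render in non-compact mode as a
+// "/* 3 unknown bytes */" banner followed by "4: 150".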
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
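+
+// Usage sketch (msg is a hypothetical Message value):
+// s := (&TextMarshaler{Compact: true}).Text(msg) // one-line form
+// err := (&TextMarshaler{ExpandAny: true}).Marshal(os.Stdout, msg)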
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte(""))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000000..61f83c1e10
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,895 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// anyRepeatedlyUnpacked is the error string emitted when an Any message is
+// unpacked multiple times, or when a field in it is already set.
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
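+
+// For example: a ParseError{Message: "unmatched quote", Line: 1, Offset: 10}
+// renders as "line 1.10: unmatched quote"; the same message on line 3
+// renders as "line 3: unmatched quote" (the offset is shown only on line 1).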
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
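+
+// Examples (sketch): `\n` yields a newline, and `\101` (octal) and `\x41`
+// (hex) both yield "A". Note that `\u0041` yields the raw bytes 0x00 0x41
+// (the hex digits packed pairwise), not the UTF-8 encoding of the code
+// point, matching the byte-oriented handling above.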
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
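+
+// For example (sketch): the input `"foo" 'bar'` comes back from next() as a
+// single token whose unquoted value is "foobar"; adjacent quoted strings
+// separated only by whitespace are concatenated, as in C.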
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.