VOL-381 add unum container to support ONOS cluster formation under swarm
Change-Id: Ic260edda19bb199ed040f05164ab605f28c919d0
diff --git a/unum/vendor/github.com/docker/distribution/LICENSE b/unum/vendor/github.com/docker/distribution/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/unum/vendor/github.com/docker/distribution/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/unum/vendor/github.com/docker/distribution/digestset/set.go b/unum/vendor/github.com/docker/distribution/digestset/set.go
new file mode 100644
index 0000000..71327dc
--- /dev/null
+++ b/unum/vendor/github.com/docker/distribution/digestset/set.go
@@ -0,0 +1,247 @@
+package digestset
+
+import (
+ "errors"
+ "sort"
+ "strings"
+ "sync"
+
+ digest "github.com/opencontainers/go-digest"
+)
+
+var (
+ // ErrDigestNotFound is used when a matching digest
+ // could not be found in a set.
+ ErrDigestNotFound = errors.New("digest not found")
+
+ // ErrDigestAmbiguous is used when multiple digests
+ // are found in a set. None of the matching digests
+ // should be considered valid matches.
+ ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which
+// may be easily referenced by a string
+// representation of the digest as well as short representation.
+// The uniqueness of the short representation is based on other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected, therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. To mitigate collisions, an
+// appropriately long short code should be used.
+type Set struct {
+ mutex sync.RWMutex
+ entries digestEntries
+}
+
+// NewSet creates an empty set of digests
+// which may have digests added.
+func NewSet() *Set {
+ return &Set{
+ entries: digestEntries{},
+ }
+}
+
+// checkShortMatch checks whether two digests match as either whole
+// values or short values. This function does not test equality,
+// rather whether the second value could match against the first
+// value.
+func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
+ if len(hex) == len(shortHex) {
+ if hex != shortHex {
+ return false
+ }
+ if len(shortAlg) > 0 && string(alg) != shortAlg {
+ return false
+ }
+ } else if !strings.HasPrefix(hex, shortHex) {
+ return false
+ } else if len(shortAlg) > 0 && string(alg) != shortAlg {
+ return false
+ }
+ return true
+}
+
+// Lookup looks for a digest matching the given string representation.
+// If no digests could be found ErrDigestNotFound will be returned
+// with an empty digest value. If multiple matches are found
+// ErrDigestAmbiguous will be returned with an empty digest value.
+func (dst *Set) Lookup(d string) (digest.Digest, error) {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ if len(dst.entries) == 0 {
+ return "", ErrDigestNotFound
+ }
+ var (
+ searchFunc func(int) bool
+ alg digest.Algorithm
+ hex string
+ )
+ dgst, err := digest.Parse(d)
+ if err == digest.ErrDigestInvalidFormat {
+ hex = d
+ searchFunc = func(i int) bool {
+ return dst.entries[i].val >= d
+ }
+ } else {
+ hex = dgst.Hex()
+ alg = dgst.Algorithm()
+ searchFunc = func(i int) bool {
+ if dst.entries[i].val == hex {
+ return dst.entries[i].alg >= alg
+ }
+ return dst.entries[i].val >= hex
+ }
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
+ return "", ErrDigestNotFound
+ }
+ if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
+ return dst.entries[idx].digest, nil
+ }
+ if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
+ return "", ErrDigestAmbiguous
+ }
+
+ return dst.entries[idx].digest, nil
+}
+
+// Add adds the given digest to the set. An error will be returned
+// if the given digest is invalid. If the digest already exists in the
+// set, this operation will be a no-op.
+func (dst *Set) Add(d digest.Digest) error {
+ if err := d.Validate(); err != nil {
+ return err
+ }
+ dst.mutex.Lock()
+ defer dst.mutex.Unlock()
+ entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+ searchFunc := func(i int) bool {
+ if dst.entries[i].val == entry.val {
+ return dst.entries[i].alg >= entry.alg
+ }
+ return dst.entries[i].val >= entry.val
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ if idx == len(dst.entries) {
+ dst.entries = append(dst.entries, entry)
+ return nil
+ } else if dst.entries[idx].digest == d {
+ return nil
+ }
+
+ entries := append(dst.entries, nil)
+ copy(entries[idx+1:], entries[idx:len(entries)-1])
+ entries[idx] = entry
+ dst.entries = entries
+ return nil
+}
+
+// Remove removes the given digest from the set. An err will be
+// returned if the given digest is invalid. If the digest does
+// not exist in the set, this operation will be a no-op.
+func (dst *Set) Remove(d digest.Digest) error {
+ if err := d.Validate(); err != nil {
+ return err
+ }
+ dst.mutex.Lock()
+ defer dst.mutex.Unlock()
+ entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+ searchFunc := func(i int) bool {
+ if dst.entries[i].val == entry.val {
+ return dst.entries[i].alg >= entry.alg
+ }
+ return dst.entries[i].val >= entry.val
+ }
+ idx := sort.Search(len(dst.entries), searchFunc)
+ // Not found if idx is after or value at idx is not digest
+ if idx == len(dst.entries) || dst.entries[idx].digest != d {
+ return nil
+ }
+
+ entries := dst.entries
+ copy(entries[idx:], entries[idx+1:])
+ entries = entries[:len(entries)-1]
+ dst.entries = entries
+
+ return nil
+}
+
+// All returns all the digests in the set
+func (dst *Set) All() []digest.Digest {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ retValues := make([]digest.Digest, len(dst.entries))
+ for i := range dst.entries {
+ retValues[i] = dst.entries[i].digest
+ }
+
+ return retValues
+}
+
+// ShortCodeTable returns a map of Digest to unique short codes. The
+// length represents the minimum value, the maximum length may be the
+// entire value of digest if uniqueness cannot be achieved without the
+// full value. This function will attempt to make short codes as short
+// as possible to be unique.
+func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
+ dst.mutex.RLock()
+ defer dst.mutex.RUnlock()
+ m := make(map[digest.Digest]string, len(dst.entries))
+ l := length
+ resetIdx := 0
+ for i := 0; i < len(dst.entries); i++ {
+ var short string
+ extended := true
+ for extended {
+ extended = false
+ if len(dst.entries[i].val) <= l {
+ short = dst.entries[i].digest.String()
+ } else {
+ short = dst.entries[i].val[:l]
+ for j := i + 1; j < len(dst.entries); j++ {
+ if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
+ if j > resetIdx {
+ resetIdx = j
+ }
+ extended = true
+ } else {
+ break
+ }
+ }
+ if extended {
+ l++
+ }
+ }
+ }
+ m[dst.entries[i].digest] = short
+ if i >= resetIdx {
+ l = length
+ }
+ }
+ return m
+}
+
+type digestEntry struct {
+ alg digest.Algorithm
+ val string
+ digest digest.Digest
+}
+
+type digestEntries []*digestEntry
+
+func (d digestEntries) Len() int {
+ return len(d)
+}
+
+func (d digestEntries) Less(i, j int) bool {
+ if d[i].val != d[j].val {
+ return d[i].val < d[j].val
+ }
+ return d[i].alg < d[j].alg
+}
+
+func (d digestEntries) Swap(i, j int) {
+ d[i], d[j] = d[j], d[i]
+}
diff --git a/unum/vendor/github.com/docker/distribution/reference/helpers.go b/unum/vendor/github.com/docker/distribution/reference/helpers.go
new file mode 100644
index 0000000..978df7e
--- /dev/null
+++ b/unum/vendor/github.com/docker/distribution/reference/helpers.go
@@ -0,0 +1,42 @@
+package reference
+
+import "path"
+
+// IsNameOnly returns true if reference only contains a repo name.
+func IsNameOnly(ref Named) bool {
+ if _, ok := ref.(NamedTagged); ok {
+ return false
+ }
+ if _, ok := ref.(Canonical); ok {
+ return false
+ }
+ return true
+}
+
+// FamiliarName returns the familiar name string
+// for the given named, familiarizing if needed.
+func FamiliarName(ref Named) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().Name()
+ }
+ return ref.Name()
+}
+
+// FamiliarString returns the familiar string representation
+// for the given reference, familiarizing if needed.
+func FamiliarString(ref Reference) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().String()
+ }
+ return ref.String()
+}
+
+// FamiliarMatch reports whether ref matches the specified pattern.
+// See https://godoc.org/path#Match for supported patterns.
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+ matched, err := path.Match(pattern, FamiliarString(ref))
+ if namedRef, isNamed := ref.(Named); isNamed && !matched {
+ matched, _ = path.Match(pattern, FamiliarName(namedRef))
+ }
+ return matched, err
+}
diff --git a/unum/vendor/github.com/docker/distribution/reference/normalize.go b/unum/vendor/github.com/docker/distribution/reference/normalize.go
new file mode 100644
index 0000000..2d71fc5
--- /dev/null
+++ b/unum/vendor/github.com/docker/distribution/reference/normalize.go
@@ -0,0 +1,170 @@
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/docker/distribution/digestset"
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ legacyDefaultDomain = "index.docker.io"
+ defaultDomain = "docker.io"
+ officialRepoName = "library"
+ defaultTag = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+ Named
+ Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+ return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+ }
+ domain, remainder := splitDockerDomain(s)
+ var remoteName string
+ if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+ remoteName = remainder[:tagSep]
+ } else {
+ remoteName = remainder
+ }
+ if strings.ToLower(remoteName) != remoteName {
+ return nil, errors.New("invalid reference format: repository name must be lowercase")
+ }
+
+ ref, err := Parse(domain + "/" + remainder)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
+
+// splitDockerDomain splits a repository name to domain and remotename string.
+// If no valid domain is found, the default domain is used. Repository name
+// needs to be already validated before.
+func splitDockerDomain(name string) (domain, remainder string) {
+ i := strings.IndexRune(name, '/')
+ if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+ domain, remainder = defaultDomain, name
+ } else {
+ domain, remainder = name[:i], name[i+1:]
+ }
+ if domain == legacyDefaultDomain {
+ domain = defaultDomain
+ }
+ if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+ remainder = officialRepoName + "/" + remainder
+ }
+ return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
+func familiarizeName(named namedRepository) repository {
+ repo := repository{
+ domain: named.Domain(),
+ path: named.Path(),
+ }
+
+ if repo.domain == defaultDomain {
+ repo.domain = ""
+ // Handle official repositories which have the pattern "library/<official repo name>"
+ if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
+ repo.path = split[1]
+ }
+ }
+ return repo
+}
+
+func (r reference) Familiar() Named {
+ return reference{
+ namedRepository: familiarizeName(r.namedRepository),
+ tag: r.tag,
+ digest: r.digest,
+ }
+}
+
+func (r repository) Familiar() Named {
+ return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+ return taggedReference{
+ namedRepository: familiarizeName(t.namedRepository),
+ tag: t.tag,
+ }
+}
+
+func (c canonicalReference) Familiar() Named {
+ return canonicalReference{
+ namedRepository: familiarizeName(c.namedRepository),
+ digest: c.digest,
+ }
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+ if IsNameOnly(ref) {
+ namedTagged, err := WithTag(ref, defaultTag)
+ if err != nil {
+ // Default tag must be valid, to create a NamedTagged
+ // type with non-validated input the WithTag function
+ // should be used instead
+ panic(err)
+ }
+ return namedTagged
+ }
+ return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
+func ParseAnyReference(ref string) (Reference, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+ return digestReference("sha256:" + ref), nil
+ }
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+
+ return ParseNormalizedNamed(ref)
+}
+
+// ParseAnyReferenceWithSet parses a reference string as a possible short
+// identifier to be matched in a digest set, a full digest, or familiar name.
+func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
+ if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
+ dgst, err := ds.Lookup(ref)
+ if err == nil {
+ return digestReference(dgst), nil
+ }
+ } else {
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+ }
+
+ return ParseNormalizedNamed(ref)
+}
diff --git a/unum/vendor/github.com/docker/distribution/reference/reference.go b/unum/vendor/github.com/docker/distribution/reference/reference.go
new file mode 100644
index 0000000..2f66cca
--- /dev/null
+++ b/unum/vendor/github.com/docker/distribution/reference/reference.go
@@ -0,0 +1,433 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference := name [ ":" tag ] [ "@" digest ]
+// name := [domain '/'] path-component ['/' path-component]*
+// domain := domain-component ['.' domain-component]* [':' port-number]
+// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+// port-number := /[0-9]+/
+// path-component := alpha-numeric [separator alpha-numeric]*
+// alpha-numeric := /[a-z0-9]+/
+// separator := /[_.]|__|[-]*/
+//
+// tag := /[\w][\w.-]{0,127}/
+//
+// digest := digest-algorithm ":" digest-hex
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
+// digest-algorithm-separator := /[+.-_]/
+// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+//
+// identifier := /[a-f0-9]{64}/
+// short-identifier := /[a-f0-9]{6,64}/
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+ // NameTotalLengthMax is the maximum total number of characters in a repository name.
+ NameTotalLengthMax = 255
+)
+
+var (
+ // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+ ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+ // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+ ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+ // ErrNameNotCanonical is returned when a name is not canonical.
+ ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+ return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+ return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+ return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+ r, err := Parse(string(p))
+ if err != nil {
+ return err
+ }
+
+ f.reference = r
+ return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+ Reference
+ Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+ Reference
+ Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+ Named
+ Tag() string
+}
+
+// Digested is an object which has a digest
+// in which it can be referenced by
+type Digested interface {
+ Reference
+ Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name including a name with domain and digest
+type Canonical interface {
+ Named
+ Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
+type namedRepository interface {
+ Named
+ Domain() string
+ Path() string
+}
+
+// Domain returns the domain part of the Named reference
+func Domain(named Named) string {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain()
+ }
+ domain, _ := splitDomain(named.Name())
+ return domain
+}
+
+// Path returns the name without the domain part of the Named reference
+func Path(named Named) (name string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Path()
+ }
+ _, path := splitDomain(named.Name())
+ return path
+}
+
+func splitDomain(name string) (string, string) {
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if len(match) != 3 {
+ return "", name
+ }
+ return match[1], match[2]
+}
+
+// SplitHostname splits a named reference into a
+// hostname and name string. If no valid hostname is
+// found, the hostname is empty and the full value
+// is returned as name
+// DEPRECATED: Use Domain or Path
+func SplitHostname(named Named) (string, string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain(), r.Path()
+ }
+ return splitDomain(named.Name())
+}
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: Parse will not handle short digests.
+func Parse(s string) (Reference, error) {
+ matches := ReferenceRegexp.FindStringSubmatch(s)
+ if matches == nil {
+ if s == "" {
+ return nil, ErrNameEmpty
+ }
+ if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
+ return nil, ErrNameContainsUppercase
+ }
+ return nil, ErrReferenceInvalidFormat
+ }
+
+ if len(matches[1]) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ var repo repository
+
+ nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
+ if nameMatch != nil && len(nameMatch) == 3 {
+ repo.domain = nameMatch[1]
+ repo.path = nameMatch[2]
+ } else {
+ repo.domain = ""
+ repo.path = matches[1]
+ }
+
+ ref := reference{
+ namedRepository: repo,
+ tag: matches[2],
+ }
+ if matches[3] != "" {
+ var err error
+ ref.digest, err = digest.Parse(matches[3])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ r := getBestReferenceType(ref)
+ if r == nil {
+ return nil, ErrNameEmpty
+ }
+
+ return r, nil
+}
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name and be in the canonical
+// form, otherwise an error is returned.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: ParseNamed will not handle short digests.
+func ParseNamed(s string) (Named, error) {
+ named, err := ParseNormalizedNamed(s)
+ if err != nil {
+ return nil, err
+ }
+ // Round-trip check: normalization must not have altered the input,
+ // otherwise s was not already in canonical form.
+ if named.String() != s {
+ return nil, ErrNameNotCanonical
+ }
+ return named, nil
+}
+
+// WithName returns a named object representing the given string. If the input
+// is invalid ErrReferenceInvalidFormat will be returned.
+func WithName(name string) (Named, error) {
+ // Cheap length check before the (comparatively costly) regexp match.
+ if len(name) > NameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if match == nil || len(match) != 3 {
+ return nil, ErrReferenceInvalidFormat
+ }
+ return repository{
+ domain: match[1],
+ path: match[2],
+ }, nil
+}
+
+// WithTag combines the name from "name" and the tag from "tag" to form a
+// reference incorporating both the name and the tag.
+func WithTag(name Named, tag string) (NamedTagged, error) {
+ if !anchoredTagRegexp.MatchString(tag) {
+ return nil, ErrTagInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ // If name already carries a digest, keep it so no information is
+ // lost: the result is then a full name:tag@digest reference.
+ if canonical, ok := name.(Canonical); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tag,
+ digest: canonical.Digest(),
+ }, nil
+ }
+ return taggedReference{
+ namedRepository: repo,
+ tag: tag,
+ }, nil
+}
+
+// WithDigest combines the name from "name" and the digest from "digest" to form
+// a reference incorporating both the name and the digest.
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
+ if !anchoredDigestRegexp.MatchString(digest.String()) {
+ return nil, ErrDigestInvalidFormat
+ }
+ var repo repository
+ if r, ok := name.(namedRepository); ok {
+ repo.domain = r.Domain()
+ repo.path = r.Path()
+ } else {
+ repo.path = name.Name()
+ }
+ // If name already carries a tag, keep it so no information is
+ // lost: the result is then a full name:tag@digest reference.
+ if tagged, ok := name.(Tagged); ok {
+ return reference{
+ namedRepository: repo,
+ tag: tagged.Tag(),
+ digest: digest,
+ }, nil
+ }
+ return canonicalReference{
+ namedRepository: repo,
+ digest: digest,
+ }, nil
+}
+
+// TrimNamed removes any tag or digest from the named reference.
+func TrimNamed(ref Named) Named {
+ // A bare repository carries only domain and path, so rebuilding it
+ // from those two components drops any tag/digest ref may hold.
+ domain, path := SplitHostname(ref)
+ return repository{
+ domain: domain,
+ path: path,
+ }
+}
+
+// getBestReferenceType narrows ref to the most specific implementation
+// its populated fields allow: a digest-only reference, a canonical
+// (name@digest) reference, a bare repository, a tagged (name:tag)
+// reference, or ref itself when name, tag and digest are all set.
+// It returns nil when ref carries neither a name nor a digest.
+func getBestReferenceType(ref reference) Reference {
+ if ref.Name() == "" {
+ // Allow digest only references
+ if ref.digest != "" {
+ return digestReference(ref.digest)
+ }
+ return nil
+ }
+ if ref.tag == "" {
+ if ref.digest != "" {
+ return canonicalReference{
+ namedRepository: ref.namedRepository,
+ digest: ref.digest,
+ }
+ }
+ return ref.namedRepository
+ }
+ if ref.digest == "" {
+ return taggedReference{
+ namedRepository: ref.namedRepository,
+ tag: ref.tag,
+ }
+ }
+
+ return ref
+}
+
+// reference holds all three components of a fully-specified reference:
+// repository name (embedded), tag and digest.
+type reference struct {
+ namedRepository
+ tag string
+ digest digest.Digest
+}
+
+// String renders the full "name:tag@digest" form.
+func (r reference) String() string {
+ return r.Name() + ":" + r.tag + "@" + r.digest.String()
+}
+
+// Tag returns the tag component.
+func (r reference) Tag() string {
+ return r.tag
+}
+
+// Digest returns the digest component.
+func (r reference) Digest() digest.Digest {
+ return r.digest
+}
+
+// repository stores the two halves of a repository name: the registry
+// domain (possibly empty) and the remaining path.
+type repository struct {
+ domain string
+ path string
+}
+
+// String is the same as Name for a bare repository.
+func (r repository) String() string {
+ return r.Name()
+}
+
+// Name joins domain and path with "/", omitting the separator when no
+// domain is present.
+func (r repository) Name() string {
+ if r.domain == "" {
+ return r.path
+ }
+ return r.domain + "/" + r.path
+}
+
+// Domain returns the registry domain component (may be empty).
+func (r repository) Domain() string {
+ return r.domain
+}
+
+// Path returns the path component of the repository name.
+func (r repository) Path() string {
+ return r.path
+}
+
+// digestReference is a reference consisting solely of a digest, with no
+// repository name attached.
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+ return digest.Digest(d).String()
+}
+
+func (d digestReference) Digest() digest.Digest {
+ return digest.Digest(d)
+}
+
+// taggedReference pairs a repository name with a tag ("name:tag").
+type taggedReference struct {
+ namedRepository
+ tag string
+}
+
+func (t taggedReference) String() string {
+ return t.Name() + ":" + t.tag
+}
+
+func (t taggedReference) Tag() string {
+ return t.tag
+}
+
+// canonicalReference pairs a repository name with a digest
+// ("name@digest").
+type canonicalReference struct {
+ namedRepository
+ digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+ return c.Name() + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+ return c.digest
+}
diff --git a/unum/vendor/github.com/docker/distribution/reference/regexp.go b/unum/vendor/github.com/docker/distribution/reference/regexp.go
new file mode 100644
index 0000000..7860349
--- /dev/null
+++ b/unum/vendor/github.com/docker/distribution/reference/regexp.go
@@ -0,0 +1,143 @@
+package reference
+
+import "regexp"
+
+var (
+ // alphaNumericRegexp defines the alpha numeric atom, typically a
+ // component of names. This only allows lower case characters and digits.
+ alphaNumericRegexp = match(`[a-z0-9]+`)
+
+ // separatorRegexp defines the separators allowed to be embedded in name
+ // components. This allows one period, one or two underscores and multiple
+ // dashes.
+ separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+ // nameComponentRegexp restricts registry path component names to start
+ // with at least one letter or number, with following parts able to be
+ // separated by one period, one or two underscores and multiple dashes.
+ nameComponentRegexp = expression(
+ alphaNumericRegexp,
+ optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+ // domainComponentRegexp restricts the registry domain component of a
+ // repository name to start with a component as defined by DomainRegexp
+ // and followed by an optional port.
+ domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+ // DomainRegexp defines the structure of potential domain components
+ // that may be part of image names. This is purposely a subset of what is
+ // allowed by DNS to ensure backwards compatibility with Docker image
+ // names.
+ DomainRegexp = expression(
+ domainComponentRegexp,
+ optional(repeated(literal(`.`), domainComponentRegexp)),
+ optional(literal(`:`), match(`[0-9]+`)))
+
+ // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+ TagRegexp = match(`[\w][\w.-]{0,127}`)
+
+ // anchoredTagRegexp matches valid tag names, anchored at the start and
+ // end of the matched string.
+ anchoredTagRegexp = anchored(TagRegexp)
+
+ // DigestRegexp matches valid digests.
+ DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+
+ // anchoredDigestRegexp matches valid digests, anchored at the start and
+ // end of the matched string.
+ anchoredDigestRegexp = anchored(DigestRegexp)
+
+ // NameRegexp is the format for the name component of references. The
+ // regexp has capturing groups for the domain and name part omitting
+ // the separating forward slash from either.
+ NameRegexp = expression(
+ optional(DomainRegexp, literal(`/`)),
+ nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp)))
+
+ // anchoredNameRegexp is used to parse a name value, capturing the
+ // domain and trailing components.
+ anchoredNameRegexp = anchored(
+ optional(capture(DomainRegexp), literal(`/`)),
+ capture(nameComponentRegexp,
+ optional(repeated(literal(`/`), nameComponentRegexp))))
+
+ // ReferenceRegexp is the full supported format of a reference. The regexp
+ // is anchored and has capturing groups for name, tag, and digest
+ // components.
+ ReferenceRegexp = anchored(capture(NameRegexp),
+ optional(literal(":"), capture(TagRegexp)),
+ optional(literal("@"), capture(DigestRegexp)))
+
+ // IdentifierRegexp is the format for string identifier used as a
+ // content addressable identifier using sha256. These identifiers
+ // are like digests without the algorithm, since sha256 is used.
+ IdentifierRegexp = match(`([a-f0-9]{64})`)
+
+ // ShortIdentifierRegexp is the format used to represent a prefix
+ // of an identifier. A prefix may be used to match a sha256 identifier
+ // within a list of trusted identifiers.
+ ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
+
+ // anchoredIdentifierRegexp is used to check or match an
+ // identifier value, anchored at start and end of string.
+ anchoredIdentifierRegexp = anchored(IdentifierRegexp)
+
+ // anchoredShortIdentifierRegexp is used to check if a value
+ // is a possible identifier prefix, anchored at start and end
+ // of string.
+ anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
+)
+
+// match compiles the string to a regular expression.
+var match = regexp.MustCompile
+
+// literal compiles s into a literal regular expression, escaping any regexp
+// reserved characters. It panics (at package init time, since all callers
+// are package-level var initializers) if the escaped form is not a pure
+// literal.
+func literal(s string) *regexp.Regexp {
+ re := match(regexp.QuoteMeta(s))
+
+ if _, complete := re.LiteralPrefix(); !complete {
+ panic("must be a literal")
+ }
+
+ return re
+}
+
+// expression defines a full expression, where each regular expression must
+// follow the previous.
+func expression(res ...*regexp.Regexp) *regexp.Regexp {
+ var s string
+ for _, re := range res {
+ s += re.String()
+ }
+
+ return match(s)
+}
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `?`)
+}
+
+// repeated wraps the regexp in a non-capturing group to get one or more
+// matches.
+func repeated(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(group(expression(res...)).String() + `+`)
+}
+
+// group wraps the regexp in a non-capturing group.
+func group(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(?:` + expression(res...).String() + `)`)
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`(` + expression(res...).String() + `)`)
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...*regexp.Regexp) *regexp.Regexp {
+ return match(`^` + expression(res...).String() + `$`)
+}
diff --git a/unum/vendor/github.com/docker/docker/LICENSE b/unum/vendor/github.com/docker/docker/LICENSE
new file mode 100644
index 0000000..9c8e20a
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2017 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/unum/vendor/github.com/docker/docker/NOTICE b/unum/vendor/github.com/docker/docker/NOTICE
new file mode 100644
index 0000000..0c74e15
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/NOTICE
@@ -0,0 +1,19 @@
+Docker
+Copyright 2012-2017 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+This product contains software (https://github.com/kr/pty) developed
+by Keith Rarick, licensed under the MIT License.
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/unum/vendor/github.com/docker/docker/api/README.md b/unum/vendor/github.com/docker/docker/api/README.md
new file mode 100644
index 0000000..bb88132
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/README.md
@@ -0,0 +1,42 @@
+# Working on the Engine API
+
+The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
+
+It consists of various components in this repository:
+
+- `api/swagger.yaml` A Swagger definition of the API.
+- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
+- `cli/` The command-line client.
+- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
+- `daemon/` The daemon, which serves the API.
+
+## Swagger definition
+
+The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
+
+1. Automatically generate documentation.
+2. Automatically generate the Go server and client. (A work-in-progress.)
+3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
+
+## Updating the API documentation
+
+The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation.
+
+The file is split into two main sections:
+
+- `definitions`, which defines re-usable objects used in requests and responses
+- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
+
+To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
+
+There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
+
+`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful for when you are making edits to ensure you are doing the right thing.
+
+## Viewing the API documentation
+
+When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
+
+Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
+
+The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
diff --git a/unum/vendor/github.com/docker/docker/api/common.go b/unum/vendor/github.com/docker/docker/api/common.go
new file mode 100644
index 0000000..6e462ae
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/common.go
@@ -0,0 +1,65 @@
+package api
+
+import (
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/system"
+ "github.com/docker/libtrust"
+)
+
+// Common constants for daemon and client.
+const (
+ // DefaultVersion of Current REST API
+ DefaultVersion string = "1.32"
+
+ // NoBaseImageSpecifier is the symbol used by the FROM
+ // command to specify that no base image is to be used.
+ NoBaseImageSpecifier string = "scratch"
+)
+
+// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
+// otherwise generates a new one
+func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
+ // 0700: the key directory must be readable by the owner only.
+ err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "")
+ if err != nil {
+ return nil, err
+ }
+ trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
+ if err == libtrust.ErrKeyFileDoesNotExist {
+ // No key yet: generate one and persist it. The file extension
+ // selects the serialization format (JWK vs. PEM).
+ trustKey, err = libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, fmt.Errorf("Error generating key: %s", err)
+ }
+ encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
+ if err != nil {
+ return nil, fmt.Errorf("Error serializing key: %s", err)
+ }
+ // Atomic write with 0600 so a crash cannot leave a partial or
+ // world-readable private key on disk.
+ if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
+ return nil, fmt.Errorf("Error saving key file: %s", err)
+ }
+ } else if err != nil {
+ return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
+ }
+ return trustKey, nil
+}
+
+// serializePrivateKey encodes key according to the file extension ext:
+// ".json" or ".jwk" yields a JSON Web Key, anything else yields PEM.
+func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
+ if ext == ".json" || ext == ".jwk" {
+ encoded, err = json.Marshal(key)
+ if err != nil {
+ return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
+ }
+ } else {
+ // NOTE: this err shadows the named result; a PEMBlock failure is
+ // returned explicitly below, so the naked return is only reached
+ // on success.
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
+ }
+ encoded = pem.EncodeToMemory(pemBlock)
+ }
+ return
+}
diff --git a/unum/vendor/github.com/docker/docker/api/common_unix.go b/unum/vendor/github.com/docker/docker/api/common_unix.go
new file mode 100644
index 0000000..081e61c
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/common_unix.go
@@ -0,0 +1,6 @@
+// +build !windows
+
+package api
+
+// MinVersion represents the minimum REST API version supported on
+// non-Windows platforms.
+const MinVersion string = "1.12"
diff --git a/unum/vendor/github.com/docker/docker/api/common_windows.go b/unum/vendor/github.com/docker/docker/api/common_windows.go
new file mode 100644
index 0000000..a6268a4
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/common_windows.go
@@ -0,0 +1,8 @@
+package api
+
+// MinVersion represents the minimum REST API version supported on Windows.
+// Technically the first daemon API version released on Windows is v1.25 in
+// engine version 1.13. However, some clients are explicitly using downlevel
+// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
+// Hence also allowing 1.24 on Windows.
+const MinVersion string = "1.24"
diff --git a/unum/vendor/github.com/docker/docker/api/names.go b/unum/vendor/github.com/docker/docker/api/names.go
new file mode 100644
index 0000000..f147d1f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/names.go
@@ -0,0 +1,9 @@
+package api
+
+import "regexp"
+
+// RestrictedNameChars collects the characters allowed to represent a name,
+// normally used to validate container and volume names. Note it is a
+// character-class pair: a leading alphanumeric followed by one allowed
+// character; repetition is applied by RestrictedNamePattern below.
+const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
+
+// RestrictedNamePattern is a regular expression to validate names against the
+// collection of restricted characters, anchored to the whole string.
+var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`)
diff --git a/unum/vendor/github.com/docker/docker/api/swagger-gen.yaml b/unum/vendor/github.com/docker/docker/api/swagger-gen.yaml
new file mode 100644
index 0000000..f07a027
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/swagger-gen.yaml
@@ -0,0 +1,12 @@
+
+layout:
+ models:
+ - name: definition
+ source: asset:model
+ target: "{{ joinFilePath .Target .ModelPackage }}"
+ file_name: "{{ (snakize (pascalize .Name)) }}.go"
+ operations:
+ - name: handler
+ source: asset:serverOperation
+ target: "{{ joinFilePath .Target .APIPackage .Package }}"
+ file_name: "{{ (snakize (pascalize .Name)) }}.go"
diff --git a/unum/vendor/github.com/docker/docker/api/swagger.yaml b/unum/vendor/github.com/docker/docker/api/swagger.yaml
new file mode 100644
index 0000000..0c9ee95
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/swagger.yaml
@@ -0,0 +1,9930 @@
+# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
+#
+# This is used for generating API documentation and the types used by the
+# client/server. See api/README.md for more information.
+#
+# Some style notes:
+# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
+# descriptions.
+# - There is no maximum line length, for ease of editing and pretty diffs.
+# - operationIds are in the format "NounVerb", with a singular noun.
+
+swagger: "2.0"
+schemes:
+ - "http"
+ - "https"
+produces:
+ - "application/json"
+ - "text/plain"
+consumes:
+ - "application/json"
+ - "text/plain"
+basePath: "/v1.32"
+info:
+ title: "Docker Engine API"
+ version: "1.32"
+ x-logo:
+ url: "https://docs.docker.com/images/logo-docker-main.png"
+ description: |
+ The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API.
+
+ Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls.
+
+ # Errors
+
+ The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format:
+
+ ```
+ {
+ "message": "page not found"
+ }
+ ```
+
+ # Versioning
+
+ The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break.
+
+ For Docker Engine 17.07, the API version is 1.31. To lock to this version, you prefix the URL with `/v1.31`. For example, calling `/info` is the same as calling `/v1.31/info`.
+
+ Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine.
+
+ In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated and will be removed in a future version of Docker.
+
+ The API uses an open schema model, which means the server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons.
+
+ This documentation is for version 1.32 of the API. Use this table to find documentation for previous versions of the API:
+
+ Docker version | API version | Changes
+ ----------------|-------------|---------
+ 17.07.x | [1.31](https://docs.docker.com/engine/api/v1.31/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-31-api-changes)
+ 17.06.x | [1.30](https://docs.docker.com/engine/api/v1.30/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-30-api-changes)
+ 17.05.x | [1.29](https://docs.docker.com/engine/api/v1.29/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-29-api-changes)
+ 17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes)
+ 17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes)
+ 1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes)
+ 1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes)
+ 1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes)
+ 1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes)
+ 1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes)
+ 1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes)
+ 1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes)
+ 1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes)
+ 1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes)
+
+ # Authentication
+
+ Authentication for registries is handled client side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure:
+
+ ```
+ {
+ "username": "string",
+ "password": "string",
+ "email": "string",
+ "serveraddress": "string"
+ }
+ ```
+
+ The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required.
+
+ If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials:
+
+ ```
+ {
+ "identitytoken": "9cbaf023786cd7..."
+ }
+ ```
+
+# The tags on paths define the menu sections in the ReDoc documentation, so
+# the usage of tags must make sense for that:
+# - They should be singular, not plural.
+# - There should not be too many tags, or the menu becomes unwieldy. For
+# example, it is preferable to add a path to the "System" tag instead of
+# creating a tag with a single path in it.
+# - The order of tags in this list defines the order in the menu.
+tags:
+ # Primary objects
+ - name: "Container"
+ x-displayName: "Containers"
+ description: |
+ Create and manage containers.
+ - name: "Image"
+ x-displayName: "Images"
+ - name: "Network"
+ x-displayName: "Networks"
+ description: |
+ Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information.
+ - name: "Volume"
+ x-displayName: "Volumes"
+ description: |
+ Create and manage persistent storage that can be attached to containers.
+ - name: "Exec"
+ x-displayName: "Exec"
+ description: |
+ Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information.
+
+ To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`.
+ # Swarm things
+ - name: "Swarm"
+ x-displayName: "Swarm"
+ description: |
+ Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information.
+ - name: "Node"
+ x-displayName: "Nodes"
+ description: |
+ Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work.
+ - name: "Service"
+ x-displayName: "Services"
+ description: |
+ Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work.
+ - name: "Task"
+ x-displayName: "Tasks"
+ description: |
+ A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work.
+ - name: "Secret"
+ x-displayName: "Secrets"
+ description: |
+ Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work.
+ # System things
+ - name: "Plugin"
+ x-displayName: "Plugins"
+ - name: "System"
+ x-displayName: "System"
+
+definitions:
+ Port:
+ type: "object"
+ description: "An open port on a container"
+ required: [PrivatePort, Type]
+ properties:
+ IP:
+ type: "string"
+ format: "ip-address"
+ PrivatePort:
+ type: "integer"
+ format: "uint16"
+ x-nullable: false
+ description: "Port on the container"
+ PublicPort:
+ type: "integer"
+ format: "uint16"
+ description: "Port exposed on the host"
+ Type:
+ type: "string"
+ x-nullable: false
+ enum: ["tcp", "udp"]
+ example:
+ PrivatePort: 8080
+ PublicPort: 80
+ Type: "tcp"
+
+ MountPoint:
+ type: "object"
+ description: "A mount point inside a container"
+ properties:
+ Type:
+ type: "string"
+ Name:
+ type: "string"
+ Source:
+ type: "string"
+ Destination:
+ type: "string"
+ Driver:
+ type: "string"
+ Mode:
+ type: "string"
+ RW:
+ type: "boolean"
+ Propagation:
+ type: "string"
+
+ DeviceMapping:
+ type: "object"
+ description: "A device mapping between the host and container"
+ properties:
+ PathOnHost:
+ type: "string"
+ PathInContainer:
+ type: "string"
+ CgroupPermissions:
+ type: "string"
+ example:
+ PathOnHost: "/dev/deviceName"
+ PathInContainer: "/dev/deviceName"
+ CgroupPermissions: "mrw"
+
+ ThrottleDevice:
+ type: "object"
+ properties:
+ Path:
+ description: "Device path"
+ type: "string"
+ Rate:
+ description: "Rate"
+ type: "integer"
+ format: "int64"
+ minimum: 0
+
+ Mount:
+ type: "object"
+ properties:
+ Target:
+ description: "Container path."
+ type: "string"
+ Source:
+ description: "Mount source (e.g. a volume name, a host path)."
+ type: "string"
+ Type:
+ description: |
+ The mount type. Available types:
+
+ - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.
+ - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.
+ - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.
+ type: "string"
+ enum:
+ - "bind"
+ - "volume"
+ - "tmpfs"
+ ReadOnly:
+ description: "Whether the mount should be read-only."
+ type: "boolean"
+ Consistency:
+ description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`."
+ type: "string"
+ BindOptions:
+ description: "Optional configuration for the `bind` type."
+ type: "object"
+ properties:
+ Propagation:
+ description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`."
+ enum:
+ - "private"
+ - "rprivate"
+ - "shared"
+ - "rshared"
+ - "slave"
+ - "rslave"
+ VolumeOptions:
+ description: "Optional configuration for the `volume` type."
+ type: "object"
+ properties:
+ NoCopy:
+ description: "Populate volume with data from the target."
+ type: "boolean"
+ default: false
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ DriverConfig:
+ description: "Map of driver specific options"
+ type: "object"
+ properties:
+ Name:
+ description: "Name of the driver to use to create the volume."
+ type: "string"
+ Options:
+ description: "key/value map of driver specific options."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ TmpfsOptions:
+ description: "Optional configuration for the `tmpfs` type."
+ type: "object"
+ properties:
+ SizeBytes:
+ description: "The size for the tmpfs mount in bytes."
+ type: "integer"
+ format: "int64"
+ Mode:
+ description: "The permission mode for the tmpfs mount in an integer."
+ type: "integer"
+
+ RestartPolicy:
+ description: |
+ The behavior to apply when the container exits. The default is not to restart.
+
+ An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server.
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ description: |
+ - Empty string means not to restart
+ - `always` Always restart
+ - `unless-stopped` Restart always except when the user has manually stopped the container
+ - `on-failure` Restart only when the container exit code is non-zero
+ enum:
+ - ""
+ - "always"
+ - "unless-stopped"
+ - "on-failure"
+ MaximumRetryCount:
+ type: "integer"
+ description: "If `on-failure` is used, the number of times to retry before giving up"
+
+ Resources:
+ description: "A container's resources (cgroups config, ulimits, etc)"
+ type: "object"
+ properties:
+ # Applicable to all platforms
+ CpuShares:
+ description: "An integer value representing this container's relative CPU weight versus other containers."
+ type: "integer"
+ Memory:
+ description: "Memory limit in bytes."
+ type: "integer"
+ default: 0
+ # Applicable to UNIX platforms
+ CgroupParent:
+ description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist."
+ type: "string"
+ BlkioWeight:
+ description: "Block IO weight (relative weight)."
+ type: "integer"
+ minimum: 0
+ maximum: 1000
+ BlkioWeightDevice:
+ description: |
+ Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`.
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Path:
+ type: "string"
+ Weight:
+ type: "integer"
+ minimum: 0
+ BlkioDeviceReadBps:
+ description: |
+ Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`.
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ BlkioDeviceWriteBps:
+ description: |
+ Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`.
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ BlkioDeviceReadIOps:
+ description: |
+ Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`.
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ BlkioDeviceWriteIOps:
+ description: |
+ Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`.
+ type: "array"
+ items:
+ $ref: "#/definitions/ThrottleDevice"
+ CpuPeriod:
+ description: "The length of a CPU period in microseconds."
+ type: "integer"
+ format: "int64"
+ CpuQuota:
+ description: "Microseconds of CPU time that the container can get in a CPU period."
+ type: "integer"
+ format: "int64"
+ CpuRealtimePeriod:
+ description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks."
+ type: "integer"
+ format: "int64"
+ CpuRealtimeRuntime:
+ description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks."
+ type: "integer"
+ format: "int64"
+ CpusetCpus:
+ description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)"
+ type: "string"
+ example: "0-3"
+ CpusetMems:
+ description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems."
+ type: "string"
+ Devices:
+ description: "A list of devices to add to the container."
+ type: "array"
+ items:
+ $ref: "#/definitions/DeviceMapping"
+ DeviceCgroupRules:
+ description: "a list of cgroup rules to apply to the container"
+ type: "array"
+ items:
+ type: "string"
+ example: "c 13:* rwm"
+ DiskQuota:
+ description: "Disk limit (in bytes)."
+ type: "integer"
+ format: "int64"
+ KernelMemory:
+ description: "Kernel memory limit in bytes."
+ type: "integer"
+ format: "int64"
+ MemoryReservation:
+ description: "Memory soft limit in bytes."
+ type: "integer"
+ format: "int64"
+ MemorySwap:
+ description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap."
+ type: "integer"
+ format: "int64"
+ MemorySwappiness:
+ description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100."
+ type: "integer"
+ format: "int64"
+ minimum: 0
+ maximum: 100
+ NanoCPUs:
+ description: "CPU quota in units of 10<sup>-9</sup> CPUs."
+ type: "integer"
+ format: "int64"
+ OomKillDisable:
+ description: "Disable OOM Killer for the container."
+ type: "boolean"
+ PidsLimit:
+ description: "Tune a container's pids limit. Set -1 for unlimited."
+ type: "integer"
+ format: "int64"
+ Ulimits:
+ description: |
+ A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Name:
+ description: "Name of ulimit"
+ type: "string"
+ Soft:
+ description: "Soft limit"
+ type: "integer"
+ Hard:
+ description: "Hard limit"
+ type: "integer"
+ # Applicable to Windows
+ CpuCount:
+ description: |
+ The number of usable CPUs (Windows only).
+
+ On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last.
+ type: "integer"
+ format: "int64"
+ CpuPercent:
+ description: |
+ The usable percentage of the available CPUs (Windows only).
+
+ On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last.
+ type: "integer"
+ format: "int64"
+ IOMaximumIOps:
+ description: "Maximum IOps for the container system drive (Windows only)"
+ type: "integer"
+ format: "int64"
+ IOMaximumBandwidth:
+ description: "Maximum IO in bytes per second for the container system drive (Windows only)"
+ type: "integer"
+ format: "int64"
+
+ ResourceObject:
+ description: "An object describing the resources which can be advertised by a node and requested by a task"
+ type: "object"
+ properties:
+ NanoCPUs:
+ type: "integer"
+ format: "int64"
+ example: 4000000000
+ MemoryBytes:
+ type: "integer"
+ format: "int64"
+ example: 8272408576
+ GenericResources:
+ $ref: "#/definitions/GenericResources"
+
+ GenericResources:
+ description: "User-defined resources can be either Integer resources (e.g., `SSD=3`) or String resources (e.g., `GPU=UUID1`)"
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ NamedResourceSpec:
+ type: "object"
+ properties:
+ Kind:
+ type: "string"
+ Value:
+ type: "string"
+ DiscreteResourceSpec:
+ type: "object"
+ properties:
+ Kind:
+ type: "string"
+ Value:
+ type: "integer"
+ format: "int64"
+ example:
+ - DiscreteResourceSpec:
+ Kind: "SSD"
+ Value: 3
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID1"
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID2"
+
+ HealthConfig:
+ description: "A test to perform to check that the container is healthy."
+ type: "object"
+ properties:
+ Test:
+ description: |
+ The test to perform. Possible values are:
+
+ - `[]` inherit healthcheck from image or parent image
+ - `["NONE"]` disable healthcheck
+ - `["CMD", args...]` exec arguments directly
+ - `["CMD-SHELL", command]` run command with system's default shell
+ type: "array"
+ items:
+ type: "string"
+ Interval:
+ description: "The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ type: "integer"
+ Timeout:
+ description: "The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ type: "integer"
+ Retries:
+ description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit."
+ type: "integer"
+ StartPeriod:
+ description: "Start period for the container to initialize before starting health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit."
+ type: "integer"
+
+ HostConfig:
+ description: "Container configuration that depends on the host we are running on"
+ allOf:
+ - $ref: "#/definitions/Resources"
+ - type: "object"
+ properties:
+ # Applicable to all platforms
+ Binds:
+ type: "array"
+ description: |
+ A list of volume bindings for this container. Each volume binding is a string in one of these forms:
+
+ - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path.
+ - `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path.
+ - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path.
+ - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path.
+ items:
+ type: "string"
+ ContainerIDFile:
+ type: "string"
+ description: "Path to a file where the container ID is written"
+ LogConfig:
+ type: "object"
+ description: "The logging configuration for this container"
+ properties:
+ Type:
+ type: "string"
+ enum:
+ - "json-file"
+ - "syslog"
+ - "journald"
+ - "gelf"
+ - "fluentd"
+ - "awslogs"
+ - "splunk"
+ - "etwlogs"
+ - "none"
+ Config:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ NetworkMode:
+ type: "string"
+ description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
+ as a custom network's name to which this container should connect to."
+ PortBindings:
+ type: "object"
+ description: "A map of exposed container ports and the host port they should map to."
+ additionalProperties:
+ type: "object"
+ properties:
+ HostIp:
+ type: "string"
+ description: "The host IP address"
+ HostPort:
+ type: "string"
+ description: "The host port number, as a string"
+ RestartPolicy:
+ $ref: "#/definitions/RestartPolicy"
+ AutoRemove:
+ type: "boolean"
+ description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set."
+ VolumeDriver:
+ type: "string"
+ description: "Driver that this container uses to mount volumes."
+ VolumesFrom:
+ type: "array"
+ description: "A list of volumes to inherit from another container, specified in the form `<container name>[:<ro|rw>]`."
+ items:
+ type: "string"
+ Mounts:
+ description: "Specification for mounts to be added to the container."
+ type: "array"
+ items:
+ $ref: "#/definitions/Mount"
+
+ # Applicable to UNIX platforms
+ CapAdd:
+ type: "array"
+ description: "A list of kernel capabilities to add to the container."
+ items:
+ type: "string"
+ CapDrop:
+ type: "array"
+ description: "A list of kernel capabilities to drop from the container."
+ items:
+ type: "string"
+ Dns:
+ type: "array"
+ description: "A list of DNS servers for the container to use."
+ items:
+ type: "string"
+ DnsOptions:
+ type: "array"
+ description: "A list of DNS options."
+ items:
+ type: "string"
+ DnsSearch:
+ type: "array"
+ description: "A list of DNS search domains."
+ items:
+ type: "string"
+ ExtraHosts:
+ type: "array"
+ description: |
+ A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`.
+ items:
+ type: "string"
+ GroupAdd:
+ type: "array"
+ description: "A list of additional groups that the container process will run as."
+ items:
+ type: "string"
+ IpcMode:
+ type: "string"
+ description: |
+ IPC sharing mode for the container. Possible values are:
+
+ - `"none"`: own private IPC namespace, with /dev/shm not mounted
+ - `"private"`: own private IPC namespace
+ - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers
+ - `"container:<name|id>"`: join another (shareable) container's IPC namespace
+ - `"host"`: use the host system's IPC namespace
+
+ If not specified, daemon default is used, which can either be `"private"`
+ or `"shareable"`, depending on daemon version and configuration.
+ Cgroup:
+ type: "string"
+ description: "Cgroup to use for the container."
+ Links:
+ type: "array"
+ description: "A list of links for the container in the form `container_name:alias`."
+ items:
+ type: "string"
+ OomScoreAdj:
+ type: "integer"
+ description: "An integer value containing the score given to the container in order to tune OOM killer preferences."
+ example: 500
+ PidMode:
+ type: "string"
+ description: |
+ Set the PID (Process) Namespace mode for the container. It can be either:
+
+ - `"container:<name|id>"`: joins another container's PID namespace
+ - `"host"`: use the host's PID namespace inside the container
+ Privileged:
+ type: "boolean"
+ description: "Gives the container full access to the host."
+ PublishAllPorts:
+ type: "boolean"
+ description: "Allocates a random host port for all of a container's exposed ports."
+ ReadonlyRootfs:
+ type: "boolean"
+ description: "Mount the container's root filesystem as read only."
+ SecurityOpt:
+ type: "array"
+ description: "A list of string values to customize labels for MLS
+ systems, such as SELinux."
+ items:
+ type: "string"
+ StorageOpt:
+ type: "object"
+ description: |
+ Storage driver options for this container, in the form `{"size": "120G"}`.
+ additionalProperties:
+ type: "string"
+ Tmpfs:
+ type: "object"
+ description: |
+ A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`.
+ additionalProperties:
+ type: "string"
+ UTSMode:
+ type: "string"
+ description: "UTS namespace to use for the container."
+ UsernsMode:
+ type: "string"
+ description: "Sets the user namespace mode for the container when the user namespace remapping option is enabled."
+ ShmSize:
+ type: "integer"
+ description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB."
+ minimum: 0
+ Sysctls:
+ type: "object"
+ description: |
+ A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}`
+ additionalProperties:
+ type: "string"
+ Runtime:
+ type: "string"
+ description: "Runtime to use with this container."
+ # Applicable to Windows
+ ConsoleSize:
+ type: "array"
+ description: "Initial console size, as an `[height, width]` array. (Windows only)"
+ minItems: 2
+ maxItems: 2
+ items:
+ type: "integer"
+ minimum: 0
+ Isolation:
+ type: "string"
+ description: "Isolation technology of the container. (Windows only)"
+ enum:
+ - "default"
+ - "process"
+ - "hyperv"
+
+ ContainerConfig:
+ description: "Configuration for a container that is portable between hosts"
+ type: "object"
+ properties:
+ Hostname:
+ description: "The hostname to use for the container, as a valid RFC 1123 hostname."
+ type: "string"
+ Domainname:
+ description: "The domain name to use for the container."
+ type: "string"
+ User:
+ description: "The user that commands are run as inside the container."
+ type: "string"
+ AttachStdin:
+ description: "Whether to attach to `stdin`."
+ type: "boolean"
+ default: false
+ AttachStdout:
+ description: "Whether to attach to `stdout`."
+ type: "boolean"
+ default: true
+ AttachStderr:
+ description: "Whether to attach to `stderr`."
+ type: "boolean"
+ default: true
+ ExposedPorts:
+ description: |
+ An object mapping ports to an empty object in the form:
+
+ `{"<port>/<tcp|udp>": {}}`
+ type: "object"
+ additionalProperties:
+ type: "object"
+ enum:
+ - {}
+ default: {}
+ Tty:
+ description: "Attach standard streams to a TTY, including `stdin` if it is not closed."
+ type: "boolean"
+ default: false
+ OpenStdin:
+ description: "Open `stdin`"
+ type: "boolean"
+ default: false
+ StdinOnce:
+ description: "Close `stdin` after one attached client disconnects"
+ type: "boolean"
+ default: false
+ Env:
+ description: |
+ A list of environment variables to set inside the container in the form `["VAR=value", ...]`. A variable without `=` is removed from the environment, rather than to have an empty value.
+ type: "array"
+ items:
+ type: "string"
+ Cmd:
+ description: "Command to run specified as a string or an array of strings."
+ type:
+ - "array"
+ - "string"
+ items:
+ type: "string"
+ Healthcheck:
+ $ref: "#/definitions/HealthConfig"
+ ArgsEscaped:
+ description: "Command is already escaped (Windows only)"
+ type: "boolean"
+ Image:
+ description: "The name of the image to use when creating the container"
+ type: "string"
+ Volumes:
+ description: "An object mapping mount point paths inside the container to empty objects."
+ type: "object"
+ properties:
+ additionalProperties:
+ type: "object"
+ enum:
+ - {}
+ default: {}
+ WorkingDir:
+ description: "The working directory for commands to run in."
+ type: "string"
+ Entrypoint:
+ description: |
+ The entry point for the container as a string or an array of strings.
+
+ If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
+ type:
+ - "array"
+ - "string"
+ items:
+ type: "string"
+ NetworkDisabled:
+ description: "Disable networking for the container."
+ type: "boolean"
+ MacAddress:
+ description: "MAC address of the container."
+ type: "string"
+ OnBuild:
+ description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`."
+ type: "array"
+ items:
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ StopSignal:
+ description: "Signal to stop a container as a string or unsigned integer."
+ type: "string"
+ default: "SIGTERM"
+ StopTimeout:
+ description: "Timeout to stop a container in seconds."
+ type: "integer"
+ default: 10
+ Shell:
+ description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell."
+ type: "array"
+ items:
+ type: "string"
+
+ NetworkSettings:
+ description: "NetworkSettings exposes the network settings in the API"
+ type: "object"
+ properties:
+ Bridge:
+ description: Name of the network's bridge (for example, `docker0`).
+ type: "string"
+ example: "docker0"
+ SandboxID:
+ description: SandboxID uniquely represents a container's network stack.
+ type: "string"
+ example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3"
+ HairpinMode:
+ description: |
+ Indicates if hairpin NAT should be enabled on the virtual interface.
+ type: "boolean"
+ example: false
+ LinkLocalIPv6Address:
+ description: IPv6 unicast address using the link-local prefix.
+ type: "string"
+ example: "fe80::42:acff:fe11:1"
+ LinkLocalIPv6PrefixLen:
+ description: Prefix length of the IPv6 unicast address.
+ type: "integer"
+ example: "64"
+ Ports:
+ $ref: "#/definitions/PortMap"
+ SandboxKey:
+ description: SandboxKey identifies the sandbox
+ type: "string"
+ example: "/var/run/docker/netns/8ab54b426c38"
+
+ # TODO is SecondaryIPAddresses actually used?
+ SecondaryIPAddresses:
+ description: ""
+ type: "array"
+ items:
+ $ref: "#/definitions/Address"
+ x-nullable: true
+
+ # TODO is SecondaryIPv6Addresses actually used?
+ SecondaryIPv6Addresses:
+ description: ""
+ type: "array"
+ items:
+ $ref: "#/definitions/Address"
+ x-nullable: true
+
+ # TODO properties below are part of DefaultNetworkSettings, which is
+ # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12
+ EndpointID:
+ description: |
+ EndpointID uniquely represents a service endpoint in a Sandbox.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
+ Gateway:
+ description: |
+ Gateway address for the default "bridge" network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "172.17.0.1"
+ GlobalIPv6Address:
+ description: |
+ Global IPv6 address for the default "bridge" network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "2001:db8::5689"
+ GlobalIPv6PrefixLen:
+ description: |
+ Mask length of the global IPv6 address.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "integer"
+ example: 64
+ IPAddress:
+ description: |
+ IPv4 address for the default "bridge" network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "172.17.0.4"
+ IPPrefixLen:
+ description: |
+ Mask length of the IPv4 address.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "integer"
+ example: 16
+ IPv6Gateway:
+ description: |
+ IPv6 gateway address for this network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "2001:db8:2::100"
+ MacAddress:
+ description: |
+ MAC address for the container on the default "bridge" network.
+
+ <p><br /></p>
+
+ > **Deprecated**: This field is only propagated when attached to the
+ > default "bridge" network. Use the information from the "bridge"
+ > network inside the `Networks` map instead, which contains the same
+ > information. This field was deprecated in Docker 1.9 and is scheduled
+ > to be removed in Docker 17.12.0
+ type: "string"
+ example: "02:42:ac:11:00:04"
+ Networks:
+ description: |
+ Information about all networks that the container is connected to.
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+
+ Address:
+ description: Address represents an IPv4 or IPv6 IP address.
+ type: "object"
+ properties:
+ Addr:
+ description: IP address.
+ type: "string"
+ PrefixLen:
+ description: Mask length of the IP address.
+ type: "integer"
+
+ PortMap:
+ description: |
+ PortMap describes the mapping of container ports to host ports, using the
+ container's port-number and protocol as key in the format `<port>/<protocol>`,
+ for example, `80/udp`.
+
+ If a container's port is mapped for both `tcp` and `udp`, two separate
+ entries are added to the mapping table.
+ type: "object"
+ additionalProperties:
+ type: "array"
+ items:
+ $ref: "#/definitions/PortBinding"
+ example:
+ "443/tcp":
+ - HostIp: "127.0.0.1"
+ HostPort: "4443"
+ "80/tcp":
+ - HostIp: "0.0.0.0"
+ HostPort: "80"
+ - HostIp: "0.0.0.0"
+ HostPort: "8080"
+ "80/udp":
+ - HostIp: "0.0.0.0"
+ HostPort: "80"
+ "53/udp":
+ - HostIp: "0.0.0.0"
+ HostPort: "53"
+ "2377/tcp": null
+
+ PortBinding:
+ description: |
+ PortBinding represents a binding between a host IP address and a host
+ port.
+ type: "object"
+ x-nullable: true
+ properties:
+ HostIp:
+ description: "Host IP address that the container's port is mapped to."
+ type: "string"
+ example: "127.0.0.1"
+ HostPort:
+ description: "Host port number that the container's port is mapped to."
+ type: "string"
+ example: "4443"
+
+ GraphDriverData:
+ description: "Information about a container's graph driver."
+ type: "object"
+ required: [Name, Data]
+ properties:
+ Name:
+ type: "string"
+ x-nullable: false
+ Data:
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+
+ Image:
+ type: "object"
+ required:
+ - Id
+ - Parent
+ - Comment
+ - Created
+ - Container
+ - DockerVersion
+ - Author
+ - Architecture
+ - Os
+ - Size
+ - VirtualSize
+ - GraphDriver
+ - RootFS
+ properties:
+ Id:
+ type: "string"
+ x-nullable: false
+ RepoTags:
+ type: "array"
+ items:
+ type: "string"
+ RepoDigests:
+ type: "array"
+ items:
+ type: "string"
+ Parent:
+ type: "string"
+ x-nullable: false
+ Comment:
+ type: "string"
+ x-nullable: false
+ Created:
+ type: "string"
+ x-nullable: false
+ Container:
+ type: "string"
+ x-nullable: false
+ ContainerConfig:
+ $ref: "#/definitions/ContainerConfig"
+ DockerVersion:
+ type: "string"
+ x-nullable: false
+ Author:
+ type: "string"
+ x-nullable: false
+ Config:
+ $ref: "#/definitions/ContainerConfig"
+ Architecture:
+ type: "string"
+ x-nullable: false
+ Os:
+ type: "string"
+ x-nullable: false
+ OsVersion:
+ type: "string"
+ Size:
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ VirtualSize:
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ GraphDriver:
+ $ref: "#/definitions/GraphDriverData"
+ RootFS:
+ type: "object"
+ required: [Type]
+ properties:
+ Type:
+ type: "string"
+ x-nullable: false
+ Layers:
+ type: "array"
+ items:
+ type: "string"
+ BaseLayer:
+ type: "string"
+ Metadata:
+ type: "object"
+ properties:
+ LastTagTime:
+ type: "string"
+ format: "dateTime"
+
+ ImageSummary:
+ type: "object"
+ required:
+ - Id
+ - ParentId
+ - RepoTags
+ - RepoDigests
+ - Created
+ - Size
+ - SharedSize
+ - VirtualSize
+ - Labels
+ - Containers
+ properties:
+ Id:
+ type: "string"
+ x-nullable: false
+ ParentId:
+ type: "string"
+ x-nullable: false
+ RepoTags:
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ RepoDigests:
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ Created:
+ type: "integer"
+ x-nullable: false
+ Size:
+ type: "integer"
+ x-nullable: false
+ SharedSize:
+ type: "integer"
+ x-nullable: false
+ VirtualSize:
+ type: "integer"
+ x-nullable: false
+ Labels:
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ Containers:
+ x-nullable: false
+ type: "integer"
+
+ AuthConfig:
+ type: "object"
+ properties:
+ username:
+ type: "string"
+ password:
+ type: "string"
+ email:
+ type: "string"
+ serveraddress:
+ type: "string"
+ example:
+ username: "hannibal"
+ password: "xxxx"
+ serveraddress: "https://index.docker.io/v1/"
+
+ ProcessConfig:
+ type: "object"
+ properties:
+ privileged:
+ type: "boolean"
+ user:
+ type: "string"
+ tty:
+ type: "boolean"
+ entrypoint:
+ type: "string"
+ arguments:
+ type: "array"
+ items:
+ type: "string"
+
+ Volume:
+ type: "object"
+ required: [Name, Driver, Mountpoint, Labels, Scope, Options]
+ properties:
+ Name:
+ type: "string"
+ description: "Name of the volume."
+ x-nullable: false
+ Driver:
+ type: "string"
+ description: "Name of the volume driver used by the volume."
+ x-nullable: false
+ Mountpoint:
+ type: "string"
+ description: "Mount path of the volume on the host."
+ x-nullable: false
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ description: "Date/Time the volume was created."
+ Status:
+ type: "object"
+ description: |
+ Low-level details about the volume, provided by the volume driver.
+ Details are returned as a map with key/value pairs:
+ `{"key":"value","key2":"value2"}`.
+
+ The `Status` field is optional, and is omitted if the volume driver
+ does not support this feature.
+ additionalProperties:
+ type: "object"
+ Labels:
+ type: "object"
+ description: "User-defined key/value metadata."
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ Scope:
+ type: "string"
+ description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level."
+ default: "local"
+ x-nullable: false
+ enum: ["local", "global"]
+ Options:
+ type: "object"
+ description: "The driver specific options used when creating the volume."
+ additionalProperties:
+ type: "string"
+ UsageData:
+ type: "object"
+ x-nullable: true
+ required: [Size, RefCount]
+ description: |
+ Usage details about the volume. This information is used by the
+ `GET /system/df` endpoint, and omitted in other endpoints.
+ properties:
+ Size:
+ type: "integer"
+ default: -1
+ description: |
+ Amount of disk space used by the volume (in bytes). This information
+ is only available for volumes created with the `"local"` volume
+ driver. For volumes created with other volume drivers, this field
+ is set to `-1` ("not available")
+ x-nullable: false
+ RefCount:
+ type: "integer"
+ default: -1
+ description: |
+ The number of containers referencing this volume. This field
+ is set to `-1` if the reference-count is not available.
+ x-nullable: false
+
+ example:
+ Name: "tardis"
+ Driver: "custom"
+ Mountpoint: "/var/lib/docker/volumes/tardis"
+ Status:
+ hello: "world"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Scope: "local"
+ CreatedAt: "2016-06-07T20:31:11.853781916Z"
+
+ Network:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Id:
+ type: "string"
+ Created:
+ type: "string"
+ format: "dateTime"
+ Scope:
+ type: "string"
+ Driver:
+ type: "string"
+ EnableIPv6:
+ type: "boolean"
+ IPAM:
+ $ref: "#/definitions/IPAM"
+ Internal:
+ type: "boolean"
+ Attachable:
+ type: "boolean"
+ Ingress:
+ type: "boolean"
+ Containers:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/NetworkContainer"
+ Options:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Labels:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ Name: "net01"
+ Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
+ Created: "2016-10-19T04:33:30.360899459Z"
+ Scope: "local"
+ Driver: "bridge"
+ EnableIPv6: false
+ IPAM:
+ Driver: "default"
+ Config:
+ - Subnet: "172.19.0.0/16"
+ Gateway: "172.19.0.1"
+ Options:
+ foo: "bar"
+ Internal: false
+ Attachable: false
+ Ingress: false
+ Containers:
+ 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
+ Name: "test"
+ EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+ MacAddress: "02:42:ac:13:00:02"
+ IPv4Address: "172.19.0.2/16"
+ IPv6Address: ""
+ Options:
+ com.docker.network.bridge.default_bridge: "true"
+ com.docker.network.bridge.enable_icc: "true"
+ com.docker.network.bridge.enable_ip_masquerade: "true"
+ com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+ com.docker.network.bridge.name: "docker0"
+ com.docker.network.driver.mtu: "1500"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ IPAM:
+ type: "object"
+ properties:
+ Driver:
+ description: "Name of the IPAM driver to use."
+ type: "string"
+ default: "default"
+ Config:
+ description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": <CIDR>, \"IPRange\": <CIDR>, \"Gateway\": <IP address>, \"AuxAddress\": <device_name:IP address>}`"
+ type: "array"
+ items:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Options:
+ description: "Driver-specific options, specified as a map."
+ type: "array"
+ items:
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ NetworkContainer:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ EndpointID:
+ type: "string"
+ MacAddress:
+ type: "string"
+ IPv4Address:
+ type: "string"
+ IPv6Address:
+ type: "string"
+
+ BuildInfo:
+ type: "object"
+ properties:
+ id:
+ type: "string"
+ stream:
+ type: "string"
+ error:
+ type: "string"
+ errorDetail:
+ $ref: "#/definitions/ErrorDetail"
+ status:
+ type: "string"
+ progress:
+ type: "string"
+ progressDetail:
+ $ref: "#/definitions/ProgressDetail"
+
+ CreateImageInfo:
+ type: "object"
+ properties:
+ error:
+ type: "string"
+ status:
+ type: "string"
+ progress:
+ type: "string"
+ progressDetail:
+ $ref: "#/definitions/ProgressDetail"
+
+ PushImageInfo:
+ type: "object"
+ properties:
+ error:
+ type: "string"
+ status:
+ type: "string"
+ progress:
+ type: "string"
+ progressDetail:
+ $ref: "#/definitions/ProgressDetail"
+
+ ErrorDetail:
+ type: "object"
+ properties:
+ code:
+ type: "integer"
+ message:
+ type: "string"
+
+ ProgressDetail:
+ type: "object"
+ properties:
+ code:
+ type: "integer"
+ message:
+ type: "integer"
+
+ ErrorResponse:
+ description: "Represents an error."
+ type: "object"
+ required: ["message"]
+ properties:
+ message:
+ description: "The error message."
+ type: "string"
+ x-nullable: false
+ example:
+ message: "Something went wrong."
+
+ IdResponse:
+ description: "Response to an API call that returns just an Id"
+ type: "object"
+ required: ["Id"]
+ properties:
+ Id:
+ description: "The id of the newly created object."
+ type: "string"
+ x-nullable: false
+
+ EndpointSettings:
+ description: "Configuration for a network endpoint."
+ type: "object"
+ properties:
+ # Configurations
+ IPAMConfig:
+ $ref: "#/definitions/EndpointIPAMConfig"
+ Links:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "container_1"
+ - "container_2"
+ Aliases:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "server_x"
+ - "server_y"
+
+ # Operational data
+ NetworkID:
+ description: |
+ Unique ID of the network.
+ type: "string"
+ example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a"
+ EndpointID:
+ description: |
+ Unique ID for the service endpoint in a Sandbox.
+ type: "string"
+ example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
+ Gateway:
+ description: |
+ Gateway address for this network.
+ type: "string"
+ example: "172.17.0.1"
+ IPAddress:
+ description: |
+ IPv4 address.
+ type: "string"
+ example: "172.17.0.4"
+ IPPrefixLen:
+ description: |
+ Mask length of the IPv4 address.
+ type: "integer"
+ example: 16
+ IPv6Gateway:
+ description: |
+ IPv6 gateway address.
+ type: "string"
+ example: "2001:db8:2::100"
+ GlobalIPv6Address:
+ description: |
+ Global IPv6 address.
+ type: "string"
+ example: "2001:db8::5689"
+ GlobalIPv6PrefixLen:
+ description: |
+ Mask length of the global IPv6 address.
+ type: "integer"
+ format: "int64"
+ example: 64
+ MacAddress:
+ description: |
+ MAC address for the endpoint on this network.
+ type: "string"
+ example: "02:42:ac:11:00:04"
+ DriverOpts:
+ description: |
+ DriverOpts is a mapping of driver options and values. These options
+ are passed directly to the driver and are driver specific.
+ type: "object"
+ x-nullable: true
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+
+ EndpointIPAMConfig:
+ description: |
+ EndpointIPAMConfig represents an endpoint's IPAM configuration.
+ type: "object"
+ x-nullable: true
+ properties:
+ IPv4Address:
+ type: "string"
+ example: "172.20.30.33"
+ IPv6Address:
+ type: "string"
+ example: "2001:db8:abcd::3033"
+ LinkLocalIPs:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "169.254.34.68"
+ - "fe80::3468"
+
+ PluginMount:
+ type: "object"
+ x-nullable: false
+ required: [Name, Description, Settable, Source, Destination, Type, Options]
+ properties:
+ Name:
+ type: "string"
+ x-nullable: false
+ example: "some-mount"
+ Description:
+ type: "string"
+ x-nullable: false
+ example: "This is a mount that's used by the plugin."
+ Settable:
+ type: "array"
+ items:
+ type: "string"
+ Source:
+ type: "string"
+ example: "/var/lib/docker/plugins/"
+ Destination:
+ type: "string"
+ x-nullable: false
+ example: "/mnt/state"
+ Type:
+ type: "string"
+ x-nullable: false
+ example: "bind"
+ Options:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "rbind"
+ - "rw"
+
+ PluginDevice:
+ type: "object"
+ required: [Name, Description, Settable, Path]
+ x-nullable: false
+ properties:
+ Name:
+ type: "string"
+ x-nullable: false
+ Description:
+ type: "string"
+ x-nullable: false
+ Settable:
+ type: "array"
+ items:
+ type: "string"
+ Path:
+ type: "string"
+ example: "/dev/fuse"
+
+ PluginEnv:
+ type: "object"
+ x-nullable: false
+ required: [Name, Description, Settable, Value]
+ properties:
+ Name:
+ x-nullable: false
+ type: "string"
+ Description:
+ x-nullable: false
+ type: "string"
+ Settable:
+ type: "array"
+ items:
+ type: "string"
+ Value:
+ type: "string"
+
+ PluginInterfaceType:
+ type: "object"
+ x-nullable: false
+ required: [Prefix, Capability, Version]
+ properties:
+ Prefix:
+ type: "string"
+ x-nullable: false
+ Capability:
+ type: "string"
+ x-nullable: false
+ Version:
+ type: "string"
+ x-nullable: false
+
+ Plugin:
+ description: "A plugin for the Engine API"
+ type: "object"
+ required: [Settings, Enabled, Config, Name]
+ properties:
+ Id:
+ type: "string"
+ example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"
+ Name:
+ type: "string"
+ x-nullable: false
+ example: "tiborvass/sample-volume-plugin"
+ Enabled:
+ description: "True if the plugin is running. False if the plugin is not running, only installed."
+ type: "boolean"
+ x-nullable: false
+ example: true
+ Settings:
+ description: "Settings that can be modified by users."
+ type: "object"
+ x-nullable: false
+ required: [Args, Devices, Env, Mounts]
+ properties:
+ Mounts:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginMount"
+ Env:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "DEBUG=0"
+ Args:
+ type: "array"
+ items:
+ type: "string"
+ Devices:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginDevice"
+ PluginReference:
+ description: "plugin remote reference used to push/pull the plugin"
+ type: "string"
+ x-nullable: false
+ example: "localhost:5000/tiborvass/sample-volume-plugin:latest"
+ Config:
+ description: "The config of a plugin."
+ type: "object"
+ x-nullable: false
+ required:
+ - Description
+ - Documentation
+ - Interface
+ - Entrypoint
+ - WorkDir
+ - Network
+ - Linux
+ - PidHost
+ - PropagatedMount
+ - IpcHost
+ - Mounts
+ - Env
+ - Args
+ properties:
+ DockerVersion:
+ description: "Docker Version used to create the plugin"
+ type: "string"
+ x-nullable: false
+ example: "17.06.0-ce"
+ Description:
+ type: "string"
+ x-nullable: false
+ example: "A sample volume plugin for Docker"
+ Documentation:
+ type: "string"
+ x-nullable: false
+ example: "https://docs.docker.com/engine/extend/plugins/"
+ Interface:
+ description: "The interface between Docker and the plugin"
+ x-nullable: false
+ type: "object"
+ required: [Types, Socket]
+ properties:
+ Types:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginInterfaceType"
+ example:
+ - "docker.volumedriver/1.0"
+ Socket:
+ type: "string"
+ x-nullable: false
+ example: "plugins.sock"
+ Entrypoint:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "/usr/bin/sample-volume-plugin"
+ - "/data"
+ WorkDir:
+ type: "string"
+ x-nullable: false
+ example: "/bin/"
+ User:
+ type: "object"
+ x-nullable: false
+ properties:
+ UID:
+ type: "integer"
+ format: "uint32"
+ example: 1000
+ GID:
+ type: "integer"
+ format: "uint32"
+ example: 1000
+ Network:
+ type: "object"
+ x-nullable: false
+ required: [Type]
+ properties:
+ Type:
+ x-nullable: false
+ type: "string"
+ example: "host"
+ Linux:
+ type: "object"
+ x-nullable: false
+ required: [Capabilities, AllowAllDevices, Devices]
+ properties:
+ Capabilities:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "CAP_SYS_ADMIN"
+ - "CAP_SYSLOG"
+ AllowAllDevices:
+ type: "boolean"
+ x-nullable: false
+ example: false
+ Devices:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginDevice"
+ PropagatedMount:
+ type: "string"
+ x-nullable: false
+ example: "/mnt/volumes"
+ IpcHost:
+ type: "boolean"
+ x-nullable: false
+ example: false
+ PidHost:
+ type: "boolean"
+ x-nullable: false
+ example: false
+ Mounts:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginMount"
+ Env:
+ type: "array"
+ items:
+ $ref: "#/definitions/PluginEnv"
+ example:
+ - Name: "DEBUG"
+ Description: "If set, prints debug messages"
+ Settable: null
+ Value: "0"
+ Args:
+ type: "object"
+ x-nullable: false
+ required: [Name, Description, Settable, Value]
+ properties:
+ Name:
+ x-nullable: false
+ type: "string"
+ example: "args"
+ Description:
+ x-nullable: false
+ type: "string"
+ example: "command line arguments"
+ Settable:
+ type: "array"
+ items:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ rootfs:
+ type: "object"
+ properties:
+ type:
+ type: "string"
+ example: "layers"
+ diff_ids:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887"
+ - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
+
+ ObjectVersion:
+ description: |
+ The version number of the object such as node, service, etc. This is needed to avoid conflicting writes.
+ The client must send the version number along with the modified specification when updating these objects.
+ This approach ensures safe concurrency and determinism in that the change on the object
+ may not be applied if the version number has changed from the last read. In other words,
+ if two update requests specify the same base version, only one of the requests can succeed.
+ As a result, two separate update requests that happen at the same time will not
+ unintentionally overwrite each other.
+ type: "object"
+ properties:
+ Index:
+ type: "integer"
+ format: "uint64"
+ example: 373531
+
+ NodeSpec:
+ type: "object"
+ properties:
+ Name:
+ description: "Name for the node."
+ type: "string"
+ example: "my-node"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Role:
+ description: "Role of the node."
+ type: "string"
+ enum:
+ - "worker"
+ - "manager"
+ example: "manager"
+ Availability:
+ description: "Availability of the node."
+ type: "string"
+ enum:
+ - "active"
+ - "pause"
+ - "drain"
+ example: "active"
+ example:
+ Availability: "active"
+ Name: "node-name"
+ Role: "manager"
+ Labels:
+ foo: "bar"
+
+ Node:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ example: "24ifsmvkjbyhk"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ description: |
+ Date and time at which the node was added to the swarm in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2016-08-18T10:44:24.496525531Z"
+ UpdatedAt:
+ description: |
+ Date and time at which the node was last updated in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2017-08-09T07:09:37.632105588Z"
+ Spec:
+ $ref: "#/definitions/NodeSpec"
+ Description:
+ $ref: "#/definitions/NodeDescription"
+ Status:
+ $ref: "#/definitions/NodeStatus"
+ ManagerStatus:
+ $ref: "#/definitions/ManagerStatus"
+
+ NodeDescription:
+ description: |
+ NodeDescription encapsulates the properties of the Node as reported by the
+ agent.
+ type: "object"
+ properties:
+ Hostname:
+ type: "string"
+ example: "bf3067039e47"
+ Platform:
+ $ref: "#/definitions/Platform"
+ Resources:
+ $ref: "#/definitions/ResourceObject"
+ Engine:
+ $ref: "#/definitions/EngineDescription"
+ TLSInfo:
+ $ref: "#/definitions/TLSInfo"
+
+ Platform:
+ description: |
+ Platform represents the platform (Arch/OS).
+ type: "object"
+ properties:
+ Architecture:
+ description: |
+ Architecture represents the hardware architecture (for example,
+ `x86_64`).
+ type: "string"
+ example: "x86_64"
+ OS:
+ description: |
+ OS represents the Operating System (for example, `linux` or `windows`).
+ type: "string"
+ example: "linux"
+
+ EngineDescription:
+ description: "EngineDescription provides information about an engine."
+ type: "object"
+ properties:
+ EngineVersion:
+ type: "string"
+ example: "17.06.0"
+ Labels:
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ foo: "bar"
+ Plugins:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Type:
+ type: "string"
+ Name:
+ type: "string"
+ example:
+ - Type: "Log"
+ Name: "awslogs"
+ - Type: "Log"
+ Name: "fluentd"
+ - Type: "Log"
+ Name: "gcplogs"
+ - Type: "Log"
+ Name: "gelf"
+ - Type: "Log"
+ Name: "journald"
+ - Type: "Log"
+ Name: "json-file"
+ - Type: "Log"
+ Name: "logentries"
+ - Type: "Log"
+ Name: "splunk"
+ - Type: "Log"
+ Name: "syslog"
+ - Type: "Network"
+ Name: "bridge"
+ - Type: "Network"
+ Name: "host"
+ - Type: "Network"
+ Name: "ipvlan"
+ - Type: "Network"
+ Name: "macvlan"
+ - Type: "Network"
+ Name: "null"
+ - Type: "Network"
+ Name: "overlay"
+ - Type: "Volume"
+ Name: "local"
+ - Type: "Volume"
+ Name: "localhost:5000/vieux/sshfs:latest"
+ - Type: "Volume"
+ Name: "vieux/sshfs:latest"
+
+ TLSInfo:
+ description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate"
+ type: "object"
+ properties:
+ TrustRoot:
+ description: "The root CA certificate(s) that are used to validate leaf TLS certificates"
+ type: "string"
+ CertIssuerSubject:
+ description: "The base64-url-safe-encoded raw subject bytes of the issuer"
+ type: "string"
+ CertIssuerPublicKey:
+ description: "The base64-url-safe-encoded raw public key bytes of the issuer"
+ type: "string"
+ example:
+ TrustRoot: |
+ -----BEGIN CERTIFICATE-----
+ MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw
+ EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0
+ MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
+ A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf
+ 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+ Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO
+ PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz
+ pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H
+ -----END CERTIFICATE-----
+ CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh"
+ CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A=="
+
+ NodeStatus:
+ description: |
+ NodeStatus represents the status of a node.
+
+ It provides the current status of the node, as seen by the manager.
+ type: "object"
+ properties:
+ State:
+ $ref: "#/definitions/NodeState"
+ Message:
+ type: "string"
+ example: ""
+ Addr:
+ description: "IP address of the node."
+ type: "string"
+ example: "172.17.0.2"
+
+ NodeState:
+ description: "NodeState represents the state of a node."
+ type: "string"
+ enum:
+ - "unknown"
+ - "down"
+ - "ready"
+ - "disconnected"
+ example: "ready"
+
+ ManagerStatus:
+ description: |
+ ManagerStatus represents the status of a manager.
+
+ It provides the current status of a node's manager component, if the node
+ is a manager.
+ x-nullable: true
+ type: "object"
+ properties:
+ Leader:
+ type: "boolean"
+ default: false
+ example: true
+ Reachability:
+ $ref: "#/definitions/Reachability"
+ Addr:
+ description: |
+ The IP address and port at which the manager is reachable.
+ type: "string"
+ example: "10.0.0.46:2377"
+
+ Reachability:
+ description: "Reachability represents the reachability of a node."
+ type: "string"
+ enum:
+ - "unknown"
+ - "unreachable"
+ - "reachable"
+ example: "reachable"
+
+ SwarmSpec:
+ description: "User modifiable swarm configuration."
+ type: "object"
+ properties:
+ Name:
+ description: "Name of the swarm."
+ type: "string"
+ example: "default"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.corp.type: "production"
+ com.example.corp.department: "engineering"
+ Orchestration:
+ description: "Orchestration configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ TaskHistoryRetentionLimit:
+ description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks."
+ type: "integer"
+ format: "int64"
+ example: 10
+ Raft:
+ description: "Raft configuration."
+ type: "object"
+ properties:
+ SnapshotInterval:
+ description: "The number of log entries between snapshots."
+ type: "integer"
+ format: "uint64"
+ example: 10000
+ KeepOldSnapshots:
+ description: "The number of snapshots to keep beyond the current snapshot."
+ type: "integer"
+ format: "uint64"
+ LogEntriesForSlowFollowers:
+ description: "The number of log entries to keep around to sync up slow followers after a snapshot is created."
+ type: "integer"
+ format: "uint64"
+ example: 500
+ ElectionTick:
+ description: |
+ The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`.
+
+ A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed.
+ type: "integer"
+ example: 3
+ HeartbeatTick:
+ description: |
+ The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers.
+
+ A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed.
+ type: "integer"
+ example: 1
+ Dispatcher:
+ description: "Dispatcher configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ HeartbeatPeriod:
+ description: "The delay for an agent to send a heartbeat to the dispatcher."
+ type: "integer"
+ format: "int64"
+ example: 5000000000
+ CAConfig:
+ description: "CA configuration."
+ type: "object"
+ x-nullable: true
+ properties:
+ NodeCertExpiry:
+ description: "The duration node certificates are issued for."
+ type: "integer"
+ format: "int64"
+ example: 7776000000000000
+ ExternalCAs:
+ description: "Configuration for forwarding signing requests to an external certificate authority."
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Protocol:
+ description: "Protocol for communication with the external CA (currently only `cfssl` is supported)."
+ type: "string"
+ enum:
+ - "cfssl"
+ default: "cfssl"
+ URL:
+ description: "URL where certificate signing requests should be sent."
+ type: "string"
+ Options:
+ description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ CACert:
+ description: "The root CA certificate (in PEM format) this external CA uses to issue TLS certificates (assumed to be to the current swarm root CA certificate if not provided)."
+ type: "string"
+ SigningCACert:
+ description: "The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format."
+ type: "string"
+ SigningCAKey:
+ description: "The desired signing CA key for all swarm node TLS leaf certificates, in PEM format."
+ type: "string"
+ ForceRotate:
+ description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`"
+ format: "uint64"
+ type: "integer"
+ EncryptionConfig:
+ description: "Parameters related to encryption-at-rest."
+ type: "object"
+ properties:
+ AutoLockManagers:
+ description: "If set, generate a key and use it to lock data stored on the managers."
+ type: "boolean"
+ example: false
+ TaskDefaults:
+ description: "Defaults for creating tasks in this cluster."
+ type: "object"
+ properties:
+ LogDriver:
+ description: |
+ The log driver to use for tasks created in the orchestrator if
+ unspecified by a service.
+
+ Updating this value only affects new tasks. Existing tasks continue
+ to use their previously configured log driver until recreated.
+ type: "object"
+ properties:
+ Name:
+ description: |
+ The log driver to use as a default for new tasks.
+ type: "string"
+ example: "json-file"
+ Options:
+ description: |
+ Driver-specific options for the selected log driver, specified
+ as key/value pairs.
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ "max-file": "10"
+ "max-size": "100m"
+
+ # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but
+ # without `JoinTokens`.
+ ClusterInfo:
+ description: |
+ ClusterInfo represents information about the swarm as is returned by the
+ "/info" endpoint. Join-tokens are not included.
+ x-nullable: true
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the swarm."
+ type: "string"
+ example: "abajmipo7b4xz5ip2nrla6b11"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ description: |
+ Date and time at which the swarm was initialised in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2016-08-18T10:44:24.496525531Z"
+ UpdatedAt:
+ description: |
+ Date and time at which the swarm was last updated in
+ [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
+ type: "string"
+ format: "dateTime"
+ example: "2017-08-09T07:09:37.632105588Z"
+ Spec:
+ $ref: "#/definitions/SwarmSpec"
+ TLSInfo:
+ $ref: "#/definitions/TLSInfo"
+ RootRotationInProgress:
+ description: "Whether there is currently a root CA rotation in progress for the swarm"
+ type: "boolean"
+ example: false
+
+ JoinTokens:
+ description: |
+ JoinTokens contains the tokens workers and managers need to join the swarm.
+ type: "object"
+ properties:
+ Worker:
+ description: |
+ The token workers can use to join the swarm.
+ type: "string"
+ example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx"
+ Manager:
+ description: |
+ The token managers can use to join the swarm.
+ type: "string"
+ example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+
+ Swarm:
+ type: "object"
+ allOf:
+ - $ref: "#/definitions/ClusterInfo"
+ - type: "object"
+ properties:
+ JoinTokens:
+ $ref: "#/definitions/JoinTokens"
+
+ TaskSpec:
+ description: "User modifiable task configuration."
+ type: "object"
+ properties:
+ PluginSpec:
+ type: "object"
+ description: "Invalid when specified with `ContainerSpec`. *(Experimental release only.)*"
+ properties:
+ Name:
+ description: "The name or 'alias' to use for the plugin."
+ type: "string"
+ Remote:
+ description: "The plugin image reference to use."
+ type: "string"
+ Disabled:
+ description: "Disable the plugin once scheduled."
+ type: "boolean"
+ PluginPrivilege:
+ type: "array"
+ items:
+ description: "Describes a permission accepted by the user upon installing the plugin."
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Description:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ ContainerSpec:
+ type: "object"
+ description: "Invalid when specified with `PluginSpec`."
+ properties:
+ Image:
+ description: "The image name to use for the container"
+ type: "string"
+ Labels:
+ description: "User-defined key/value data."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Command:
+ description: "The command to be run in the image."
+ type: "array"
+ items:
+ type: "string"
+ Args:
+ description: "Arguments to the command."
+ type: "array"
+ items:
+ type: "string"
+ Hostname:
+ description: "The hostname to use for the container, as a valid RFC 1123 hostname."
+ type: "string"
+ Env:
+ description: "A list of environment variables in the form `VAR=value`."
+ type: "array"
+ items:
+ type: "string"
+ Dir:
+ description: "The working directory for commands to run in."
+ type: "string"
+ User:
+ description: "The user inside the container."
+ type: "string"
+ Groups:
+ type: "array"
+ description: "A list of additional groups that the container process will run as."
+ items:
+ type: "string"
+ Privileges:
+ type: "object"
+ description: "Security options for the container"
+ properties:
+ CredentialSpec:
+ type: "object"
+ description: "CredentialSpec for managed service account (Windows only)"
+ properties:
+ File:
+ type: "string"
+ description: |
+ Load credential spec from this file. The file is read by the daemon, and must be present in the
+ `CredentialSpecs` subdirectory in the docker data directory, which defaults to
+ `C:\ProgramData\Docker\` on Windows.
+
+ For example, specifying `spec.json` loads `C:\ProgramData\Docker\CredentialSpecs\spec.json`.
+
+ <p><br /></p>
+
+ > **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive.
+ Registry:
+ type: "string"
+ description: |
+ Load credential spec from this value in the Windows registry. The specified registry value must be
+ located in:
+
+ `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs`
+
+ <p><br /></p>
+
+
+ > **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive.
+ SELinuxContext:
+ type: "object"
+ description: "SELinux labels of the container"
+ properties:
+ Disable:
+ type: "boolean"
+ description: "Disable SELinux"
+ User:
+ type: "string"
+ description: "SELinux user label"
+ Role:
+ type: "string"
+ description: "SELinux role label"
+ Type:
+ type: "string"
+ description: "SELinux type label"
+ Level:
+ type: "string"
+ description: "SELinux level label"
+ TTY:
+ description: "Whether a pseudo-TTY should be allocated."
+ type: "boolean"
+ OpenStdin:
+ description: "Open `stdin`"
+ type: "boolean"
+ ReadOnly:
+ description: "Mount the container's root filesystem as read only."
+ type: "boolean"
+ Mounts:
+ description: "Specification for mounts to be added to containers created as part of the service."
+ type: "array"
+ items:
+ $ref: "#/definitions/Mount"
+ StopSignal:
+ description: "Signal to stop the container."
+ type: "string"
+ StopGracePeriod:
+ description: "Amount of time to wait for the container to terminate before forcefully killing it."
+ type: "integer"
+ format: "int64"
+ HealthCheck:
+ $ref: "#/definitions/HealthConfig"
+ Hosts:
+ type: "array"
+ description: |
+ A list of hostname/IP mappings to add to the container's `hosts`
+ file. The format of extra hosts is specified in the
+ [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)
+ man page:
+
+ IP_address canonical_hostname [aliases...]
+ items:
+ type: "string"
+ DNSConfig:
+ description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)."
+ type: "object"
+ properties:
+ Nameservers:
+ description: "The IP addresses of the name servers."
+ type: "array"
+ items:
+ type: "string"
+ Search:
+ description: "A search list for host-name lookup."
+ type: "array"
+ items:
+ type: "string"
+ Options:
+ description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)."
+ type: "array"
+ items:
+ type: "string"
+ Secrets:
+ description: "Secrets contains references to zero or more secrets that will be exposed to the service."
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ File:
+ description: "File represents a specific target that is backed by a file."
+ type: "object"
+ properties:
+ Name:
+ description: "Name represents the final filename in the filesystem."
+ type: "string"
+ UID:
+ description: "UID represents the file UID."
+ type: "string"
+ GID:
+ description: "GID represents the file GID."
+ type: "string"
+ Mode:
+ description: "Mode represents the FileMode of the file."
+ type: "integer"
+ format: "uint32"
+ SecretID:
+ description: "SecretID represents the ID of the specific secret that we're referencing."
+ type: "string"
+ SecretName:
+ description: |
+ SecretName is the name of the secret that this references, but this is just provided for
+ lookup/display purposes. The secret in the reference will be identified by its ID.
+ type: "string"
+ Configs:
+ description: "Configs contains references to zero or more configs that will be exposed to the service."
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ File:
+ description: "File represents a specific target that is backed by a file."
+ type: "object"
+ properties:
+ Name:
+ description: "Name represents the final filename in the filesystem."
+ type: "string"
+ UID:
+ description: "UID represents the file UID."
+ type: "string"
+ GID:
+ description: "GID represents the file GID."
+ type: "string"
+ Mode:
+ description: "Mode represents the FileMode of the file."
+ type: "integer"
+ format: "uint32"
+ ConfigID:
+ description: "ConfigID represents the ID of the specific config that we're referencing."
+ type: "string"
+ ConfigName:
+ description: |
+ ConfigName is the name of the config that this references, but this is just provided for
+ lookup/display purposes. The config in the reference will be identified by its ID.
+ type: "string"
+
+ Resources:
+ description: "Resource requirements which apply to each individual container created as part of the service."
+ type: "object"
+ properties:
+ Limits:
+ description: "Define resources limits."
+ $ref: "#/definitions/ResourceObject"
+ Reservation:
+ description: "Define resources reservation."
+ $ref: "#/definitions/ResourceObject"
+ RestartPolicy:
+ description: "Specification for the restart policy which applies to containers created as part of this service."
+ type: "object"
+ properties:
+ Condition:
+ description: "Condition for restart."
+ type: "string"
+ enum:
+ - "none"
+ - "on-failure"
+ - "any"
+ Delay:
+ description: "Delay between restart attempts."
+ type: "integer"
+ format: "int64"
+ MaxAttempts:
+ description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)."
+ type: "integer"
+ format: "int64"
+ default: 0
+ Window:
+ description: "Window is the time window used to evaluate the restart policy (default value is 0, which is unbounded)."
+ type: "integer"
+ format: "int64"
+ default: 0
+ Placement:
+ type: "object"
+ properties:
+ Constraints:
+ description: "An array of constraints."
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "node.hostname!=node3.corp.example.com"
+ - "node.role!=manager"
+ - "node.labels.type==production"
+ Preferences:
+ description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence."
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Spread:
+ type: "object"
+ properties:
+ SpreadDescriptor:
+ description: "label descriptor, such as engine.labels.az"
+ type: "string"
+ example:
+ - Spread:
+ SpreadDescriptor: "node.labels.datacenter"
+ - Spread:
+ SpreadDescriptor: "node.labels.rack"
+ Platforms:
+ description: |
+ Platforms stores all the platforms that the service's image can
+ run on. This field is used in the platform filter for scheduling.
+ If empty, then the platform filter is off, meaning there are no
+ scheduling restrictions.
+ type: "array"
+ items:
+ $ref: "#/definitions/Platform"
+ ForceUpdate:
+ description: "A counter that triggers an update even if no relevant parameters have been changed."
+ type: "integer"
+ Runtime:
+ description: "Runtime is the type of runtime specified for the task executor."
+ type: "string"
+ Networks:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Target:
+ type: "string"
+ Aliases:
+ type: "array"
+ items:
+ type: "string"
+ LogDriver:
+ description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified."
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Options:
+ type: "object"
+ additionalProperties:
+ type: "string"
+
+ TaskState:
+ type: "string"
+ enum:
+ - "new"
+ - "allocated"
+ - "pending"
+ - "assigned"
+ - "accepted"
+ - "preparing"
+ - "ready"
+ - "starting"
+ - "running"
+ - "complete"
+ - "shutdown"
+ - "failed"
+ - "rejected"
+
+ Task:
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the task."
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Name:
+ description: "Name of the task."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Spec:
+ $ref: "#/definitions/TaskSpec"
+ ServiceID:
+ description: "The ID of the service this task is part of."
+ type: "string"
+ Slot:
+ type: "integer"
+ NodeID:
+ description: "The ID of the node that this task is on."
+ type: "string"
+ AssignedGenericResources:
+ $ref: "#/definitions/GenericResources"
+ Status:
+ type: "object"
+ properties:
+ Timestamp:
+ type: "string"
+ format: "dateTime"
+ State:
+ $ref: "#/definitions/TaskState"
+ Message:
+ type: "string"
+ Err:
+ type: "string"
+ ContainerStatus:
+ type: "object"
+ properties:
+ ContainerID:
+ type: "string"
+ PID:
+ type: "integer"
+ ExitCode:
+ type: "integer"
+ DesiredState:
+ $ref: "#/definitions/TaskState"
+ example:
+ ID: "0kzzo1i0y4jz6027t0k7aezc7"
+ Version:
+ Index: 71
+ CreatedAt: "2016-06-07T21:07:31.171892745Z"
+ UpdatedAt: "2016-06-07T21:07:31.376370513Z"
+ Spec:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Slot: 1
+ NodeID: "60gvrl6tm78dmak4yl7srz94v"
+ Status:
+ Timestamp: "2016-06-07T21:07:31.290032978Z"
+ State: "running"
+ Message: "started"
+ ContainerStatus:
+ ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
+ PID: 677
+ DesiredState: "running"
+ NetworksAttachments:
+ - Network:
+ ID: "4qvuz4ko70xaltuqbt8956gd1"
+ Version:
+ Index: 18
+ CreatedAt: "2016-06-07T20:31:11.912919752Z"
+ UpdatedAt: "2016-06-07T21:07:29.955277358Z"
+ Spec:
+ Name: "ingress"
+ Labels:
+ com.docker.swarm.internal: "true"
+ DriverConfiguration: {}
+ IPAMOptions:
+ Driver: {}
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ DriverState:
+ Name: "overlay"
+ Options:
+ com.docker.network.driver.overlay.vxlanid_list: "256"
+ IPAMOptions:
+ Driver:
+ Name: "default"
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ Addresses:
+ - "10.255.0.10/16"
+ AssignedGenericResources:
+ - DiscreteResourceSpec:
+ Kind: "SSD"
+ Value: 3
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID1"
+ - NamedResourceSpec:
+ Kind: "GPU"
+ Value: "UUID2"
+
+ ServiceSpec:
+ description: "User modifiable configuration for a service."
+ properties:
+ Name:
+ description: "Name of the service."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ TaskTemplate:
+ $ref: "#/definitions/TaskSpec"
+ Mode:
+ description: "Scheduling mode for the service."
+ type: "object"
+ properties:
+ Replicated:
+ type: "object"
+ properties:
+ Replicas:
+ type: "integer"
+ format: "int64"
+ Global:
+ type: "object"
+ UpdateConfig:
+ description: "Specification for the update strategy of the service."
+ type: "object"
+ properties:
+ Parallelism:
+ description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)."
+ type: "integer"
+ format: "int64"
+ Delay:
+ description: "Amount of time between updates, in nanoseconds."
+ type: "integer"
+ format: "int64"
+ FailureAction:
+ description: "Action to take if an updated task fails to run, or stops running during the update."
+ type: "string"
+ enum:
+ - "continue"
+ - "pause"
+ - "rollback"
+ Monitor:
+ description: "Amount of time to monitor each updated task for failures, in nanoseconds."
+ type: "integer"
+ format: "int64"
+ MaxFailureRatio:
+ description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1."
+ type: "number"
+ default: 0
+ Order:
+ description: "The order of operations when rolling out an updated task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down."
+ type: "string"
+ enum:
+ - "stop-first"
+ - "start-first"
+ RollbackConfig:
+ description: "Specification for the rollback strategy of the service."
+ type: "object"
+ properties:
+ Parallelism:
+ description: "Maximum number of tasks to be rolled back in one iteration (0 means unlimited parallelism)."
+ type: "integer"
+ format: "int64"
+ Delay:
+ description: "Amount of time between rollback iterations, in nanoseconds."
+ type: "integer"
+ format: "int64"
+ FailureAction:
+ description: "Action to take if a rolled back task fails to run, or stops running during the rollback."
+ type: "string"
+ enum:
+ - "continue"
+ - "pause"
+ Monitor:
+ description: "Amount of time to monitor each rolled back task for failures, in nanoseconds."
+ type: "integer"
+ format: "int64"
+ MaxFailureRatio:
+ description: "The fraction of tasks that may fail during a rollback before the failure action is invoked, specified as a floating point number between 0 and 1."
+ type: "number"
+ default: 0
+ Order:
+ description: "The order of operations when rolling back a task. Either the old task is shut down before the new task is started, or the new task is started before the old task is shut down."
+ type: "string"
+ enum:
+ - "stop-first"
+ - "start-first"
+ Networks:
+ description: "Array of network names or IDs to attach the service to."
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Target:
+ type: "string"
+ Aliases:
+ type: "array"
+ items:
+ type: "string"
+ EndpointSpec:
+ $ref: "#/definitions/EndpointSpec"
+
+ EndpointPortConfig:
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Protocol:
+ type: "string"
+ enum:
+ - "tcp"
+ - "udp"
+ TargetPort:
+ description: "The port inside the container."
+ type: "integer"
+ PublishedPort:
+ description: "The port on the swarm hosts."
+ type: "integer"
+
+ EndpointSpec:
+ description: "Properties that can be configured to access and load balance a service."
+ type: "object"
+ properties:
+ Mode:
+ description: "The mode of resolution to use for internal load balancing
+ between tasks."
+ type: "string"
+ enum:
+ - "vip"
+ - "dnsrr"
+ default: "vip"
+ Ports:
+ description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used."
+ type: "array"
+ items:
+ $ref: "#/definitions/EndpointPortConfig"
+
+ Service:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Spec:
+ $ref: "#/definitions/ServiceSpec"
+ Endpoint:
+ type: "object"
+ properties:
+ Spec:
+ $ref: "#/definitions/EndpointSpec"
+ Ports:
+ type: "array"
+ items:
+ $ref: "#/definitions/EndpointPortConfig"
+ VirtualIPs:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ NetworkID:
+ type: "string"
+ Addr:
+ type: "string"
+ UpdateStatus:
+ description: "The status of a service update."
+ type: "object"
+ properties:
+ State:
+ type: "string"
+ enum:
+ - "updating"
+ - "paused"
+ - "completed"
+ StartedAt:
+ type: "string"
+ format: "dateTime"
+ CompletedAt:
+ type: "string"
+ format: "dateTime"
+ Message:
+ type: "string"
+ example:
+ ID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Version:
+ Index: 19
+ CreatedAt: "2016-06-07T21:05:51.880065305Z"
+ UpdatedAt: "2016-06-07T21:07:29.962229872Z"
+ Spec:
+ Name: "hopeful_cori"
+ TaskTemplate:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ForceUpdate: 0
+ Mode:
+ Replicated:
+ Replicas: 1
+ UpdateConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ RollbackConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ EndpointSpec:
+ Mode: "vip"
+ Ports:
+ -
+ Protocol: "tcp"
+ TargetPort: 6379
+ PublishedPort: 30001
+ Endpoint:
+ Spec:
+ Mode: "vip"
+ Ports:
+ -
+ Protocol: "tcp"
+ TargetPort: 6379
+ PublishedPort: 30001
+ Ports:
+ -
+ Protocol: "tcp"
+ TargetPort: 6379
+ PublishedPort: 30001
+ VirtualIPs:
+ -
+ NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+ Addr: "10.255.0.2/16"
+ -
+ NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
+ Addr: "10.255.0.3/16"
+
+ ImageDeleteResponseItem:
+ type: "object"
+ properties:
+ Untagged:
+ description: "The image ID of an image that was untagged"
+ type: "string"
+ Deleted:
+ description: "The image ID of an image that was deleted"
+ type: "string"
+
+ ServiceUpdateResponse:
+ type: "object"
+ properties:
+ Warnings:
+ description: "Optional warning messages"
+ type: "array"
+ items:
+ type: "string"
+ example:
+ Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+
+ ContainerSummary:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ Id:
+ description: "The ID of this container"
+ type: "string"
+ x-go-name: "ID"
+ Names:
+ description: "The names that this container has been given"
+ type: "array"
+ items:
+ type: "string"
+ Image:
+ description: "The name of the image used when creating this container"
+ type: "string"
+ ImageID:
+ description: "The ID of the image that this container was created from"
+ type: "string"
+ Command:
+ description: "Command to run when starting the container"
+ type: "string"
+ Created:
+ description: "When the container was created"
+ type: "integer"
+ format: "int64"
+ Ports:
+ description: "The ports exposed by this container"
+ type: "array"
+ items:
+ $ref: "#/definitions/Port"
+ SizeRw:
+ description: "The size of files that have been created or changed by this container"
+ type: "integer"
+ format: "int64"
+ SizeRootFs:
+ description: "The total size of all the files in this container"
+ type: "integer"
+ format: "int64"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ State:
+ description: "The state of this container (e.g. `Exited`)"
+ type: "string"
+ Status:
+ description: "Additional human-readable status of this container (e.g. `Exit 0`)"
+ type: "string"
+ HostConfig:
+ type: "object"
+ properties:
+ NetworkMode:
+ type: "string"
+ NetworkSettings:
+ description: "A summary of the container's network settings"
+ type: "object"
+ properties:
+ Networks:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+ Mounts:
+ type: "array"
+ items:
+ $ref: "#/definitions/Mount"
+
+ Driver:
+ description: "Driver represents a driver (network, logging, secrets)."
+ type: "object"
+ required: [Name]
+ properties:
+ Name:
+ description: "Name of the driver."
+ type: "string"
+ x-nullable: false
+ example: "some-driver"
+ Options:
+ description: "Key/value map of driver-specific options."
+ type: "object"
+ x-nullable: false
+ additionalProperties:
+ type: "string"
+ example:
+ OptionA: "value for driver-specific option A"
+ OptionB: "value for driver-specific option B"
+
+ SecretSpec:
+ type: "object"
+ properties:
+ Name:
+ description: "User-defined name of the secret."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Data:
+ description: |
+ Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2))
+ data to store as secret.
+
+ This field is only used to _create_ a secret, and is not returned by
+ other endpoints.
+ type: "string"
+ example: ""
+ Driver:
+ description: "Name of the secrets driver used to fetch the secret's value from an external secret store"
+ $ref: "#/definitions/Driver"
+
+ Secret:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ example: "blt1owaxmitz71s9v5zh81zun"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ example: "2017-07-20T13:55:28.678958722Z"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ example: "2017-07-20T13:55:28.678958722Z"
+ Spec:
+ $ref: "#/definitions/SecretSpec"
+
+ ConfigSpec:
+ type: "object"
+ properties:
+ Name:
+ description: "User-defined name of the config."
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Data:
+ description: |
+ Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2))
+ config data.
+ type: "string"
+
+ Config:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ Version:
+ $ref: "#/definitions/ObjectVersion"
+ CreatedAt:
+ type: "string"
+ format: "dateTime"
+ UpdatedAt:
+ type: "string"
+ format: "dateTime"
+ Spec:
+ $ref: "#/definitions/ConfigSpec"
+
+ SystemInfo:
+ type: "object"
+ properties:
+ ID:
+ description: |
+ Unique identifier of the daemon.
+
+ <p><br /></p>
+
+ > **Note**: The format of the ID itself is not part of the API, and
+ > should not be considered stable.
+ type: "string"
+ example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS"
+ Containers:
+ description: "Total number of containers on the host."
+ type: "integer"
+ example: 14
+ ContainersRunning:
+ description: |
+ Number of containers with status `"running"`.
+ type: "integer"
+ example: 3
+ ContainersPaused:
+ description: |
+ Number of containers with status `"paused"`.
+ type: "integer"
+ example: 1
+ ContainersStopped:
+ description: |
+ Number of containers with status `"stopped"`.
+ type: "integer"
+ example: 10
+ Images:
+ description: |
+ Total number of images on the host.
+
+ Both _tagged_ and _untagged_ (dangling) images are counted.
+ type: "integer"
+ example: 508
+ Driver:
+ description: "Name of the storage driver in use."
+ type: "string"
+ example: "overlay2"
+ DriverStatus:
+ description: |
+ Information specific to the storage driver, provided as
+ "label" / "value" pairs.
+
+ This information is provided by the storage driver, and formatted
+ in a way consistent with the output of `docker info` on the command
+ line.
+
+ <p><br /></p>
+
+ > **Note**: The information returned in this field, including the
+ > formatting of values and labels, should not be considered stable,
+ > and may change without notice.
+ type: "array"
+ items:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - ["Backing Filesystem", "extfs"]
+ - ["Supports d_type", "true"]
+ - ["Native Overlay Diff", "true"]
+ DockerRootDir:
+ description: |
+ Root directory of persistent Docker state.
+
+ Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker`
+ on Windows.
+ type: "string"
+ example: "/var/lib/docker"
+ SystemStatus:
+ description: |
+ Status information about this node (standalone Swarm API).
+
+ <p><br /></p>
+
+ > **Note**: The information returned in this field is only propagated
+ > by the Swarm standalone API, and is empty (`null`) when using
+ > built-in swarm mode.
+ type: "array"
+ items:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - ["Role", "primary"]
+ - ["State", "Healthy"]
+ - ["Strategy", "spread"]
+ - ["Filters", "health, port, containerslots, dependency, affinity, constraint, whitelist"]
+ - ["Nodes", "2"]
+ - [" swarm-agent-00", "192.168.99.102:2376"]
+ - [" └ ID", "5CT6:FBGO:RVGO:CZL4:PB2K:WCYN:2JSV:KSHH:GGFW:QOPG:6J5Q:IOZ2|192.168.99.102:2376"]
+ - [" └ Status", "Healthy"]
+ - [" └ Containers", "1 (1 Running, 0 Paused, 0 Stopped)"]
+ - [" └ Reserved CPUs", "0 / 1"]
+ - [" └ Reserved Memory", "0 B / 1.021 GiB"]
+ - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"]
+ - [" └ UpdatedAt", "2017-08-09T10:03:46Z"]
+ - [" └ ServerVersion", "17.06.0-ce"]
+ - [" swarm-manager", "192.168.99.101:2376"]
+ - [" └ ID", "TAMD:7LL3:SEF7:LW2W:4Q2X:WVFH:RTXX:JSYS:XY2P:JEHL:ZMJK:JGIW|192.168.99.101:2376"]
+ - [" └ Status", "Healthy"]
+ - [" └ Containers", "2 (2 Running, 0 Paused, 0 Stopped)"]
+ - [" └ Reserved CPUs", "0 / 1"]
+ - [" └ Reserved Memory", "0 B / 1.021 GiB"]
+ - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"]
+ - [" └ UpdatedAt", "2017-08-09T10:04:11Z"]
+ - [" └ ServerVersion", "17.06.0-ce"]
+ Plugins:
+ $ref: "#/definitions/PluginsInfo"
+ MemoryLimit:
+ description: "Indicates if the host has memory limit support enabled."
+ type: "boolean"
+ example: true
+ SwapLimit:
+ description: "Indicates if the host has memory swap limit support enabled."
+ type: "boolean"
+ example: true
+ KernelMemory:
+ description: "Indicates if the host has kernel memory limit support enabled."
+ type: "boolean"
+ example: true
+ CpuCfsPeriod:
+ description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host."
+ type: "boolean"
+ example: true
+ CpuCfsQuota:
+ description: "Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host."
+ type: "boolean"
+ example: true
+ CPUShares:
+ description: "Indicates if CPU Shares limiting is supported by the host."
+ type: "boolean"
+ example: true
+ CPUSet:
+ description: |
+ Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.
+
+ See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)
+ type: "boolean"
+ example: true
+ OomKillDisable:
+ description: "Indicates if OOM killer disable is supported on the host."
+ type: "boolean"
+ IPv4Forwarding:
+ description: "Indicates IPv4 forwarding is enabled."
+ type: "boolean"
+ example: true
+ BridgeNfIptables:
+ description: "Indicates if `bridge-nf-call-iptables` is available on the host."
+ type: "boolean"
+ example: true
+ BridgeNfIp6tables:
+ description: "Indicates if `bridge-nf-call-ip6tables` is available on the host."
+ type: "boolean"
+ example: true
+ Debug:
+ description: "Indicates if the daemon is running in debug-mode / with debug-level logging enabled."
+ type: "boolean"
+ example: true
+ NFd:
+ description: |
+ The total number of file Descriptors in use by the daemon process.
+
+ This information is only returned if debug-mode is enabled.
+ type: "integer"
+ example: 64
+ NGoroutines:
+ description: |
+ The number of goroutines that currently exist.
+
+ This information is only returned if debug-mode is enabled.
+ type: "integer"
+ example: 174
+ SystemTime:
+ description: |
+ Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
+ format with nano-seconds.
+ type: "string"
+ example: "2017-08-08T20:28:29.06202363Z"
+ LoggingDriver:
+ description: |
+ The logging driver to use as a default for new containers.
+ type: "string"
+ CgroupDriver:
+ description: |
+ The driver to use for managing cgroups.
+ type: "string"
+ enum: ["cgroupfs", "systemd"]
+ default: "cgroupfs"
+ example: "cgroupfs"
+ NEventsListener:
+ description: "Number of event listeners subscribed."
+ type: "integer"
+ example: 30
+ KernelVersion:
+ description: |
+ Kernel version of the host.
+
+ On Linux, this information is obtained from `uname`. On Windows this
+ information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd>
+ registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.
+ type: "string"
+ example: "4.9.38-moby"
+ OperatingSystem:
+ description: |
+ Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS"
+ or "Windows Server 2016 Datacenter"
+ type: "string"
+ example: "Alpine Linux v3.5"
+ OSType:
+ description: |
+ Generic type of the operating system of the host, as returned by the
+ Go runtime (`GOOS`).
+
+ Currently returned values are "linux" and "windows". A full list of
+ possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).
+ type: "string"
+ example: "linux"
+ Architecture:
+ description: |
+ Hardware architecture of the host, as returned by the Go runtime
+ (`GOARCH`).
+
+ A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).
+ type: "string"
+ example: "x86_64"
+ NCPU:
+ description: |
+ The number of logical CPUs usable by the daemon.
+
+ The number of available CPUs is checked by querying the operating
+ system when the daemon starts. Changes to operating system CPU
+ allocation after the daemon is started are not reflected.
+ type: "integer"
+ example: 4
+ MemTotal:
+ description: |
+ Total amount of physical memory available on the host, in bytes.
+ type: "integer"
+ format: "int64"
+ example: 2095882240
+
+ IndexServerAddress:
+ description: |
+ Address / URL of the index server that is used for image search,
+ and as a default for user authentication for Docker Hub and Docker Cloud.
+ default: "https://index.docker.io/v1/"
+ type: "string"
+ example: "https://index.docker.io/v1/"
+ RegistryConfig:
+ $ref: "#/definitions/RegistryServiceConfig"
+ GenericResources:
+ $ref: "#/definitions/GenericResources"
+ HttpProxy:
+ description: |
+ HTTP-proxy configured for the daemon. This value is obtained from the
+ [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
+
+ Containers do not automatically inherit this configuration.
+ type: "string"
+ example: "http://user:pass@proxy.corp.example.com:8080"
+ HttpsProxy:
+ description: |
+ HTTPS-proxy configured for the daemon. This value is obtained from the
+ [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
+
+ Containers do not automatically inherit this configuration.
+ type: "string"
+ example: "https://user:pass@proxy.corp.example.com:4443"
+ NoProxy:
+ description: |
+ Comma-separated list of domain extensions for which no proxy should be
+ used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html)
+ environment variable.
+
+ Containers do not automatically inherit this configuration.
+ type: "string"
+ example: "*.local, 169.254/16"
+ Name:
+ description: "Hostname of the host."
+ type: "string"
+ example: "node5.corp.example.com"
+ Labels:
+ description: |
+ User-defined labels (key/value metadata) as set on the daemon.
+
+ <p><br /></p>
+
+ > **Note**: When part of a Swarm, nodes can both have _daemon_ labels,
+ > set through the daemon configuration, and _node_ labels, set from a
+ > manager node in the Swarm. Node labels are not included in this
+ > field. Node labels can be retrieved using the `/nodes/(id)` endpoint
+ > on a manager node in the Swarm.
+ type: "array"
+ items:
+ type: "string"
+ example: ["storage=ssd", "production"]
+ ExperimentalBuild:
+ description: |
+ Indicates if experimental features are enabled on the daemon.
+ type: "boolean"
+ example: true
+ ServerVersion:
+ description: |
+ Version string of the daemon.
+
+ > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/)
+ > returns the Swarm version instead of the daemon version, for example
+ > `swarm/1.2.8`.
+ type: "string"
+ example: "17.06.0-ce"
+ ClusterStore:
+ description: |
+ URL of the distributed storage backend.
+
+
+ The storage backend is used for multihost networking (to store
+ network and endpoint information) and by the node discovery mechanism.
+
+ <p><br /></p>
+
+ > **Note**: This field is only propagated when using standalone Swarm
+ > mode, and overlay networking using an external k/v store. Overlay
+ > networks with Swarm mode enabled use the built-in raft store, and
+ > this field will be empty.
+ type: "string"
+ example: "consul://consul.corp.example.com:8600/some/path"
+ ClusterAdvertise:
+ description: |
+ The network endpoint that the Engine advertises for the purpose of
+ node discovery. ClusterAdvertise is a `host:port` combination on which
+ the daemon is reachable by other hosts.
+
+ <p><br /></p>
+
+ > **Note**: This field is only propagated when using standalone Swarm
+ > mode, and overlay networking using an external k/v store. Overlay
+ > networks with Swarm mode enabled use the built-in raft store, and
+ > this field will be empty.
+ type: "string"
+ example: "node5.corp.example.com:8000"
+ Runtimes:
+ description: |
+ List of [OCI compliant](https://github.com/opencontainers/runtime-spec)
+ runtimes configured on the daemon. Keys hold the "name" used to
+ reference the runtime.
+
+ The Docker daemon relies on an OCI compliant runtime (invoked via the
+ `containerd` daemon) as its interface to the Linux kernel namespaces,
+ cgroups, and SELinux.
+
+ The default runtime is `runc`, and automatically configured. Additional
+ runtimes can be configured by the user and will be listed here.
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/Runtime"
+ default:
+ runc:
+ path: "docker-runc"
+ example:
+ runc:
+ path: "docker-runc"
+ runc-master:
+ path: "/go/bin/runc"
+ custom:
+ path: "/usr/local/bin/my-oci-runtime"
+ runtimeArgs: ["--debug", "--systemd-cgroup=false"]
+ DefaultRuntime:
+ description: |
+ Name of the default OCI runtime that is used when starting containers.
+
+ The default can be overridden per-container at create time.
+ type: "string"
+ default: "runc"
+ example: "runc"
+ Swarm:
+ $ref: "#/definitions/SwarmInfo"
+ LiveRestoreEnabled:
+ description: |
+ Indicates if live restore is enabled.
+
+ If enabled, containers are kept running when the daemon is shutdown
+ or upon daemon start if running containers are detected.
+ type: "boolean"
+ default: false
+ example: false
+ Isolation:
+ description: |
+ Represents the isolation technology to use as a default for containers.
+ The supported values are platform-specific.
+
+ If no isolation value is specified on daemon start, on Windows client,
+ the default is `hyperv`, and on Windows server, the default is `process`.
+
+ This option is currently not used on other platforms.
+ default: "default"
+ type: "string"
+ enum:
+ - "default"
+ - "hyperv"
+ - "process"
+ InitBinary:
+ description: |
+ Name and, optionally, path of the `docker-init` binary.
+
+ If the path is omitted, the daemon searches the host's `$PATH` for the
+ binary and uses the first result.
+ type: "string"
+ example: "docker-init"
+ ContainerdCommit:
+ $ref: "#/definitions/Commit"
+ RuncCommit:
+ $ref: "#/definitions/Commit"
+ InitCommit:
+ $ref: "#/definitions/Commit"
+ SecurityOptions:
+ description: |
+ List of security features that are enabled on the daemon, such as
+ apparmor, seccomp, SELinux, and user-namespaces (userns).
+
+ Additional configuration options for each security feature may
+ be present, and are included as a comma-separated list of key/value
+ pairs.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "name=apparmor"
+ - "name=seccomp,profile=default"
+ - "name=selinux"
+ - "name=userns"
+
+
+ # PluginsInfo is a temp struct holding Plugins name
+ # registered with docker daemon. It is used by Info struct
+ PluginsInfo:
+ description: |
+ Available plugins per type.
+
+ <p><br /></p>
+
+ > **Note**: Only unmanaged (V1) plugins are included in this list.
+ > V1 plugins are "lazily" loaded, and are not returned in this list
+ > if there is no resource using the plugin.
+ type: "object"
+ properties:
+ Volume:
+ description: "Names of available volume-drivers, and volume-driver plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["local"]
+ Network:
+ description: "Names of available network-drivers, and network-driver plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]
+ Authorization:
+ description: "Names of available authorization plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["img-authz-plugin", "hbm"]
+ Log:
+ description: "Names of available logging-drivers, and logging-driver plugins."
+ type: "array"
+ items:
+ type: "string"
+ example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"]
+
+
+ RegistryServiceConfig:
+ description: |
+ RegistryServiceConfig stores daemon registry services configuration.
+ type: "object"
+ x-nullable: true
+ properties:
+ AllowNondistributableArtifactsCIDRs:
+ description: |
+ List of IP ranges to which nondistributable artifacts can be pushed,
+ using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/rfc4632).
+
+ Some images (for example, Windows base images) contain artifacts
+ whose distribution is restricted by license. When these images are
+ pushed to a registry, restricted artifacts are not included.
+
+ This configuration overrides this behavior, and enables the daemon to
+ push nondistributable artifacts to all registries whose resolved IP
+ address is within the subnet described by the CIDR syntax.
+
+ This option is useful when pushing images containing
+ nondistributable artifacts to a registry on an air-gapped network so
+ hosts on that network can pull the images without connecting to
+ another server.
+
+ > **Warning**: Nondistributable artifacts typically have restrictions
+ > on how and where they can be distributed and shared. Only use this
+ > feature to push artifacts to private registries and ensure that you
+ > are in compliance with any terms that cover redistributing
+ > nondistributable artifacts.
+
+ type: "array"
+ items:
+ type: "string"
+ example: ["::1/128", "127.0.0.0/8"]
+ AllowNondistributableArtifactsHostnames:
+ description: |
+ List of registry hostnames to which nondistributable artifacts can be
+ pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
+
+ Some images (for example, Windows base images) contain artifacts
+ whose distribution is restricted by license. When these images are
+ pushed to a registry, restricted artifacts are not included.
+
+ This configuration overrides this behavior for the specified
+ registries.
+
+ This option is useful when pushing images containing
+ nondistributable artifacts to a registry on an air-gapped network so
+ hosts on that network can pull the images without connecting to
+ another server.
+
+ > **Warning**: Nondistributable artifacts typically have restrictions
+ > on how and where they can be distributed and shared. Only use this
+ > feature to push artifacts to private registries and ensure that you
+ > are in compliance with any terms that cover redistributing
+ > nondistributable artifacts.
+ type: "array"
+ items:
+ type: "string"
+ example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"]
+ InsecureRegistryCIDRs:
+ description: |
+ List of IP ranges of insecure registries, using the CIDR syntax
+ ([RFC 4632](https://tools.ietf.org/html/rfc4632)). Insecure registries
+ accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
+ from unknown CAs) communication.
+
+ By default, local registries (`127.0.0.0/8`) are configured as
+ insecure. All other registries are secure. Communicating with an
+ insecure registry is not possible if the daemon assumes that registry
+ is secure.
+
+ This configuration overrides this behavior, allowing insecure
+ communication with registries whose resolved IP address is within the
+ subnet described by the CIDR syntax.
+
+ Registries can also be marked insecure by hostname. Those registries
+ are listed under `IndexConfigs` and have their `Secure` field set to
+ `false`.
+
+ > **Warning**: Using this option can be useful when running a local
+ > registry, but introduces security vulnerabilities. This option
+ > should therefore ONLY be used for testing purposes. For increased
+ > security, users should add their CA to their system's list of trusted
+ > CAs instead of enabling this option.
+ type: "array"
+ items:
+ type: "string"
+ example: ["::1/128", "127.0.0.0/8"]
+ IndexConfigs:
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/IndexInfo"
+ example:
+ "127.0.0.1:5000":
+ "Name": "127.0.0.1:5000"
+ "Mirrors": []
+ "Secure": false
+ "Official": false
+ "[2001:db8:a0b:12f0::1]:80":
+ "Name": "[2001:db8:a0b:12f0::1]:80"
+ "Mirrors": []
+ "Secure": false
+ "Official": false
+ "docker.io":
+ Name: "docker.io"
+ Mirrors: ["https://hub-mirror.corp.example.com:5000/"]
+ Secure: true
+ Official: true
+ "registry.internal.corp.example.com:3000":
+ Name: "registry.internal.corp.example.com:3000"
+ Mirrors: []
+ Secure: false
+ Official: false
+ Mirrors:
+ description: |
+ List of registry URLs that act as a mirror for the official
+ (`docker.io`) registry.
+
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "https://hub-mirror.corp.example.com:5000/"
+ - "https://[2001:db8:a0b:12f0::1]/"
+
+ IndexInfo:
+ description:
+ IndexInfo contains information about a registry.
+ type: "object"
+ x-nullable: true
+ properties:
+ Name:
+ description: |
+ Name of the registry, such as "docker.io".
+ type: "string"
+ example: "docker.io"
+ Mirrors:
+ description: |
+ List of mirrors, expressed as URIs.
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - "https://hub-mirror.corp.example.com:5000/"
+ - "https://registry-2.docker.io/"
+ - "https://registry-3.docker.io/"
+ Secure:
+ description: |
+ Indicates if the registry is part of the list of insecure
+ registries.
+
+ If `false`, the registry is insecure. Insecure registries accept
+ un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from
+ unknown CAs) communication.
+
+ > **Warning**: Insecure registries can be useful when running a local
+ > registry. However, because its use creates security vulnerabilities
+ > it should ONLY be enabled for testing purposes. For increased
+ > security, users should add their CA to their system's list of
+ > trusted CAs instead of enabling this option.
+ type: "boolean"
+ example: true
+ Official:
+ description: |
+ Indicates whether this is an official registry (i.e., Docker Hub / docker.io)
+ type: "boolean"
+ example: true
+
+ Runtime:
+ description: |
+ Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec)
+ runtime.
+
+ The runtime is invoked by the daemon via the `containerd` daemon. OCI
+ runtimes act as an interface to the Linux kernel namespaces, cgroups,
+ and SELinux.
+ type: "object"
+ properties:
+ path:
+ description: |
+ Name and, optionally, path of the OCI executable binary.
+
+ If the path is omitted, the daemon searches the host's `$PATH` for the
+ binary and uses the first result.
+ type: "string"
+ example: "/usr/local/bin/my-oci-runtime"
+ runtimeArgs:
+ description: |
+ List of command-line arguments to pass to the runtime when invoked.
+ type: "array"
+ x-nullable: true
+ items:
+ type: "string"
+ example: ["--debug", "--systemd-cgroup=false"]
+
+ Commit:
+ description: |
+ Commit holds the Git-commit (SHA1) that a binary was built from, as
+ reported in the version-string of external tools, such as `containerd`,
+ or `runC`.
+ type: "object"
+ properties:
+ ID:
+ description: "Actual commit ID of external tool."
+ type: "string"
+ example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
+ Expected:
+ description: |
+ Commit ID of external tool expected by dockerd as set at build time.
+ type: "string"
+ example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4"
+
+ SwarmInfo:
+ description: |
+ Represents generic information about swarm.
+ type: "object"
+ properties:
+ NodeID:
+ description: "Unique identifier for this node in the swarm."
+ type: "string"
+ default: ""
+ example: "k67qz4598weg5unwwffg6z1m1"
+ NodeAddr:
+ description: |
+ IP address at which this node can be reached by other nodes in the
+ swarm.
+ type: "string"
+ default: ""
+ example: "10.0.0.46"
+ LocalNodeState:
+ $ref: "#/definitions/LocalNodeState"
+ ControlAvailable:
+ type: "boolean"
+ default: false
+ example: true
+ Error:
+ type: "string"
+ default: ""
+ RemoteManagers:
+ description: |
+ List of IDs and addresses of other managers in the swarm.
+ type: "array"
+ default: null
+ x-nullable: true
+ items:
+ $ref: "#/definitions/PeerNode"
+ example:
+ - NodeID: "71izy0goik036k48jg985xnds"
+ Addr: "10.0.0.158:2377"
+ - NodeID: "79y6h1o4gv8n120drcprv5nmc"
+ Addr: "10.0.0.159:2377"
+ - NodeID: "k67qz4598weg5unwwffg6z1m1"
+ Addr: "10.0.0.46:2377"
+ Nodes:
+ description: "Total number of nodes in the swarm."
+ type: "integer"
+ x-nullable: true
+ example: 4
+ Managers:
+ description: "Total number of managers in the swarm."
+ type: "integer"
+ x-nullable: true
+ example: 3
+ Cluster:
+ $ref: "#/definitions/ClusterInfo"
+
+ LocalNodeState:
+ description: "Current local status of this node."
+ type: "string"
+ default: ""
+ enum:
+ - ""
+ - "inactive"
+ - "pending"
+ - "active"
+ - "error"
+ - "locked"
+ example: "active"
+
+ PeerNode:
+ description: "Represents a peer-node in the swarm"
+ properties:
+ NodeID:
+ description: "Unique identifier for this node in the swarm."
+ type: "string"
+ Addr:
+ description: |
+ IP address and ports at which this node can be reached.
+ type: "string"
+
+paths:
+ /containers/json:
+ get:
+ summary: "List containers"
+ description: |
+ Returns a list of containers. For details on the format, see [the inspect endpoint](#operation/ContainerInspect).
+
+ Note that it uses a different, smaller representation of a container than inspecting a single container. For example,
+ the list of linked containers is not propagated.
+ operationId: "ContainerList"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "all"
+ in: "query"
+ description: "Return all containers. By default, only running containers are shown"
+ type: "boolean"
+ default: false
+ - name: "limit"
+ in: "query"
+ description: "Return this number of most recently created containers, including non-running ones."
+ type: "integer"
+ - name: "size"
+ in: "query"
+ description: "Return the size of container as fields `SizeRw` and `SizeRootFs`."
+ type: "boolean"
+ default: false
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. Available filters:
+
+ - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`)
+ - `before`=(`<container id>` or `<container name>`)
+ - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
+ - `exited=<int>` containers with exit code of `<int>`
+ - `health`=(`starting`|`healthy`|`unhealthy`|`none`)
+ - `id=<ID>` a container's ID
+ - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
+ - `is-task=`(`true`|`false`)
+ - `label=key` or `label="key=value"` of a container label
+ - `name=<name>` a container's name
+ - `network`=(`<network id>` or `<network name>`)
+ - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
+ - `since`=(`<container id>` or `<container name>`)
+ - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)
+ - `volume`=(`<volume name>` or `<mount point destination>`)
+ type: "string"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/ContainerSummary"
+ examples:
+ application/json:
+ - Id: "8dfafdbc3a40"
+ Names:
+ - "/boring_feynman"
+ Image: "ubuntu:latest"
+ ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+ Command: "echo 1"
+ Created: 1367854155
+ State: "Exited"
+ Status: "Exit 0"
+ Ports:
+ - PrivatePort: 2222
+ PublicPort: 3333
+ Type: "tcp"
+ Labels:
+ com.example.vendor: "Acme"
+ com.example.license: "GPL"
+ com.example.version: "1.0"
+ SizeRw: 12288
+ SizeRootFs: 0
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+ EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f"
+ Gateway: "172.17.0.1"
+ IPAddress: "172.17.0.2"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:11:00:02"
+ Mounts:
+ - Name: "fac362...80535"
+ Source: "/data"
+ Destination: "/data"
+ Driver: "local"
+ Mode: "ro,Z"
+ RW: false
+ Propagation: ""
+ - Id: "9cd87474be90"
+ Names:
+ - "/coolName"
+ Image: "ubuntu:latest"
+ ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+ Command: "echo 222222"
+ Created: 1367854155
+ State: "Exited"
+ Status: "Exit 0"
+ Ports: []
+ Labels: {}
+ SizeRw: 12288
+ SizeRootFs: 0
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+ EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a"
+ Gateway: "172.17.0.1"
+ IPAddress: "172.17.0.8"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:11:00:08"
+ Mounts: []
+ - Id: "3176a2479c92"
+ Names:
+ - "/sleepy_dog"
+ Image: "ubuntu:latest"
+ ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+ Command: "echo 3333333333333333"
+ Created: 1367854154
+ State: "Exited"
+ Status: "Exit 0"
+ Ports: []
+ Labels: {}
+ SizeRw: 12288
+ SizeRootFs: 0
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+ EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d"
+ Gateway: "172.17.0.1"
+ IPAddress: "172.17.0.6"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:11:00:06"
+ Mounts: []
+ - Id: "4cb07b47f9fb"
+ Names:
+ - "/running_cat"
+ Image: "ubuntu:latest"
+ ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
+ Command: "echo 444444444444444444444444444444444"
+ Created: 1367854152
+ State: "Exited"
+ Status: "Exit 0"
+ Ports: []
+ Labels: {}
+ SizeRw: 12288
+ SizeRootFs: 0
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+ EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9"
+ Gateway: "172.17.0.1"
+ IPAddress: "172.17.0.5"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:11:00:05"
+ Mounts: []
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Container"]
+ /containers/create:
+ post:
+ summary: "Create a container"
+ operationId: "ContainerCreate"
+ consumes:
+ - "application/json"
+ - "application/octet-stream"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "name"
+ in: "query"
+ description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`."
+ type: "string"
+ pattern: "/?[a-zA-Z0-9_-]+"
+ - name: "body"
+ in: "body"
+ description: "Container to create"
+ schema:
+ allOf:
+ - $ref: "#/definitions/ContainerConfig"
+ - type: "object"
+ properties:
+ HostConfig:
+ $ref: "#/definitions/HostConfig"
+ NetworkingConfig:
+ description: "This container's networking configuration."
+ type: "object"
+ properties:
+ EndpointsConfig:
+ description: "A mapping of network name to endpoint configuration for that network."
+ type: "object"
+ additionalProperties:
+ $ref: "#/definitions/EndpointSettings"
+ example:
+ Hostname: ""
+ Domainname: ""
+ User: ""
+ AttachStdin: false
+ AttachStdout: true
+ AttachStderr: true
+ Tty: false
+ OpenStdin: false
+ StdinOnce: false
+ Env:
+ - "FOO=bar"
+ - "BAZ=quux"
+ Cmd:
+ - "date"
+ Entrypoint: ""
+ Image: "ubuntu"
+ Labels:
+ com.example.vendor: "Acme"
+ com.example.license: "GPL"
+ com.example.version: "1.0"
+ Volumes:
+ /volumes/data: {}
+ WorkingDir: ""
+ NetworkDisabled: false
+ MacAddress: "12:34:56:78:9a:bc"
+ ExposedPorts:
+ 22/tcp: {}
+ StopSignal: "SIGTERM"
+ StopTimeout: 10
+ HostConfig:
+ Binds:
+ - "/tmp:/tmp"
+ Links:
+ - "redis3:redis"
+ Memory: 0
+ MemorySwap: 0
+ MemoryReservation: 0
+ KernelMemory: 0
+ NanoCPUs: 500000
+ CpuPercent: 80
+ CpuShares: 512
+ CpuPeriod: 100000
+ CpuRealtimePeriod: 1000000
+ CpuRealtimeRuntime: 10000
+ CpuQuota: 50000
+ CpusetCpus: "0,1"
+ CpusetMems: "0,1"
+ MaximumIOps: 0
+ MaximumIOBps: 0
+ BlkioWeight: 300
+ BlkioWeightDevice:
+ - {}
+ BlkioDeviceReadBps:
+ - {}
+ BlkioDeviceReadIOps:
+ - {}
+ BlkioDeviceWriteBps:
+ - {}
+ BlkioDeviceWriteIOps:
+ - {}
+ MemorySwappiness: 60
+ OomKillDisable: false
+ OomScoreAdj: 500
+ PidMode: ""
+ PidsLimit: -1
+ PortBindings:
+ 22/tcp:
+ - HostPort: "11022"
+ PublishAllPorts: false
+ Privileged: false
+ ReadonlyRootfs: false
+ Dns:
+ - "8.8.8.8"
+ DnsOptions:
+ - ""
+ DnsSearch:
+ - ""
+ VolumesFrom:
+ - "parent"
+ - "other:ro"
+ CapAdd:
+ - "NET_ADMIN"
+ CapDrop:
+ - "MKNOD"
+ GroupAdd:
+ - "newgroup"
+ RestartPolicy:
+ Name: ""
+ MaximumRetryCount: 0
+ AutoRemove: true
+ NetworkMode: "bridge"
+ Devices: []
+ Ulimits:
+ - {}
+ LogConfig:
+ Type: "json-file"
+ Config: {}
+ SecurityOpt: []
+ StorageOpt: {}
+ CgroupParent: ""
+ VolumeDriver: ""
+ ShmSize: 67108864
+ NetworkingConfig:
+ EndpointsConfig:
+ isolated_nw:
+ IPAMConfig:
+ IPv4Address: "172.20.30.33"
+ IPv6Address: "2001:db8:abcd::3033"
+ LinkLocalIPs:
+ - "169.254.34.68"
+ - "fe80::3468"
+ Links:
+ - "container_1"
+ - "container_2"
+ Aliases:
+ - "server_x"
+ - "server_y"
+
+ required: true
+ responses:
+ 201:
+ description: "Container created successfully"
+ schema:
+ type: "object"
+ required: [Id, Warnings]
+ properties:
+ Id:
+ description: "The ID of the created container"
+ type: "string"
+ x-nullable: false
+ Warnings:
+ description: "Warnings encountered when creating the container"
+ type: "array"
+ x-nullable: false
+ items:
+ type: "string"
+ examples:
+ application/json:
+ Id: "e90e34656806"
+ Warnings: []
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Container"]
+ /containers/{id}/json:
+ get:
+ summary: "Inspect a container"
+ description: "Return low-level information about a container."
+ operationId: "ContainerInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ properties:
+ Id:
+ description: "The ID of the container"
+ type: "string"
+ Created:
+ description: "The time the container was created"
+ type: "string"
+ Path:
+ description: "The path to the command being run"
+ type: "string"
+ Args:
+ description: "The arguments to the command being run"
+ type: "array"
+ items:
+ type: "string"
+ State:
+ description: "The state of the container."
+ type: "object"
+ properties:
+ Status:
+ description: |
+ The status of the container. For example, `"running"` or `"exited"`.
+ type: "string"
+ enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"]
+ Running:
+ description: |
+ Whether this container is running.
+
+ Note that a running container can be _paused_. The `Running` and `Paused`
+ booleans are not mutually exclusive:
+
+ When pausing a container (on Linux), the cgroups freezer is used to suspend
+ all processes in the container. Freezing the process requires the process to
+ be running. As a result, paused containers are both `Running` _and_ `Paused`.
+
+ Use the `Status` field instead to determine if a container's state is "running".
+ type: "boolean"
+ Paused:
+ description: "Whether this container is paused."
+ type: "boolean"
+ Restarting:
+ description: "Whether this container is restarting."
+ type: "boolean"
+ OOMKilled:
+ description: "Whether this container has been killed because it ran out of memory."
+ type: "boolean"
+ Dead:
+ type: "boolean"
+ Pid:
+ description: "The process ID of this container"
+ type: "integer"
+ ExitCode:
+ description: "The last exit code of this container"
+ type: "integer"
+ Error:
+ type: "string"
+ StartedAt:
+ description: "The time when this container was last started."
+ type: "string"
+ FinishedAt:
+ description: "The time when this container last exited."
+ type: "string"
+ Image:
+ description: "The container's image"
+ type: "string"
+ ResolvConfPath:
+ type: "string"
+ HostnamePath:
+ type: "string"
+ HostsPath:
+ type: "string"
+ LogPath:
+ type: "string"
+ Node:
+ description: "TODO"
+ type: "object"
+ Name:
+ type: "string"
+ RestartCount:
+ type: "integer"
+ Driver:
+ type: "string"
+ MountLabel:
+ type: "string"
+ ProcessLabel:
+ type: "string"
+ AppArmorProfile:
+ type: "string"
+ ExecIDs:
+ type: "string"
+ HostConfig:
+ $ref: "#/definitions/HostConfig"
+ GraphDriver:
+ $ref: "#/definitions/GraphDriverData"
+ SizeRw:
+ description: "The size of files that have been created or changed by this container."
+ type: "integer"
+ format: "int64"
+ SizeRootFs:
+ description: "The total size of all the files in this container."
+ type: "integer"
+ format: "int64"
+ Mounts:
+ type: "array"
+ items:
+ $ref: "#/definitions/MountPoint"
+ Config:
+ $ref: "#/definitions/ContainerConfig"
+ NetworkSettings:
+ $ref: "#/definitions/NetworkSettings"
+ examples:
+ application/json:
+ AppArmorProfile: ""
+ Args:
+ - "-c"
+ - "exit 9"
+ Config:
+ AttachStderr: true
+ AttachStdin: false
+ AttachStdout: true
+ Cmd:
+ - "/bin/sh"
+ - "-c"
+ - "exit 9"
+ Domainname: ""
+ Env:
+ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ Hostname: "ba033ac44011"
+ Image: "ubuntu"
+ Labels:
+ com.example.vendor: "Acme"
+ com.example.license: "GPL"
+ com.example.version: "1.0"
+ MacAddress: ""
+ NetworkDisabled: false
+ OpenStdin: false
+ StdinOnce: false
+ Tty: false
+ User: ""
+ Volumes:
+ /volumes/data: {}
+ WorkingDir: ""
+ StopSignal: "SIGTERM"
+ StopTimeout: 10
+ Created: "2015-01-06T15:47:31.485331387Z"
+ Driver: "devicemapper"
+ HostConfig:
+ MaximumIOps: 0
+ MaximumIOBps: 0
+ BlkioWeight: 0
+ BlkioWeightDevice:
+ - {}
+ BlkioDeviceReadBps:
+ - {}
+ BlkioDeviceWriteBps:
+ - {}
+ BlkioDeviceReadIOps:
+ - {}
+ BlkioDeviceWriteIOps:
+ - {}
+ ContainerIDFile: ""
+ CpusetCpus: ""
+ CpusetMems: ""
+ CpuPercent: 80
+ CpuShares: 0
+ CpuPeriod: 100000
+ CpuRealtimePeriod: 1000000
+ CpuRealtimeRuntime: 10000
+ Devices: []
+ IpcMode: ""
+ LxcConf: []
+ Memory: 0
+ MemorySwap: 0
+ MemoryReservation: 0
+ KernelMemory: 0
+ OomKillDisable: false
+ OomScoreAdj: 500
+ NetworkMode: "bridge"
+ PidMode: ""
+ PortBindings: {}
+ Privileged: false
+ ReadonlyRootfs: false
+ PublishAllPorts: false
+ RestartPolicy:
+ MaximumRetryCount: 2
+ Name: "on-failure"
+ LogConfig:
+ Type: "json-file"
+ Sysctls:
+ net.ipv4.ip_forward: "1"
+ Ulimits:
+ - {}
+ VolumeDriver: ""
+ ShmSize: 67108864
+ HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname"
+ HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts"
+ LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log"
+ Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39"
+ Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2"
+ MountLabel: ""
+ Name: "/boring_euclid"
+ NetworkSettings:
+ Bridge: ""
+ SandboxID: ""
+ HairpinMode: false
+ LinkLocalIPv6Address: ""
+ LinkLocalIPv6PrefixLen: 0
+ SandboxKey: ""
+ EndpointID: ""
+ Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ IPAddress: ""
+ IPPrefixLen: 0
+ IPv6Gateway: ""
+ MacAddress: ""
+ Networks:
+ bridge:
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
+ EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d"
+ Gateway: "172.17.0.1"
+ IPAddress: "172.17.0.2"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:12:00:02"
+ Path: "/bin/sh"
+ ProcessLabel: ""
+ ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf"
+ RestartCount: 1
+ State:
+ Error: ""
+ ExitCode: 9
+ FinishedAt: "2015-01-06T15:47:32.080254511Z"
+ OOMKilled: false
+ Dead: false
+ Paused: false
+ Pid: 0
+ Restarting: false
+ Running: true
+ StartedAt: "2015-01-06T15:47:32.072697474Z"
+ Status: "running"
+ Mounts:
+ - Name: "fac362...80535"
+ Source: "/data"
+ Destination: "/data"
+ Driver: "local"
+ Mode: "ro,Z"
+ RW: false
+ Propagation: ""
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "size"
+ in: "query"
+ type: "boolean"
+ default: false
+ description: "Return the size of container as fields `SizeRw` and `SizeRootFs`"
+ tags: ["Container"]
+ /containers/{id}/top:
+ get:
+ summary: "List processes running inside a container"
+ description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows."
+ operationId: "ContainerTop"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ properties:
+ Titles:
+ description: "The ps column titles"
+ type: "array"
+ items:
+ type: "string"
+ Processes:
+ description: "Each process running in the container, where each process is an array of values corresponding to the titles"
+ type: "array"
+ items:
+ type: "array"
+ items:
+ type: "string"
+ examples:
+ application/json:
+ Titles:
+ - "UID"
+ - "PID"
+ - "PPID"
+ - "C"
+ - "STIME"
+ - "TTY"
+ - "TIME"
+ - "CMD"
+ Processes:
+ -
+ - "root"
+ - "13642"
+ - "882"
+ - "0"
+ - "17:03"
+ - "pts/0"
+ - "00:00:00"
+ - "/bin/bash"
+ -
+ - "root"
+ - "13735"
+ - "13642"
+ - "0"
+ - "17:06"
+ - "pts/0"
+ - "00:00:00"
+ - "sleep 10"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "ps_args"
+ in: "query"
+ description: "The arguments to pass to `ps`. For example, `aux`"
+ type: "string"
+ default: "-ef"
+ tags: ["Container"]
+ /containers/{id}/logs:
+ get:
+ summary: "Get container logs"
+ description: |
+ Get `stdout` and `stderr` logs from a container.
+
+ Note: This endpoint works only for containers with the `json-file` or `journald` logging driver.
+ operationId: "ContainerLogs"
+ responses:
+ 101:
+ description: "logs returned as a stream"
+ schema:
+ type: "string"
+ format: "binary"
+ 200:
+ description: "logs returned as a string in response body"
+ schema:
+ type: "string"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "follow"
+ in: "query"
+ description: |
+ Return the logs as a stream.
+
+ This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Return logs from `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Return logs from `stderr`"
+ type: "boolean"
+ default: false
+ - name: "since"
+ in: "query"
+ description: "Only return logs since this time, as a UNIX timestamp"
+ type: "integer"
+ default: 0
+ - name: "timestamps"
+ in: "query"
+ description: "Add timestamps to every log line"
+ type: "boolean"
+ default: false
+ - name: "tail"
+ in: "query"
+ description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
+ type: "string"
+ default: "all"
+ tags: ["Container"]
+ /containers/{id}/changes:
+ get:
+ summary: "Get changes on a container’s filesystem"
+ description: |
+ Returns which files in a container's filesystem have been added, deleted,
+ or modified. The `Kind` of modification can be one of:
+
+ - `0`: Modified
+ - `1`: Added
+ - `2`: Deleted
+ operationId: "ContainerChanges"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "The list of changes"
+ schema:
+ type: "array"
+ items:
+ type: "object"
+ x-go-name: "ContainerChangeResponseItem"
+ required: [Path, Kind]
+ properties:
+ Path:
+ description: "Path to file that has changed"
+ type: "string"
+ x-nullable: false
+ Kind:
+ description: "Kind of change"
+ type: "integer"
+ format: "uint8"
+ enum: [0, 1, 2]
+ x-nullable: false
+ examples:
+ application/json:
+ - Path: "/dev"
+ Kind: 0
+ - Path: "/dev/kmsg"
+ Kind: 1
+ - Path: "/test"
+ Kind: 1
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/export:
+ get:
+ summary: "Export a container"
+ description: "Export the contents of a container as a tarball."
+ operationId: "ContainerExport"
+ produces:
+ - "application/octet-stream"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/stats:
+ get:
+ summary: "Get container stats based on resource usage"
+ description: |
+ This endpoint returns a live stream of a container’s resource usage
+ statistics.
+
+ The `precpu_stats` is the CPU statistic of last read, which is used
+ for calculating the CPU usage percentage. It is not the same as the
+ `cpu_stats` field.
+
+ If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is
+ nil then for compatibility with older daemons the length of the
+ corresponding `cpu_usage.percpu_usage` array should be used.
+ operationId: "ContainerStats"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ examples:
+ application/json:
+ read: "2015-01-08T22:57:31.547920715Z"
+ pids_stats:
+ current: 3
+ networks:
+ eth0:
+ rx_bytes: 5338
+ rx_dropped: 0
+ rx_errors: 0
+ rx_packets: 36
+ tx_bytes: 648
+ tx_dropped: 0
+ tx_errors: 0
+ tx_packets: 8
+ eth5:
+ rx_bytes: 4641
+ rx_dropped: 0
+ rx_errors: 0
+ rx_packets: 26
+ tx_bytes: 690
+ tx_dropped: 0
+ tx_errors: 0
+ tx_packets: 9
+ memory_stats:
+ stats:
+ total_pgmajfault: 0
+ cache: 0
+ mapped_file: 0
+ total_inactive_file: 0
+ pgpgout: 414
+ rss: 6537216
+ total_mapped_file: 0
+ writeback: 0
+ unevictable: 0
+ pgpgin: 477
+ total_unevictable: 0
+ pgmajfault: 0
+ total_rss: 6537216
+ total_rss_huge: 6291456
+ total_writeback: 0
+ total_inactive_anon: 0
+ rss_huge: 6291456
+ hierarchical_memory_limit: 67108864
+ total_pgfault: 964
+ total_active_file: 0
+ active_anon: 6537216
+ total_active_anon: 6537216
+ total_pgpgout: 414
+ total_cache: 0
+ inactive_anon: 0
+ active_file: 0
+ pgfault: 964
+ inactive_file: 0
+ total_pgpgin: 477
+ max_usage: 6651904
+ usage: 6537216
+ failcnt: 0
+ limit: 67108864
+ blkio_stats: {}
+ cpu_stats:
+ cpu_usage:
+ percpu_usage:
+ - 8646879
+ - 24472255
+ - 36438778
+ - 30657443
+ usage_in_usermode: 50000000
+ total_usage: 100215355
+ usage_in_kernelmode: 30000000
+ system_cpu_usage: 739306590000000
+ online_cpus: 4
+ throttling_data:
+ periods: 0
+ throttled_periods: 0
+ throttled_time: 0
+ precpu_stats:
+ cpu_usage:
+ percpu_usage:
+ - 8646879
+ - 24350896
+ - 36438778
+ - 30657443
+ usage_in_usermode: 50000000
+ total_usage: 100093996
+ usage_in_kernelmode: 30000000
+ system_cpu_usage: 9492140000000
+ online_cpus: 4
+ throttling_data:
+ periods: 0
+ throttled_periods: 0
+ throttled_time: 0
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "stream"
+ in: "query"
+ description: "Stream the output. If false, the stats will be output once and then it will disconnect."
+ type: "boolean"
+ default: true
+ tags: ["Container"]
+ /containers/{id}/resize:
+ post:
+ summary: "Resize a container TTY"
+ description: "Resize the TTY for a container. You must restart the container for the resize to take effect."
+ operationId: "ContainerResize"
+ consumes:
+ - "application/octet-stream"
+ produces:
+ - "text/plain"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "cannot resize container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "h"
+ in: "query"
+ description: "Height of the tty session in characters"
+ type: "integer"
+ - name: "w"
+ in: "query"
+ description: "Width of the tty session in characters"
+ type: "integer"
+ tags: ["Container"]
+ /containers/{id}/start:
+ post:
+ summary: "Start a container"
+ operationId: "ContainerStart"
+ responses:
+ 204:
+ description: "no error"
+ 304:
+ description: "container already started"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`."
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/stop:
+ post:
+ summary: "Stop a container"
+ operationId: "ContainerStop"
+ responses:
+ 204:
+ description: "no error"
+ 304:
+ description: "container already stopped"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "t"
+ in: "query"
+ description: "Number of seconds to wait before killing the container"
+ type: "integer"
+ tags: ["Container"]
+ /containers/{id}/restart:
+ post:
+ summary: "Restart a container"
+ operationId: "ContainerRestart"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "t"
+ in: "query"
+ description: "Number of seconds to wait before killing the container"
+ type: "integer"
+ tags: ["Container"]
+ /containers/{id}/kill:
+ post:
+ summary: "Kill a container"
+ description: "Send a POSIX signal to a container, defaulting to killing the container."
+ operationId: "ContainerKill"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "signal"
+ in: "query"
+ description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)"
+ type: "string"
+ default: "SIGKILL"
+ tags: ["Container"]
+ /containers/{id}/update:
+ post:
+ summary: "Update a container"
+ description: "Change various configuration options of a container without having to recreate it."
+ operationId: "ContainerUpdate"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "The container has been updated."
+ schema:
+ type: "object"
+ properties:
+ Warnings:
+ type: "array"
+ items:
+ type: "string"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "update"
+ in: "body"
+ required: true
+ schema:
+ allOf:
+ - $ref: "#/definitions/Resources"
+ - type: "object"
+ properties:
+ RestartPolicy:
+ $ref: "#/definitions/RestartPolicy"
+ example:
+ BlkioWeight: 300
+ CpuShares: 512
+ CpuPeriod: 100000
+ CpuQuota: 50000
+ CpuRealtimePeriod: 1000000
+ CpuRealtimeRuntime: 10000
+ CpusetCpus: "0,1"
+ CpusetMems: "0"
+ Memory: 314572800
+ MemorySwap: 514288000
+ MemoryReservation: 209715200
+ KernelMemory: 52428800
+ RestartPolicy:
+ MaximumRetryCount: 4
+ Name: "on-failure"
+ tags: ["Container"]
+ /containers/{id}/rename:
+ post:
+ summary: "Rename a container"
+ operationId: "ContainerRename"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "name already in use"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "name"
+ in: "query"
+ required: true
+ description: "New name for the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/pause:
+ post:
+ summary: "Pause a container"
+ description: |
+ Use the cgroups freezer to suspend all processes in a container.
+
+ Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed.
+ operationId: "ContainerPause"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/unpause:
+ post:
+ summary: "Unpause a container"
+ description: "Resume a container which has been paused."
+ operationId: "ContainerUnpause"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ tags: ["Container"]
+ /containers/{id}/attach:
+ post:
+ summary: "Attach to a container"
+ description: |
+ Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached.
+
+ Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything.
+
+ See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details.
+
+ ### Hijacking
+
+ This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket.
+
+ This is the response from the daemon for an attach request:
+
+ ```
+ HTTP/1.1 200 OK
+ Content-Type: application/vnd.docker.raw-stream
+
+ [STREAM]
+ ```
+
+ After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server.
+
+ To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers.
+
+ For example, the client sends this request to upgrade the connection:
+
+ ```
+ POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1
+ Upgrade: tcp
+ Connection: Upgrade
+ ```
+
+ The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream:
+
+ ```
+ HTTP/1.1 101 UPGRADED
+ Content-Type: application/vnd.docker.raw-stream
+ Connection: Upgrade
+ Upgrade: tcp
+
+ [STREAM]
+ ```
+
+ ### Stream format
+
+ When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connection is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload.
+
+ The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`).
+
+ It is encoded on the first eight bytes like this:
+
+ ```go
+ header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
+ ```
+
+ `STREAM_TYPE` can be:
+
+ - 0: `stdin` (is written on `stdout`)
+ - 1: `stdout`
+ - 2: `stderr`
+
+ `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian.
+
+ Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`.
+
+ The simplest way to implement this protocol is the following:
+
+ 1. Read 8 bytes.
+ 2. Choose `stdout` or `stderr` depending on the first byte.
+ 3. Extract the frame size from the last four bytes.
+ 4. Read the extracted size and output it on the correct output.
+ 5. Goto 1.
+
+ ### Stream format when using a TTY
+
+ When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`.
+
+ operationId: "ContainerAttach"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ responses:
+ 101:
+ description: "no error, hints proxy about hijacking"
+ 200:
+ description: "no error, no upgrade header found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`."
+ type: "string"
+ - name: "logs"
+ in: "query"
+ description: |
+ Replay previous logs from the container.
+
+ This is useful for attaching to a container that has started and you want to output everything since the container started.
+
+ If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output.
+ type: "boolean"
+ default: false
+ - name: "stream"
+ in: "query"
+ description: "Stream attached streams from the time the request was made onwards"
+ type: "boolean"
+ default: false
+ - name: "stdin"
+ in: "query"
+ description: "Attach to `stdin`"
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Attach to `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Attach to `stderr`"
+ type: "boolean"
+ default: false
+ tags: ["Container"]
+ /containers/{id}/attach/ws:
+ get:
+ summary: "Attach to a container via a websocket"
+ operationId: "ContainerAttachWebsocket"
+ responses:
+ 101:
+ description: "no error, hints proxy about hijacking"
+ 200:
+ description: "no error, no upgrade header found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`."
+ type: "string"
+ - name: "logs"
+ in: "query"
+ description: "Return logs"
+ type: "boolean"
+ default: false
+ - name: "stream"
+ in: "query"
+ description: "Return stream"
+ type: "boolean"
+ default: false
+ - name: "stdin"
+ in: "query"
+ description: "Attach to `stdin`"
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Attach to `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Attach to `stderr`"
+ type: "boolean"
+ default: false
+ tags: ["Container"]
+ /containers/{id}/wait:
+ post:
+ summary: "Wait for a container"
+ description: "Block until a container stops, then returns the exit code."
+ operationId: "ContainerWait"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "The container has exited."
+ schema:
+ type: "object"
+ required: [StatusCode]
+ properties:
+ StatusCode:
+ description: "Exit code of the container"
+ type: "integer"
+ x-nullable: false
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "condition"
+ in: "query"
+ description: "Wait until a container state reaches the given condition, either 'not-running' (default), 'next-exit', or 'removed'."
+ type: "string"
+ default: "not-running"
+ tags: ["Container"]
+ /containers/{id}:
+ delete:
+ summary: "Remove a container"
+ operationId: "ContainerDelete"
+ responses:
+ 204:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "You cannot remove a running container: c2ada9df5af8. Stop the container before attempting removal or force remove"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "v"
+ in: "query"
+ description: "Remove the volumes associated with the container."
+ type: "boolean"
+ default: false
+ - name: "force"
+ in: "query"
+ description: "If the container is running, kill it before removing it."
+ type: "boolean"
+ default: false
+ - name: "link"
+ in: "query"
+ description: "Remove the specified link associated with the container."
+ type: "boolean"
+ default: false
+ tags: ["Container"]
+ /containers/{id}/archive:
+ head:
+ summary: "Get information about files in a container"
+ description: "A response header `X-Docker-Container-Path-Stat` is returned, containing a base64-encoded JSON object with some filesystem header information about the path."
+ operationId: "ContainerArchiveInfo"
+ responses:
+ 200:
+ description: "no error"
+ headers:
+ X-Docker-Container-Path-Stat:
+ type: "string"
+ description: "TODO"
+ 400:
+ description: "Bad parameter"
+ schema:
+ allOf:
+ - $ref: "#/definitions/ErrorResponse"
+ - type: "object"
+ properties:
+ message:
+ description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)."
+ type: "string"
+ x-nullable: false
+ 404:
+ description: "Container or path does not exist"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "path"
+ in: "query"
+ required: true
+ description: "Resource in the container’s filesystem to archive."
+ type: "string"
+ tags: ["Container"]
+ get:
+ summary: "Get an archive of a filesystem resource in a container"
+ description: "Get a tar archive of a resource in the filesystem of container id."
+ operationId: "ContainerArchive"
+ produces: ["application/x-tar"]
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "Bad parameter"
+ schema:
+ allOf:
+ - $ref: "#/definitions/ErrorResponse"
+ - type: "object"
+ properties:
+ message:
+ description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)."
+ type: "string"
+ x-nullable: false
+ 404:
+ description: "Container or path does not exist"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "path"
+ in: "query"
+ required: true
+ description: "Resource in the container’s filesystem to archive."
+ type: "string"
+ tags: ["Container"]
+ put:
+ summary: "Extract an archive of files or folders to a directory in a container"
+ description: "Upload a tar archive to be extracted to a path in the filesystem of container id."
+ operationId: "PutContainerArchive"
+ consumes: ["application/x-tar", "application/octet-stream"]
+ responses:
+ 200:
+ description: "The content was extracted successfully"
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 403:
+ description: "Permission denied, the volume or container rootfs is marked as read-only."
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "No such container or path does not exist inside the container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "path"
+ in: "query"
+ required: true
+ description: "Path to a directory in the container to extract the archive’s contents into. "
+ type: "string"
+ - name: "noOverwriteDirNonDir"
+ in: "query"
+ description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa."
+ type: "string"
+ - name: "inputStream"
+ in: "body"
+ required: true
+ description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
+ schema:
+ type: "string"
+ tags: ["Container"]
+ /containers/prune:
+ post:
+ summary: "Delete stopped containers"
+ produces:
+ - "application/json"
+ operationId: "ContainerPrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels.
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ properties:
+ ContainersDeleted:
+ description: "Container IDs that were deleted"
+ type: "array"
+ items:
+ type: "string"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Container"]
+ /images/json:
+ get:
+ summary: "List Images"
+ description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image."
+ operationId: "ImageList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "Summary image data for the images matching the query"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageSummary"
+ examples:
+ application/json:
+ - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
+ ParentId: ""
+ RepoTags:
+ - "ubuntu:12.04"
+ - "ubuntu:precise"
+ RepoDigests:
+ - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787"
+ Created: 1474925151
+ Size: 103579269
+ VirtualSize: 103579269
+ SharedSize: 0
+ Labels: {}
+ Containers: 2
+ - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175"
+ ParentId: ""
+ RepoTags:
+ - "ubuntu:12.10"
+ - "ubuntu:quantal"
+ RepoDigests:
+ - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7"
+ - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3"
+ Created: 1403128455
+ Size: 172064416
+ VirtualSize: 172064416
+ SharedSize: 0
+ Labels: {}
+ Containers: 5
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "all"
+ in: "query"
+ description: "Show all images. Only images from a final layer (no children) are shown by default."
+ type: "boolean"
+ default: false
+ - name: "filters"
+ in: "query"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
+
+ - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+ - `dangling=true`
+ - `label=key` or `label="key=value"` of an image label
+ - `reference`=(`<image-name>[:<tag>]`)
+ - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+ type: "string"
+ - name: "digests"
+ in: "query"
+ description: "Show digest information as a `RepoDigests` field on each image."
+ type: "boolean"
+ default: false
+ tags: ["Image"]
+ /build:
+ post:
+ summary: "Build an image"
+ description: |
+ Build an image from a tar archive with a `Dockerfile` in it.
+
+ The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/).
+
+ The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output.
+
+ The build is canceled if the client drops the connection by quitting or being killed.
+ operationId: "ImageBuild"
+ consumes:
+ - "application/octet-stream"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "inputStream"
+ in: "body"
+ description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
+ schema:
+ type: "string"
+ format: "binary"
+ - name: "dockerfile"
+ in: "query"
+ description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`."
+ type: "string"
+ default: "Dockerfile"
+ - name: "t"
+ in: "query"
+ description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters."
+ type: "string"
+ - name: "extrahosts"
+ in: "query"
+ description: "Extra hosts to add to /etc/hosts"
+ type: "string"
+ - name: "remote"
+ in: "query"
+ description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball."
+ type: "string"
+ - name: "q"
+ in: "query"
+ description: "Suppress verbose build output."
+ type: "boolean"
+ default: false
+ - name: "nocache"
+ in: "query"
+ description: "Do not use the cache when building the image."
+ type: "boolean"
+ default: false
+ - name: "cachefrom"
+ in: "query"
+ description: "JSON array of images used for build cache resolution."
+ type: "string"
+ - name: "pull"
+ in: "query"
+ description: "Attempt to pull the image even if an older image exists locally."
+ type: "string"
+ - name: "rm"
+ in: "query"
+ description: "Remove intermediate containers after a successful build."
+ type: "boolean"
+ default: true
+ - name: "forcerm"
+ in: "query"
+ description: "Always remove intermediate containers, even upon failure."
+ type: "boolean"
+ default: false
+ - name: "memory"
+ in: "query"
+ description: "Set memory limit for build."
+ type: "integer"
+ - name: "memswap"
+ in: "query"
+ description: "Total memory (memory + swap). Set as `-1` to disable swap."
+ type: "integer"
+ - name: "cpushares"
+ in: "query"
+ description: "CPU shares (relative weight)."
+ type: "integer"
+ - name: "cpusetcpus"
+ in: "query"
+ description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)."
+ type: "string"
+ - name: "cpuperiod"
+ in: "query"
+ description: "The length of a CPU period in microseconds."
+ type: "integer"
+ - name: "cpuquota"
+ in: "query"
+ description: "Microseconds of CPU time that the container can get in a CPU period."
+ type: "integer"
+ - name: "buildargs"
+ in: "query"
+ description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)"
+ type: "integer"
+ - name: "shmsize"
+ in: "query"
+ description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB."
+ type: "integer"
+ - name: "squash"
+ in: "query"
+ description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*"
+ type: "boolean"
+ - name: "labels"
+ in: "query"
+ description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs."
+ type: "string"
+ - name: "networkmode"
+ in: "query"
+ description: "Sets the networking mode for the run commands during
+ build. Supported standard values are: `bridge`, `host`, `none`, and
+ `container:<name|id>`. Any other value is taken as a custom network's
+ name to which this container should connect to."
+ type: "string"
+ - name: "Content-type"
+ in: "header"
+ type: "string"
+ enum:
+ - "application/x-tar"
+ default: "application/x-tar"
+ - name: "X-Registry-Config"
+ in: "header"
+ description: |
+ This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to.
+
+ The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example:
+
+ ```
+ {
+ "docker.example.com": {
+ "username": "janedoe",
+ "password": "hunter2"
+ },
+ "https://index.docker.io/v1/": {
+ "username": "mobydock",
+ "password": "conta1n3rize14"
+ }
+ }
+ ```
+
+ Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API.
+ type: "string"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Image"]
+ /build/prune:
+ post:
+ summary: "Delete builder cache"
+ produces:
+ - "application/json"
+ operationId: "BuildPrune"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ properties:
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Image"]
+ /images/create:
+ post:
+ summary: "Create an image"
+ description: "Create an image by either pulling it from a registry or importing it."
+ operationId: "ImageCreate"
+ consumes:
+ - "text/plain"
+ - "application/octet-stream"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "repository does not exist or no read access"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "fromImage"
+ in: "query"
+ description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed."
+ type: "string"
+ - name: "fromSrc"
+ in: "query"
+ description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image."
+ type: "string"
+ - name: "repo"
+ in: "query"
+ description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image."
+ type: "string"
+ - name: "tag"
+ in: "query"
+ description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled."
+ type: "string"
+ - name: "inputImage"
+ in: "body"
+ description: "Image content if the value `-` has been specified in fromSrc query parameter"
+ schema:
+ type: "string"
+ required: false
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+ tags: ["Image"]
+ /images/{name}/json:
+ get:
+ summary: "Inspect an image"
+ description: "Return low-level information about an image."
+ operationId: "ImageInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/Image"
+ examples:
+ application/json:
+ Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c"
+ Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a"
+ Comment: ""
+ Os: "linux"
+ Architecture: "amd64"
+ Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"
+ ContainerConfig:
+ Tty: false
+ Hostname: "e611e15f9c9d"
+ Domainname: ""
+ AttachStdout: false
+ PublishService: ""
+ AttachStdin: false
+ OpenStdin: false
+ StdinOnce: false
+ NetworkDisabled: false
+ OnBuild: []
+ Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"
+ User: ""
+ WorkingDir: ""
+ MacAddress: ""
+ AttachStderr: false
+ Labels:
+ com.example.license: "GPL"
+ com.example.version: "1.0"
+ com.example.vendor: "Acme"
+ Env:
+ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ Cmd:
+ - "/bin/sh"
+ - "-c"
+ - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0"
+ DockerVersion: "1.9.0-dev"
+ VirtualSize: 188359297
+ Size: 0
+ Author: ""
+ Created: "2015-09-10T08:30:53.26995814Z"
+ GraphDriver:
+ Name: "aufs"
+ Data: {}
+ RepoDigests:
+ - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"
+ RepoTags:
+ - "example:1.0"
+ - "example:latest"
+ - "example:stable"
+ Config:
+ Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"
+ NetworkDisabled: false
+ OnBuild: []
+ StdinOnce: false
+ PublishService: ""
+ AttachStdin: false
+ OpenStdin: false
+ Domainname: ""
+ AttachStdout: false
+ Tty: false
+ Hostname: "e611e15f9c9d"
+ Cmd:
+ - "/bin/bash"
+ Env:
+ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ Labels:
+ com.example.vendor: "Acme"
+ com.example.version: "1.0"
+ com.example.license: "GPL"
+ MacAddress: ""
+ AttachStderr: false
+ WorkingDir: ""
+ User: ""
+ RootFS:
+ Type: "layers"
+ Layers:
+ - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6"
+ - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: someimage (tag: latest)"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or id"
+ type: "string"
+ required: true
+ tags: ["Image"]
+ /images/{name}/history:
+ get:
+ summary: "Get the history of an image"
+ description: "Return parent layers of an image."
+ operationId: "ImageHistory"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "List of image layers"
+ schema:
+ type: "array"
+ items:
+ type: "object"
+ x-go-name: HistoryResponseItem
+ required: [Id, Created, CreatedBy, Tags, Size, Comment]
+ properties:
+ Id:
+ type: "string"
+ x-nullable: false
+ Created:
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ CreatedBy:
+ type: "string"
+ x-nullable: false
+ Tags:
+ type: "array"
+ items:
+ type: "string"
+ Size:
+ type: "integer"
+ format: "int64"
+ x-nullable: false
+ Comment:
+ type: "string"
+ x-nullable: false
+ examples:
+ application/json:
+ - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710"
+ Created: 1398108230
+ CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /"
+ Tags:
+ - "ubuntu:lucid"
+ - "ubuntu:10.04"
+ Size: 182964289
+ Comment: ""
+ - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8"
+ Created: 1398108222
+ CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/"
+ Tags: []
+ Size: 0
+ Comment: ""
+ - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"
+ Created: 1371157430
+ CreatedBy: ""
+ Tags:
+ - "scratch12:latest"
+ - "scratch:latest"
+ Size: 0
+ Comment: "Imported from -"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID"
+ type: "string"
+ required: true
+ tags: ["Image"]
+ /images/{name}/push:
+ post:
+ summary: "Push an image"
+ description: |
+ Push an image to a registry.
+
+ If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`.
+
+ The push is cancelled if the HTTP connection is closed.
+ operationId: "ImagePush"
+ consumes:
+ - "application/octet-stream"
+ responses:
+ 200:
+ description: "No error"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID."
+ type: "string"
+ required: true
+ - name: "tag"
+ in: "query"
+ description: "The tag to associate with the image on the registry."
+ type: "string"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+ required: true
+ tags: ["Image"]
+ /images/{name}/tag:
+ post:
+ summary: "Tag an image"
+ description: "Tag an image so that it becomes part of a repository."
+ operationId: "ImageTag"
+ responses:
+ 201:
+ description: "No error"
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID to tag."
+ type: "string"
+ required: true
+ - name: "repo"
+ in: "query"
+ description: "The repository to tag in. For example, `someuser/someimage`."
+ type: "string"
+ - name: "tag"
+ in: "query"
+ description: "The name of the new tag."
+ type: "string"
+ tags: ["Image"]
+ /images/{name}:
+ delete:
+ summary: "Remove an image"
+ description: |
+ Remove an image, along with any untagged parent images that were
+ referenced by that image.
+
+ Images can't be removed if they have descendant images, are being
+ used by a running container or are being used by a build.
+ operationId: "ImageDelete"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "The image was deleted successfully"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageDeleteResponseItem"
+ examples:
+ application/json:
+ - Untagged: "3e2f21a89f"
+ - Deleted: "3e2f21a89f"
+ - Deleted: "53b4f83ac9"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Conflict"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID"
+ type: "string"
+ required: true
+ - name: "force"
+ in: "query"
+ description: "Remove the image even if it is being used by stopped containers or has other tags"
+ type: "boolean"
+ default: false
+ - name: "noprune"
+ in: "query"
+ description: "Do not delete untagged parent images"
+ type: "boolean"
+ default: false
+ tags: ["Image"]
+ /images/search:
+ get:
+ summary: "Search images"
+ description: "Search for an image on Docker Hub."
+ operationId: "ImageSearch"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "array"
+ items:
+ type: "object"
+ properties:
+ description:
+ type: "string"
+ is_official:
+ type: "boolean"
+ is_automated:
+ type: "boolean"
+ name:
+ type: "string"
+ star_count:
+ type: "integer"
+ examples:
+ application/json:
+ - description: ""
+ is_official: false
+ is_automated: false
+ name: "wma55/u1210sshd"
+ star_count: 0
+ - description: ""
+ is_official: false
+ is_automated: false
+ name: "jdswinbank/sshd"
+ star_count: 0
+ - description: ""
+ is_official: false
+ is_automated: false
+ name: "vgauthier/sshd"
+ star_count: 0
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "term"
+ in: "query"
+ description: "Term to search"
+ type: "string"
+ required: true
+ - name: "limit"
+ in: "query"
+ description: "Maximum number of results to return"
+ type: "integer"
+ - name: "filters"
+ in: "query"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
+
+ - `is-automated=(true|false)`
+ - `is-official=(true|false)`
+ - `stars=<number>` Matches images that have at least 'number' stars.
+ type: "string"
+ tags: ["Image"]
+ /images/prune:
+ post:
+ summary: "Delete unused images"
+ produces:
+ - "application/json"
+ operationId: "ImagePrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters:
+
+ - `dangling=<boolean>` When set to `true` (or `1`), prune only
+ unused *and* untagged images. When set to `false`
+ (or `0`), all unused images are pruned.
+ - `until=<timestamp>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels.
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ properties:
+ ImagesDeleted:
+ description: "Images that were deleted"
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageDeleteResponseItem"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Image"]
+ /auth:
+ post:
+ summary: "Check auth configuration"
+ description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password."
+ operationId: "SystemAuth"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "An identity token was generated successfully."
+ schema:
+ type: "object"
+ required: [Status]
+ properties:
+ Status:
+ description: "The status of the authentication"
+ type: "string"
+ x-nullable: false
+ IdentityToken:
+ description: "An opaque token used to authenticate a user after a successful login"
+ type: "string"
+ x-nullable: false
+ examples:
+ application/json:
+ Status: "Login Succeeded"
+ IdentityToken: "9cbaf023786cd7..."
+ 204:
+ description: "No error"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "authConfig"
+ in: "body"
+ description: "Authentication to check"
+ schema:
+ $ref: "#/definitions/AuthConfig"
+ tags: ["System"]
+ /info:
+ get:
+ summary: "Get system information"
+ operationId: "SystemInfo"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/SystemInfo"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /version:
+ get:
+ summary: "Get version"
+ description: "Returns the version of Docker that is running and various information about the system that Docker is running on."
+ operationId: "SystemVersion"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ properties:
+ Version:
+ type: "string"
+ ApiVersion:
+ type: "string"
+ MinAPIVersion:
+ type: "string"
+ GitCommit:
+ type: "string"
+ GoVersion:
+ type: "string"
+ Os:
+ type: "string"
+ Arch:
+ type: "string"
+ KernelVersion:
+ type: "string"
+ Experimental:
+ type: "boolean"
+ BuildTime:
+ type: "string"
+ examples:
+ application/json:
+ Version: "17.04.0"
+ Os: "linux"
+ KernelVersion: "3.19.0-23-generic"
+ GoVersion: "go1.7.5"
+ GitCommit: "deadbee"
+ Arch: "amd64"
+ ApiVersion: "1.27"
+ MinAPIVersion: "1.12"
+ BuildTime: "2016-06-14T07:09:13.444803460+00:00"
+ Experimental: true
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /_ping:
+ get:
+ summary: "Ping"
+ description: "This is a dummy endpoint you can use to test if the server is accessible."
+ operationId: "SystemPing"
+ produces: ["text/plain"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "string"
+ example: "OK"
+ headers:
+ API-Version:
+ type: "string"
+ description: "Max API Version the server supports"
+ Docker-Experimental:
+ type: "boolean"
+ description: "If the server is running with experimental mode enabled"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /commit:
+ post:
+ summary: "Create a new image from a container"
+ operationId: "ImageCommit"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/IdResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "containerConfig"
+ in: "body"
+ description: "The container configuration"
+ schema:
+ $ref: "#/definitions/ContainerConfig"
+ - name: "container"
+ in: "query"
+ description: "The ID or name of the container to commit"
+ type: "string"
+ - name: "repo"
+ in: "query"
+ description: "Repository name for the created image"
+ type: "string"
+ - name: "tag"
+ in: "query"
+ description: "Tag name for the create image"
+ type: "string"
+ - name: "comment"
+ in: "query"
+ description: "Commit message"
+ type: "string"
+ - name: "author"
+ in: "query"
+ description: "Author of the image (e.g., `John Hannibal Smith <hannibal@a-team.com>`)"
+ type: "string"
+ - name: "pause"
+ in: "query"
+ description: "Whether to pause the container before committing"
+ type: "boolean"
+ default: true
+ - name: "changes"
+ in: "query"
+ description: "`Dockerfile` instructions to apply while committing"
+ type: "string"
+ tags: ["Image"]
+ /events:
+ get:
+ summary: "Monitor events"
+ description: |
+ Stream real-time events from the server.
+
+ Various objects within Docker report events when something happens to them.
+
+ Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update`
+
+ Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, and `untag`
+
+ Volumes report these events: `create`, `mount`, `unmount`, and `destroy`
+
+ Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, and `remove`
+
+ The Docker daemon reports these events: `reload`
+
+ Services report these events: `create`, `update`, and `remove`
+
+ Nodes report these events: `create`, `update`, and `remove`
+
+ Secrets report these events: `create`, `update`, and `remove`
+
+ Configs report these events: `create`, `update`, and `remove`
+
+ operationId: "SystemEvents"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ properties:
+ Type:
+ description: "The type of object emitting the event"
+ type: "string"
+ Action:
+ description: "The type of event"
+ type: "string"
+ Actor:
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the object emitting the event"
+ type: "string"
+ Attributes:
+ description: "Various key/value attributes of the object, depending on its type"
+ type: "object"
+ additionalProperties:
+ type: "string"
+ time:
+ description: "Timestamp of event"
+ type: "integer"
+ timeNano:
+ description: "Timestamp of event, with nanosecond accuracy"
+ type: "integer"
+ format: "int64"
+ examples:
+ application/json:
+ Type: "container"
+ Action: "create"
+ Actor:
+ ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
+ Attributes:
+ com.example.some-label: "some-label-value"
+ image: "alpine"
+ name: "my-container"
+ time: 1461943101
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "since"
+ in: "query"
+ description: "Show events created since this timestamp then stream new events."
+ type: "string"
+ - name: "until"
+ in: "query"
+ description: "Show events created until this timestamp then stop streaming."
+ type: "string"
+ - name: "filters"
+ in: "query"
+ description: |
+ A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters:
+
+ - `container=<string>` container name or ID
+ - `daemon=<string>` daemon name or ID
+ - `event=<string>` event type
+ - `image=<string>` image name or ID
+ - `label=<string>` image or container label
+ - `network=<string>` network name or ID
+ - `plugin=<string>` plugin name or ID
+ - `scope=<string>` local or swarm
+ - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service` or `secret`
+ - `volume=<string>` volume name or ID
+ type: "string"
+ tags: ["System"]
+ /system/df:
+ get:
+ summary: "Get data usage information"
+ operationId: "SystemDataUsage"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ properties:
+ LayersSize:
+ type: "integer"
+ format: "int64"
+ Images:
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageSummary"
+ Containers:
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerSummary"
+ Volumes:
+ type: "array"
+ items:
+ $ref: "#/definitions/Volume"
+ example:
+ LayersSize: 1092588
+ Images:
+ -
+ Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
+ ParentId: ""
+ RepoTags:
+ - "busybox:latest"
+ RepoDigests:
+ - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6"
+ Created: 1466724217
+ Size: 1092588
+ SharedSize: 0
+ VirtualSize: 1092588
+ Labels: {}
+ Containers: 1
+ Containers:
+ -
+ Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148"
+ Names:
+ - "/top"
+ Image: "busybox"
+ ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
+ Command: "top"
+ Created: 1472592424
+ Ports: []
+ SizeRootFs: 1092588
+ Labels: {}
+ State: "exited"
+ Status: "Exited (0) 56 minutes ago"
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ IPAMConfig: null
+ Links: null
+ Aliases: null
+ NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92"
+ EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a"
+ Gateway: "172.18.0.1"
+ IPAddress: "172.18.0.2"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:12:00:02"
+ Mounts: []
+ Volumes:
+ -
+ Name: "my-volume"
+ Driver: "local"
+ Mountpoint: "/var/lib/docker/volumes/my-volume/_data"
+ Labels: null
+ Scope: "local"
+ Options: null
+ UsageData:
+ Size: 10920104
+ RefCount: 2
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /images/{name}/get:
+ get:
+ summary: "Export an image"
+ description: |
+ Get a tarball containing all images and metadata for a repository.
+
+ If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced.
+
+ ### Image tarball format
+
+ An image tarball contains one directory per image layer (named using its long ID), each containing these files:
+
+ - `VERSION`: currently `1.0` - the file format version
+ - `json`: detailed layer information, similar to `docker inspect layer_id`
+ - `layer.tar`: A tarfile containing the filesystem changes in this layer
+
+ The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions.
+
+ If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.
+
+ ```json
+ {
+ "hello-world": {
+ "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"
+ }
+ }
+ ```
+ operationId: "ImageGet"
+ produces:
+ - "application/x-tar"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "string"
+ format: "binary"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID"
+ type: "string"
+ required: true
+ tags: ["Image"]
+ /images/get:
+ get:
+ summary: "Export several images"
+ description: |
+ Get a tarball containing all images and metadata for several image repositories.
+
+ For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID.
+
+ For details on the format, see [the export image endpoint](#operation/ImageGet).
+ operationId: "ImageGetAll"
+ produces:
+ - "application/x-tar"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "string"
+ format: "binary"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "names"
+ in: "query"
+ description: "Image names to filter by"
+ type: "array"
+ items:
+ type: "string"
+ tags: ["Image"]
+ /images/load:
+ post:
+ summary: "Import images"
+ description: |
+ Load a set of images and tags into a repository.
+
+ For details on the format, see [the export image endpoint](#operation/ImageGet).
+ operationId: "ImageLoad"
+ consumes:
+ - "application/x-tar"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "imagesTarball"
+ in: "body"
+ description: "Tar archive containing images"
+ schema:
+ type: "string"
+ format: "binary"
+ - name: "quiet"
+ in: "query"
+ description: "Suppress progress details during load."
+ type: "boolean"
+ default: false
+ tags: ["Image"]
+ /containers/{id}/exec:
+ post:
+ summary: "Create an exec instance"
+ description: "Run a command inside a running container."
+ operationId: "ContainerExec"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/IdResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 409:
+ description: "container is paused"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "execConfig"
+ in: "body"
+ description: "Exec configuration"
+ schema:
+ type: "object"
+ properties:
+ AttachStdin:
+ type: "boolean"
+ description: "Attach to `stdin` of the exec command."
+ AttachStdout:
+ type: "boolean"
+ description: "Attach to `stdout` of the exec command."
+ AttachStderr:
+ type: "boolean"
+ description: "Attach to `stderr` of the exec command."
+ DetachKeys:
+ type: "string"
+ description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`."
+ Tty:
+ type: "boolean"
+ description: "Allocate a pseudo-TTY."
+ Env:
+ description: "A list of environment variables in the form `[\"VAR=value\", ...]`."
+ type: "array"
+ items:
+ type: "string"
+ Cmd:
+ type: "array"
+ description: "Command to run, as a string or array of strings."
+ items:
+ type: "string"
+ Privileged:
+ type: "boolean"
+ description: "Runs the exec process with extended privileges."
+ default: false
+ User:
+ type: "string"
+ description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`."
+ example:
+ AttachStdin: false
+ AttachStdout: true
+ AttachStderr: true
+ DetachKeys: "ctrl-p,ctrl-q"
+ Tty: false
+ Cmd:
+ - "date"
+ Env:
+ - "FOO=bar"
+ - "BAZ=quux"
+ required: true
+ - name: "id"
+ in: "path"
+ description: "ID or name of container"
+ type: "string"
+ required: true
+ tags: ["Exec"]
+ /exec/{id}/start:
+ post:
+ summary: "Start an exec instance"
+ description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command."
+ operationId: "ExecStart"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ responses:
+ 200:
+ description: "No error"
+ 404:
+ description: "No such exec instance"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Container is stopped or paused"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "execStartConfig"
+ in: "body"
+ schema:
+ type: "object"
+ properties:
+ Detach:
+ type: "boolean"
+ description: "Detach from the command."
+ Tty:
+ type: "boolean"
+ description: "Allocate a pseudo-TTY."
+ example:
+ Detach: false
+ Tty: false
+ - name: "id"
+ in: "path"
+ description: "Exec instance ID"
+ required: true
+ type: "string"
+ tags: ["Exec"]
+ /exec/{id}/resize:
+ post:
+ summary: "Resize an exec instance"
+ description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance."
+ operationId: "ExecResize"
+ responses:
+ 201:
+ description: "No error"
+ 404:
+ description: "No such exec instance"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Exec instance ID"
+ required: true
+ type: "string"
+ - name: "h"
+ in: "query"
+ description: "Height of the TTY session in characters"
+ type: "integer"
+ - name: "w"
+ in: "query"
+ description: "Width of the TTY session in characters"
+ type: "integer"
+ tags: ["Exec"]
+ /exec/{id}/json:
+ get:
+ summary: "Inspect an exec instance"
+ description: "Return low-level information about an exec instance."
+ operationId: "ExecInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ properties:
+ ID:
+ type: "string"
+ Running:
+ type: "boolean"
+ ExitCode:
+ type: "integer"
+ ProcessConfig:
+ $ref: "#/definitions/ProcessConfig"
+ OpenStdin:
+ type: "boolean"
+ OpenStderr:
+ type: "boolean"
+ OpenStdout:
+ type: "boolean"
+ ContainerID:
+ type: "string"
+ Pid:
+ type: "integer"
+ description: "The system process ID for the exec process."
+ examples:
+ application/json:
+ CanRemove: false
+ ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126"
+ DetachKeys: ""
+ ExitCode: 2
+ ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b"
+ OpenStderr: true
+ OpenStdin: true
+ OpenStdout: true
+ ProcessConfig:
+ arguments:
+ - "-c"
+ - "exit 2"
+ entrypoint: "sh"
+ privileged: false
+ tty: true
+ user: "1000"
+ Running: false
+ Pid: 42000
+ 404:
+ description: "No such exec instance"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Exec instance ID"
+ required: true
+ type: "string"
+ tags: ["Exec"]
+
+ /volumes:
+ get:
+ summary: "List volumes"
+ operationId: "VolumeList"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "Summary volume data that matches the query"
+ schema:
+ type: "object"
+ required: [Volumes, Warnings]
+ properties:
+ Volumes:
+ type: "array"
+ x-nullable: false
+ description: "List of volumes"
+ items:
+ $ref: "#/definitions/Volume"
+ Warnings:
+ type: "array"
+ x-nullable: false
+ description: "Warnings that occurred when fetching the list of volumes"
+ items:
+ type: "string"
+
+ examples:
+ application/json:
+ Volumes:
+ - CreatedAt: "2017-07-19T12:00:26Z"
+ Name: "tardis"
+ Driver: "local"
+ Mountpoint: "/var/lib/docker/volumes/tardis"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Scope: "local"
+ Options:
+ device: "tmpfs"
+ o: "size=100m,uid=1000"
+ type: "tmpfs"
+ Warnings: []
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ JSON encoded value of the filters (a `map[string][]string`) to
+ process on the volumes list. Available filters:
+
+ - `dangling=<boolean>` When set to `true` (or `1`), returns all
+ volumes that are not in use by a container. When set to `false`
+ (or `0`), only volumes that are in use by one or more
+ containers are returned.
+ - `driver=<volume-driver-name>` Matches volumes based on their driver.
+ - `label=<key>` or `label=<key>:<value>` Matches volumes based on
+ the presence of a `label` alone or a `label` and a value.
+ - `name=<volume-name>` Matches all or part of a volume name.
+ type: "string"
+ format: "json"
+ tags: ["Volume"]
+
+ /volumes/create:
+ post:
+ summary: "Create a volume"
+ operationId: "VolumeCreate"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 201:
+ description: "The volume was created successfully"
+ schema:
+ $ref: "#/definitions/Volume"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "volumeConfig"
+ in: "body"
+ required: true
+ description: "Volume configuration"
+ schema:
+ type: "object"
+ properties:
+ Name:
+ description: "The new volume's name. If not specified, Docker generates a name."
+ type: "string"
+ x-nullable: false
+ Driver:
+ description: "Name of the volume driver to use."
+ type: "string"
+ default: "local"
+ x-nullable: false
+ DriverOpts:
+ description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ Name: "tardis"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ Driver: "custom"
+ tags: ["Volume"]
+
+ /volumes/{name}:
+ get:
+ summary: "Inspect a volume"
+ operationId: "VolumeInspect"
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/Volume"
+ 404:
+ description: "No such volume"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ required: true
+ description: "Volume name or ID"
+ type: "string"
+ tags: ["Volume"]
+
+ delete:
+ summary: "Remove a volume"
+ description: "Instruct the driver to remove the volume."
+ operationId: "VolumeDelete"
+ responses:
+ 204:
+ description: "The volume was removed"
+ 404:
+ description: "No such volume or volume driver"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "Volume is in use and cannot be removed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ required: true
+ description: "Volume name or ID"
+ type: "string"
+ - name: "force"
+ in: "query"
+ description: "Force the removal of the volume"
+ type: "boolean"
+ default: false
+ tags: ["Volume"]
+ /volumes/prune:
+ post:
+ summary: "Delete unused volumes"
+ produces:
+ - "application/json"
+ operationId: "VolumePrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ properties:
+ VolumesDeleted:
+ description: "Volumes that were deleted"
+ type: "array"
+ items:
+ type: "string"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Volume"]
+ /networks:
+ get:
+ summary: "List networks"
+ description: |
+ Returns a list of networks. For details on the format, see [the network inspect endpoint](#operation/NetworkInspect).
+
+ Note that it uses a different, smaller representation of a network than inspecting a single network. For example,
+ the list of containers attached to the network is not propagated in API versions 1.28 and up.
+ operationId: "NetworkList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Network"
+ examples:
+ application/json:
+ - Name: "bridge"
+ Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566"
+ Created: "2016-10-19T06:21:00.416543526Z"
+ Scope: "local"
+ Driver: "bridge"
+ EnableIPv6: false
+ Internal: false
+ Attachable: false
+ Ingress: false
+ IPAM:
+ Driver: "default"
+ Config:
+ -
+ Subnet: "172.17.0.0/16"
+ Options:
+ com.docker.network.bridge.default_bridge: "true"
+ com.docker.network.bridge.enable_icc: "true"
+ com.docker.network.bridge.enable_ip_masquerade: "true"
+ com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+ com.docker.network.bridge.name: "docker0"
+ com.docker.network.driver.mtu: "1500"
+ - Name: "none"
+ Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794"
+ Created: "0001-01-01T00:00:00Z"
+ Scope: "local"
+ Driver: "null"
+ EnableIPv6: false
+ Internal: false
+ Attachable: false
+ Ingress: false
+ IPAM:
+ Driver: "default"
+ Config: []
+ Containers: {}
+ Options: {}
+ - Name: "host"
+ Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e"
+ Created: "0001-01-01T00:00:00Z"
+ Scope: "local"
+ Driver: "host"
+ EnableIPv6: false
+ Internal: false
+ Attachable: false
+ Ingress: false
+ IPAM:
+ Driver: "default"
+ Config: []
+ Containers: {}
+ Options: {}
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters:
+
+ - `driver=<driver-name>` Matches a network's driver.
+ - `id=<network-id>` Matches all or part of a network ID.
+ - `label=<key>` or `label=<key>=<value>` Matches networks based on the presence of a `label` alone or a `label` and a value.
+ - `name=<network-name>` Matches all or part of a network name.
+ - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`).
+ - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
+ type: "string"
+ tags: ["Network"]
+
+ /networks/{id}:
+ get:
+ summary: "Inspect a network"
+ operationId: "NetworkInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/Network"
+ 404:
+ description: "Network not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Network ID or name"
+ required: true
+ type: "string"
+ - name: "verbose"
+ in: "query"
+ description: "Detailed inspect output for troubleshooting"
+ type: "boolean"
+ default: false
+ - name: "scope"
+ in: "query"
+ description: "Filter the network by scope (swarm, global, or local)"
+ type: "string"
+ tags: ["Network"]
+
+ delete:
+ summary: "Remove a network"
+ operationId: "NetworkDelete"
+ responses:
+ 204:
+ description: "No error"
+ 403:
+ description: "operation not supported for pre-defined networks"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such network"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Network ID or name"
+ required: true
+ type: "string"
+ tags: ["Network"]
+
+ /networks/create:
+ post:
+ summary: "Create a network"
+ operationId: "NetworkCreate"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "No error"
+ schema:
+ type: "object"
+ properties:
+ Id:
+ description: "The ID of the created network."
+ type: "string"
+ Warning:
+ type: "string"
+ example:
+ Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30"
+ Warning: ""
+ 403:
+ description: "operation not supported for pre-defined networks"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "plugin not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "networkConfig"
+ in: "body"
+ description: "Network configuration"
+ required: true
+ schema:
+ type: "object"
+ required: ["Name"]
+ properties:
+ Name:
+ description: "The network's name."
+ type: "string"
+ CheckDuplicate:
+ description: "Check for networks with duplicate names. Since Network is primarily keyed based on a random ID and not on the name, and network name is strictly a user-friendly alias to the network which is uniquely identified using ID, there is no guaranteed way to check for duplicates. CheckDuplicate is there to provide a best effort checking of any networks which has the same name but it is not guaranteed to catch all name collisions."
+ type: "boolean"
+ Driver:
+ description: "Name of the network driver plugin to use."
+ type: "string"
+ default: "bridge"
+ Internal:
+ description: "Restrict external access to the network."
+ type: "boolean"
+ Attachable:
+ description: "Globally scoped network is manually attachable by regular containers from workers in swarm mode."
+ type: "boolean"
+ Ingress:
+ description: "Ingress network is the network which provides the routing-mesh in swarm mode."
+ type: "boolean"
+ IPAM:
+ description: "Optional custom IP scheme for the network."
+ $ref: "#/definitions/IPAM"
+ EnableIPv6:
+ description: "Enable IPv6 on the network."
+ type: "boolean"
+ Options:
+ description: "Network specific options to be used by the drivers."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ Labels:
+ description: "User-defined key/value metadata."
+ type: "object"
+ additionalProperties:
+ type: "string"
+ example:
+ Name: "isolated_nw"
+ CheckDuplicate: false
+ Driver: "bridge"
+ EnableIPv6: true
+ IPAM:
+ Driver: "default"
+ Config:
+ - Subnet: "172.20.0.0/16"
+ IPRange: "172.20.10.0/24"
+ Gateway: "172.20.10.11"
+ - Subnet: "2001:db8:abcd::/64"
+ Gateway: "2001:db8:abcd::1011"
+ Options:
+ foo: "bar"
+ Internal: true
+ Attachable: false
+ Ingress: false
+ Options:
+ com.docker.network.bridge.default_bridge: "true"
+ com.docker.network.bridge.enable_icc: "true"
+ com.docker.network.bridge.enable_ip_masquerade: "true"
+ com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+ com.docker.network.bridge.name: "docker0"
+ com.docker.network.driver.mtu: "1500"
+ Labels:
+ com.example.some-label: "some-value"
+ com.example.some-other-label: "some-other-value"
+ tags: ["Network"]
+
+ /networks/{id}/connect:
+ post:
+ summary: "Connect a container to a network"
+ operationId: "NetworkConnect"
+ consumes:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ 403:
+ description: "Operation not supported for swarm scoped networks"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "Network or container not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Network ID or name"
+ required: true
+ type: "string"
+ - name: "container"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ properties:
+ Container:
+ type: "string"
+ description: "The ID or name of the container to connect to the network."
+ EndpointConfig:
+ $ref: "#/definitions/EndpointSettings"
+ example:
+ Container: "3613f73ba0e4"
+ EndpointConfig:
+ IPAMConfig:
+ IPv4Address: "172.24.56.89"
+ IPv6Address: "2001:db8::5689"
+ tags: ["Network"]
+
+ /networks/{id}/disconnect:
+ post:
+ summary: "Disconnect a container from a network"
+ operationId: "NetworkDisconnect"
+ consumes:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ 403:
+ description: "Operation not supported for swarm scoped networks"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "Network or container not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "Network ID or name"
+ required: true
+ type: "string"
+ - name: "container"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ properties:
+ Container:
+ type: "string"
+ description: "The ID or name of the container to disconnect from the network."
+ Force:
+ type: "boolean"
+ description: "Force the container to disconnect from the network."
+ tags: ["Network"]
+ /networks/prune:
+ post:
+ summary: "Delete unused networks"
+ produces:
+ - "application/json"
+ operationId: "NetworkPrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels.
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ properties:
+ NetworksDeleted:
+ description: "Networks that were deleted"
+ type: "array"
+ items:
+ type: "string"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Network"]
+ /plugins:
+ get:
+ summary: "List plugins"
+ operationId: "PluginList"
+ description: "Returns information about installed plugins."
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Plugin"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the plugin list. Available filters:
+
+ - `capability=<capability name>`
+ - `enable=<true>|<false>`
+ tags: ["Plugin"]
+
+ /plugins/privileges:
+ get:
+ summary: "Get plugin privileges"
+ operationId: "GetPluginPrivileges"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ description: "Describes a permission the user has to accept upon installing the plugin."
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Description:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - Name: "network"
+ Description: ""
+ Value:
+ - "host"
+ - Name: "mount"
+ Description: ""
+ Value:
+ - "/data"
+ - Name: "device"
+ Description: ""
+ Value:
+ - "/dev/cpu_dma_latency"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "remote"
+ in: "query"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ tags:
+ - "Plugin"
+
+ /plugins/pull:
+ post:
+ summary: "Install a plugin"
+ operationId: "PluginPull"
+ description: |
+ Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PluginEnable).
+ produces:
+ - "application/json"
+ responses:
+ 204:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "remote"
+ in: "query"
+ description: |
+ Remote reference for plugin to install.
+
+ The `:latest` tag is optional, and is used as the default if omitted.
+ required: true
+ type: "string"
+ - name: "name"
+ in: "query"
+ description: |
+ Local name for the pulled plugin.
+
+ The `:latest` tag is optional, and is used as the default if omitted.
+ required: false
+ type: "string"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+ - name: "body"
+ in: "body"
+ schema:
+ type: "array"
+ items:
+ description: "Describes a permission accepted by the user upon installing the plugin."
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Description:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - Name: "network"
+ Description: ""
+ Value:
+ - "host"
+ - Name: "mount"
+ Description: ""
+ Value:
+ - "/data"
+ - Name: "device"
+ Description: ""
+ Value:
+ - "/dev/cpu_dma_latency"
+ tags: ["Plugin"]
+ /plugins/{name}/json:
+ get:
+ summary: "Inspect a plugin"
+ operationId: "PluginInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Plugin"
+ 404:
+ description: "plugin is not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ tags: ["Plugin"]
+ /plugins/{name}:
+ delete:
+ summary: "Remove a plugin"
+ operationId: "PluginDelete"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Plugin"
+ 404:
+ description: "plugin is not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ - name: "force"
+ in: "query"
+ description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container."
+ type: "boolean"
+ default: false
+ tags: ["Plugin"]
+ /plugins/{name}/enable:
+ post:
+ summary: "Enable a plugin"
+ operationId: "PluginEnable"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "plugin is not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ - name: "timeout"
+ in: "query"
+ description: "Set the HTTP client timeout (in seconds)"
+ type: "integer"
+ default: 0
+ tags: ["Plugin"]
+ /plugins/{name}/disable:
+ post:
+ summary: "Disable a plugin"
+ operationId: "PluginDisable"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "plugin is not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ tags: ["Plugin"]
+ /plugins/{name}/upgrade:
+ post:
+ summary: "Upgrade a plugin"
+ operationId: "PluginUpgrade"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "plugin not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ - name: "remote"
+ in: "query"
+ description: |
+ Remote reference to upgrade to.
+
+ The `:latest` tag is optional, and is used as the default if omitted.
+ required: true
+ type: "string"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+ - name: "body"
+ in: "body"
+ schema:
+ type: "array"
+ items:
+ description: "Describes a permission accepted by the user upon installing the plugin."
+ type: "object"
+ properties:
+ Name:
+ type: "string"
+ Description:
+ type: "string"
+ Value:
+ type: "array"
+ items:
+ type: "string"
+ example:
+ - Name: "network"
+ Description: ""
+ Value:
+ - "host"
+ - Name: "mount"
+ Description: ""
+ Value:
+ - "/data"
+ - Name: "device"
+ Description: ""
+ Value:
+ - "/dev/cpu_dma_latency"
+ tags: ["Plugin"]
+ /plugins/create:
+ post:
+ summary: "Create a plugin"
+ operationId: "PluginCreate"
+ consumes:
+ - "application/x-tar"
+ responses:
+ 204:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "query"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ - name: "tarContext"
+ in: "body"
+ description: "Path to tar containing plugin rootfs and manifest"
+ schema:
+ type: "string"
+ format: "binary"
+ tags: ["Plugin"]
+ /plugins/{name}/push:
+ post:
+ summary: "Push a plugin"
+ operationId: "PluginPush"
+ description: |
+ Push a plugin to the registry.
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "plugin not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Plugin"]
+ /plugins/{name}/set:
+ post:
+ summary: "Configure a plugin"
+ operationId: "PluginSet"
+ consumes:
+ - "application/json"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted."
+ required: true
+ type: "string"
+ - name: "body"
+ in: "body"
+ schema:
+ type: "array"
+ items:
+ type: "string"
+ example: ["DEBUG=1"]
+ responses:
+ 204:
+ description: "No error"
+ 404:
+ description: "Plugin not installed"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Plugin"]
+ /nodes:
+ get:
+ summary: "List nodes"
+ operationId: "NodeList"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Node"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the nodes list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ - `id=<node id>`
+ - `label=<engine label>`
+ - `membership=(accepted|pending)`
+ - `name=<node name>`
+ - `role=(manager|worker)`
+ type: "string"
+ tags: ["Node"]
+ /nodes/{id}:
+ get:
+ summary: "Inspect a node"
+ operationId: "NodeInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Node"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the node"
+ type: "string"
+ required: true
+ tags: ["Node"]
+ delete:
+ summary: "Delete a node"
+ operationId: "NodeDelete"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the node"
+ type: "string"
+ required: true
+ - name: "force"
+ in: "query"
+ description: "Force remove a node from the swarm"
+ default: false
+ type: "boolean"
+ tags: ["Node"]
+ /nodes/{id}/update:
+ post:
+ summary: "Update a node"
+ operationId: "NodeUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID of the node"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/NodeSpec"
+ - name: "version"
+ in: "query"
+ description: "The version number of the node object being updated. This is required to avoid conflicting writes."
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Node"]
+ /swarm:
+ get:
+ summary: "Inspect swarm"
+ operationId: "SwarmInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Swarm"
+ 404:
+ description: "no such swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Swarm"]
+ /swarm/init:
+ post:
+ summary: "Initialize a new swarm"
+ operationId: "SwarmInit"
+ produces:
+ - "application/json"
+ - "text/plain"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ description: "The node ID"
+ type: "string"
+ example: "7v2t30z9blmxuhnyo6s4cpenp"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is already part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ properties:
+ ListenAddr:
+ description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used."
+ type: "string"
+ AdvertiseAddr:
+ description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible."
+ type: "string"
+ DataPathAddr:
+ description: |
+ Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`,
+ or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`
+ is used.
+
+ The `DataPathAddr` specifies the address that global scope network drivers will publish towards other
+ nodes in order to reach the containers running on this node. Using this parameter it is possible to
+ separate the container data traffic from the management traffic of the cluster.
+ type: "string"
+ ForceNewCluster:
+ description: "Force creation of a new swarm."
+ type: "boolean"
+ Spec:
+ $ref: "#/definitions/SwarmSpec"
+ example:
+ ListenAddr: "0.0.0.0:2377"
+ AdvertiseAddr: "192.168.1.1:2377"
+ ForceNewCluster: false
+ Spec:
+ Orchestration: {}
+ Raft: {}
+ Dispatcher: {}
+ CAConfig: {}
+ EncryptionConfig:
+ AutoLockManagers: false
+ tags: ["Swarm"]
+ /swarm/join:
+ post:
+ summary: "Join an existing swarm"
+ operationId: "SwarmJoin"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is already part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ properties:
+ ListenAddr:
+ description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)."
+ type: "string"
+ AdvertiseAddr:
+ description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible."
+ type: "string"
+ DataPathAddr:
+ description: |
+ Address or interface to use for data path traffic (format: `<ip|interface>`), for example, `192.168.1.1`,
+ or an interface, like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr`
+ is used.
+
+ The `DataPathAddr` specifies the address that global scope network drivers will publish towards other
+ nodes in order to reach the containers running on this node. Using this parameter it is possible to
+ separate the container data traffic from the management traffic of the cluster.
+
+ type: "string"
+ RemoteAddrs:
+ description: "Addresses of manager nodes already participating in the swarm."
+ type: "array"
+ items:
+ type: "string"
+ JoinToken:
+ description: "Secret token for joining this swarm."
+ type: "string"
+ example:
+ ListenAddr: "0.0.0.0:2377"
+ AdvertiseAddr: "192.168.1.1:2377"
+ RemoteAddrs:
+ - "node1:2377"
+ JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
+ tags: ["Swarm"]
+ /swarm/leave:
+ post:
+ summary: "Leave a swarm"
+ operationId: "SwarmLeave"
+ responses:
+ 200:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "force"
+ description: "Force leave swarm, even if this is the last manager or that it will break the cluster."
+ in: "query"
+ type: "boolean"
+ default: false
+ tags: ["Swarm"]
+ /swarm/update:
+ post:
+ summary: "Update a swarm"
+ operationId: "SwarmUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ $ref: "#/definitions/SwarmSpec"
+ - name: "version"
+ in: "query"
+ description: "The version number of the swarm object being updated. This is required to avoid conflicting writes."
+ type: "integer"
+ format: "int64"
+ required: true
+ - name: "rotateWorkerToken"
+ in: "query"
+ description: "Rotate the worker join token."
+ type: "boolean"
+ default: false
+ - name: "rotateManagerToken"
+ in: "query"
+ description: "Rotate the manager join token."
+ type: "boolean"
+ default: false
+ - name: "rotateManagerUnlockKey"
+ in: "query"
+ description: "Rotate the manager unlock key."
+ type: "boolean"
+ default: false
+ tags: ["Swarm"]
+ /swarm/unlockkey:
+ get:
+ summary: "Get the unlock key"
+ operationId: "SwarmUnlockkey"
+ consumes:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ properties:
+ UnlockKey:
+ description: "The swarm's unlock key."
+ type: "string"
+ example:
+ UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Swarm"]
+ /swarm/unlock:
+ post:
+ summary: "Unlock a locked manager"
+ operationId: "SwarmUnlock"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ type: "object"
+ properties:
+ UnlockKey:
+ description: "The swarm's unlock key."
+ type: "string"
+ example:
+ UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
+ responses:
+ 200:
+ description: "no error"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Swarm"]
+ /services:
+ get:
+ summary: "List services"
+ operationId: "ServiceList"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Service"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters:
+
+ - `id=<service id>`
+ - `label=<service label>`
+ - `mode=["replicated"|"global"]`
+ - `name=<service name>`
+ tags: ["Service"]
+ /services/create:
+ post:
+ summary: "Create a service"
+ operationId: "ServiceCreate"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the created service."
+ type: "string"
+ Warning:
+ description: "Optional warning message"
+ type: "string"
+ example:
+ ID: "ak7w3gjqoa3kuz8xcpnyy0pvl"
+ Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 403:
+ description: "network is not eligible for services"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 409:
+ description: "name conflicts with an existing service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ allOf:
+ - $ref: "#/definitions/ServiceSpec"
+ - type: "object"
+ example:
+ Name: "web"
+ TaskTemplate:
+ ContainerSpec:
+ Image: "nginx:alpine"
+ Mounts:
+ -
+ ReadOnly: true
+ Source: "web-data"
+ Target: "/usr/share/nginx/html"
+ Type: "volume"
+ VolumeOptions:
+ DriverConfig: {}
+ Labels:
+ com.example.something: "something-value"
+ Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"]
+ User: "33"
+ DNSConfig:
+ Nameservers: ["8.8.8.8"]
+ Search: ["example.org"]
+ Options: ["timeout:3"]
+ Secrets:
+ -
+ File:
+ Name: "www.example.org.key"
+ UID: "33"
+ GID: "33"
+ Mode: 384
+ SecretID: "fpjqlhnwb19zds35k8wn80lq9"
+ SecretName: "example_org_domain_key"
+ LogDriver:
+ Name: "json-file"
+ Options:
+ max-file: "3"
+ max-size: "10M"
+ Placement: {}
+ Resources:
+ Limits:
+ MemoryBytes: 104857600
+ Reservations: {}
+ RestartPolicy:
+ Condition: "on-failure"
+ Delay: 10000000000
+ MaxAttempts: 10
+ Mode:
+ Replicated:
+ Replicas: 4
+ UpdateConfig:
+ Parallelism: 2
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ RollbackConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ EndpointSpec:
+ Ports:
+ -
+ Protocol: "tcp"
+ PublishedPort: 8080
+ TargetPort: 80
+ Labels:
+ foo: "bar"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+ tags: ["Service"]
+ /services/{id}:
+ get:
+ summary: "Inspect a service"
+ operationId: "ServiceInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Service"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID or name of service."
+ required: true
+ type: "string"
+ - name: "insertDefaults"
+ in: "query"
+ description: "Fill empty fields with default values."
+ type: "boolean"
+ default: false
+ tags: ["Service"]
+ delete:
+ summary: "Delete a service"
+ operationId: "ServiceDelete"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID or name of service."
+ required: true
+ type: "string"
+ tags: ["Service"]
+ /services/{id}/update:
+ post:
+ summary: "Update a service"
+ operationId: "ServiceUpdate"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/ServiceUpdateResponse"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID or name of service."
+ required: true
+ type: "string"
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ allOf:
+ - $ref: "#/definitions/ServiceSpec"
+ - type: "object"
+ example:
+ Name: "top"
+ TaskTemplate:
+ ContainerSpec:
+ Image: "busybox"
+ Args:
+ - "top"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ForceUpdate: 0
+ Mode:
+ Replicated:
+ Replicas: 1
+ UpdateConfig:
+ Parallelism: 2
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ RollbackConfig:
+ Parallelism: 1
+ Delay: 1000000000
+ FailureAction: "pause"
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ EndpointSpec:
+ Mode: "vip"
+
+ - name: "version"
+ in: "query"
+ description: "The version number of the service object being updated. This is required to avoid conflicting writes."
+ required: true
+ type: "integer"
+ - name: "registryAuthFrom"
+ in: "query"
+ type: "string"
+ description: "If the X-Registry-Auth header is not specified, this
+ parameter indicates where to find registry authorization credentials. The
+ valid values are `spec` and `previous-spec`."
+ default: "spec"
+ - name: "rollback"
+ in: "query"
+ type: "string"
+ description: "Set this parameter to `previous` to cause a
+ server-side rollback to the previous service spec. The supplied spec will be
+ ignored in this case."
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+
+ tags: ["Service"]
+ /services/{id}/logs:
+ get:
+ summary: "Get service logs"
+ description: |
+ Get `stdout` and `stderr` logs from a service.
+
+ **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.
+ operationId: "ServiceLogs"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ - "application/json"
+ responses:
+ 101:
+ description: "logs returned as a stream"
+ schema:
+ type: "string"
+ format: "binary"
+ 200:
+ description: "logs returned as a string in response body"
+ schema:
+ type: "string"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such service: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the service"
+ type: "string"
+ - name: "details"
+ in: "query"
+ description: "Show service context and extra details provided to logs."
+ type: "boolean"
+ default: false
+ - name: "follow"
+ in: "query"
+ description: |
+ Return the logs as a stream.
+
+ This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Return logs from `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Return logs from `stderr`"
+ type: "boolean"
+ default: false
+ - name: "since"
+ in: "query"
+ description: "Only return logs since this time, as a UNIX timestamp"
+ type: "integer"
+ default: 0
+ - name: "timestamps"
+ in: "query"
+ description: "Add timestamps to every log line"
+ type: "boolean"
+ default: false
+ - name: "tail"
+ in: "query"
+ description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
+ type: "string"
+ default: "all"
+ tags: ["Service"]
+ /tasks:
+ get:
+ summary: "List tasks"
+ operationId: "TaskList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Task"
+ example:
+ - ID: "0kzzo1i0y4jz6027t0k7aezc7"
+ Version:
+ Index: 71
+ CreatedAt: "2016-06-07T21:07:31.171892745Z"
+ UpdatedAt: "2016-06-07T21:07:31.376370513Z"
+ Spec:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Slot: 1
+ NodeID: "60gvrl6tm78dmak4yl7srz94v"
+ Status:
+ Timestamp: "2016-06-07T21:07:31.290032978Z"
+ State: "running"
+ Message: "started"
+ ContainerStatus:
+ ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
+ PID: 677
+ DesiredState: "running"
+ NetworksAttachments:
+ - Network:
+ ID: "4qvuz4ko70xaltuqbt8956gd1"
+ Version:
+ Index: 18
+ CreatedAt: "2016-06-07T20:31:11.912919752Z"
+ UpdatedAt: "2016-06-07T21:07:29.955277358Z"
+ Spec:
+ Name: "ingress"
+ Labels:
+ com.docker.swarm.internal: "true"
+ DriverConfiguration: {}
+ IPAMOptions:
+ Driver: {}
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ DriverState:
+ Name: "overlay"
+ Options:
+ com.docker.network.driver.overlay.vxlanid_list: "256"
+ IPAMOptions:
+ Driver:
+ Name: "default"
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ Addresses:
+ - "10.255.0.10/16"
+ - ID: "1yljwbmlr8er2waf8orvqpwms"
+ Version:
+ Index: 30
+ CreatedAt: "2016-06-07T21:07:30.019104782Z"
+ UpdatedAt: "2016-06-07T21:07:30.231958098Z"
+ Name: "hopeful_cori"
+ Spec:
+ ContainerSpec:
+ Image: "redis"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
+ Slot: 1
+ NodeID: "60gvrl6tm78dmak4yl7srz94v"
+ Status:
+ Timestamp: "2016-06-07T21:07:30.202183143Z"
+ State: "shutdown"
+ Message: "shutdown"
+ ContainerStatus:
+ ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"
+ DesiredState: "shutdown"
+ NetworksAttachments:
+ - Network:
+ ID: "4qvuz4ko70xaltuqbt8956gd1"
+ Version:
+ Index: 18
+ CreatedAt: "2016-06-07T20:31:11.912919752Z"
+ UpdatedAt: "2016-06-07T21:07:29.955277358Z"
+ Spec:
+ Name: "ingress"
+ Labels:
+ com.docker.swarm.internal: "true"
+ DriverConfiguration: {}
+ IPAMOptions:
+ Driver: {}
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ DriverState:
+ Name: "overlay"
+ Options:
+ com.docker.network.driver.overlay.vxlanid_list: "256"
+ IPAMOptions:
+ Driver:
+ Name: "default"
+ Configs:
+ - Subnet: "10.255.0.0/16"
+ Gateway: "10.255.0.1"
+ Addresses:
+ - "10.255.0.5/16"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. Available filters:
+
+ - `desired-state=(running | shutdown | accepted)`
+ - `id=<task id>`
+ - `label=key` or `label="key=value"`
+ - `name=<task name>`
+ - `node=<node id or name>`
+ - `service=<service name>`
+ tags: ["Task"]
+ /tasks/{id}:
+ get:
+ summary: "Inspect a task"
+ operationId: "TaskInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Task"
+ 404:
+ description: "no such task"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID of the task"
+ required: true
+ type: "string"
+ tags: ["Task"]
+ /tasks/{id}/logs:
+ get:
+ summary: "Get task logs"
+ description: |
+ Get `stdout` and `stderr` logs from a task.
+
+ **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.
+ operationId: "TaskLogs"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ - "application/json"
+ responses:
+ 101:
+ description: "logs returned as a stream"
+ schema:
+ type: "string"
+ format: "binary"
+ 200:
+ description: "logs returned as a string in response body"
+ schema:
+ type: "string"
+ 404:
+ description: "no such task"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such task: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID of the task"
+ type: "string"
+ - name: "details"
+ in: "query"
+ description: "Show task context and extra details provided to logs."
+ type: "boolean"
+ default: false
+ - name: "follow"
+ in: "query"
+ description: |
+ Return the logs as a stream.
+
+ This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Return logs from `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Return logs from `stderr`"
+ type: "boolean"
+ default: false
+ - name: "since"
+ in: "query"
+ description: "Only return logs since this time, as a UNIX timestamp"
+ type: "integer"
+ default: 0
+ - name: "timestamps"
+ in: "query"
+ description: "Add timestamps to every log line"
+ type: "boolean"
+ default: false
+ - name: "tail"
+ in: "query"
+ description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines."
+ type: "string"
+ default: "all"
+ tags: ["Task"]
+ /secrets:
+ get:
+ summary: "List secrets"
+ operationId: "SecretList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Secret"
+ example:
+ - ID: "blt1owaxmitz71s9v5zh81zun"
+ Version:
+ Index: 85
+ CreatedAt: "2017-07-20T13:55:28.678958722Z"
+ UpdatedAt: "2017-07-20T13:55:28.678958722Z"
+ Spec:
+ Name: "mysql-passwd"
+ Labels:
+ some.label: "some.value"
+ Driver:
+ Name: "secret-bucket"
+ Options:
+ OptionA: "value for driver option A"
+ OptionB: "value for driver option B"
+ - ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ Version:
+ Index: 11
+ CreatedAt: "2016-11-05T01:20:17.327670065Z"
+ UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+ Spec:
+ Name: "app-dev.crt"
+ Labels:
+ foo: "bar"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters:
+
+ - `id=<secret id>`
+ - `label=<key>` or `label=<key>=value`
+ - `name=<secret name>`
+ - `names=<secret name>`
+ tags: ["Secret"]
+ /secrets/create:
+ post:
+ summary: "Create a secret"
+ operationId: "SecretCreate"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the created secret."
+ type: "string"
+ example:
+ ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ 409:
+ description: "name conflicts with an existing object"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ schema:
+ allOf:
+ - $ref: "#/definitions/SecretSpec"
+ - type: "object"
+ example:
+ Name: "app-key.crt"
+ Labels:
+ foo: "bar"
+ Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
+ Driver:
+ Name: "secret-bucket"
+ Options:
+ OptionA: "value for driver option A"
+ OptionB: "value for driver option B"
+ tags: ["Secret"]
+ /secrets/{id}:
+ get:
+ summary: "Inspect a secret"
+ operationId: "SecretInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Secret"
+ examples:
+ application/json:
+ ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ Version:
+ Index: 11
+ CreatedAt: "2016-11-05T01:20:17.327670065Z"
+ UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+ Spec:
+ Name: "app-dev.crt"
+ Labels:
+ foo: "bar"
+ Driver:
+ Name: "secret-bucket"
+ Options:
+ OptionA: "value for driver option A"
+ OptionB: "value for driver option B"
+
+ 404:
+ description: "secret not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ type: "string"
+ description: "ID of the secret"
+ tags: ["Secret"]
+ delete:
+ summary: "Delete a secret"
+ operationId: "SecretDelete"
+ produces:
+ - "application/json"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "secret not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ type: "string"
+ description: "ID of the secret"
+ tags: ["Secret"]
+ /secrets/{id}/update:
+ post:
+ summary: "Update a Secret"
+ operationId: "SecretUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such secret"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the secret"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/SecretSpec"
+ description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values."
+ - name: "version"
+ in: "query"
+ description: "The version number of the secret object being updated. This is required to avoid conflicting writes."
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Secret"]
+ /configs:
+ get:
+ summary: "List configs"
+ operationId: "ConfigList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/Config"
+ example:
+ - ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ Version:
+ Index: 11
+ CreatedAt: "2016-11-05T01:20:17.327670065Z"
+ UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+ Spec:
+ Name: "server.conf"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the configs list. Available filters:
+
+ - `id=<config id>`
+ - `label=<key>` or `label=<key>=value`
+ - `name=<config name>`
+ - `names=<config name>`
+ tags: ["Config"]
+ /configs/create:
+ post:
+ summary: "Create a config"
+ operationId: "ConfigCreate"
+ consumes:
+ - "application/json"
+ produces:
+ - "application/json"
+ responses:
+ 201:
+ description: "no error"
+ schema:
+ type: "object"
+ properties:
+ ID:
+ description: "The ID of the created config."
+ type: "string"
+ example:
+ ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ 409:
+ description: "name conflicts with an existing object"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "body"
+ in: "body"
+ schema:
+ allOf:
+ - $ref: "#/definitions/ConfigSpec"
+ - type: "object"
+ example:
+ Name: "server.conf"
+ Labels:
+ foo: "bar"
+ Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
+ tags: ["Config"]
+ /configs/{id}:
+ get:
+ summary: "Inspect a config"
+ operationId: "ConfigInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Config"
+ examples:
+ application/json:
+ ID: "ktnbjxoalbkvbvedmg1urrz8h"
+ Version:
+ Index: 11
+ CreatedAt: "2016-11-05T01:20:17.327670065Z"
+ UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+ Spec:
+ Name: "app-dev.crt"
+ 404:
+ description: "config not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ type: "string"
+ description: "ID of the config"
+ tags: ["Config"]
+ delete:
+ summary: "Delete a config"
+ operationId: "ConfigDelete"
+ produces:
+ - "application/json"
+ responses:
+ 204:
+ description: "no error"
+ 404:
+ description: "config not found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ type: "string"
+ description: "ID of the config"
+ tags: ["Config"]
+ /configs/{id}/update:
+ post:
+ summary: "Update a Config"
+ operationId: "ConfigUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such config"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 503:
+ description: "node is not part of a swarm"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the config"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/ConfigSpec"
+ description: "The spec of the config to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [ConfigInspect endpoint](#operation/ConfigInspect) response values."
+ - name: "version"
+ in: "query"
+ description: "The version number of the config object being updated. This is required to avoid conflicting writes."
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Config"]
+ /distribution/{name}/json:
+ get:
+ summary: "Get image information from the registry"
+ description: "Return image digest and platform information by contacting the registry."
+ operationId: "DistributionInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "descriptor and platform information"
+ schema:
+ type: "object"
+ x-go-name: DistributionInspect
+ required: [Descriptor, Platforms]
+ properties:
+ Descriptor:
+ type: "object"
+ description: "A descriptor struct containing digest, media type, and size"
+ properties:
+ MediaType:
+ type: "string"
+ Size:
+ type: "integer"
+ format: "int64"
+ Digest:
+ type: "string"
+ URLs:
+ type: "array"
+ items:
+ type: "string"
+ Platforms:
+ type: "array"
+ description: "An array containing all platforms supported by the image"
+ items:
+ type: "object"
+ properties:
+ Architecture:
+ type: "string"
+ OS:
+ type: "string"
+ OSVersion:
+ type: "string"
+ OSFeatures:
+ type: "array"
+ items:
+ type: "string"
+ Variant:
+ type: "string"
+ Features:
+ type: "array"
+ items:
+ type: "string"
+ examples:
+ application/json:
+ Descriptor:
+ MediaType: "application/vnd.docker.distribution.manifest.v2+json"
+ Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
+ Size: 3987495
+ URLs:
+ - ""
+ Platforms:
+ - Architecture: "amd64"
+ OS: "linux"
+ OSVersion: ""
+ OSFeatures:
+ - ""
+ Variant: ""
+ Features:
+ - ""
+ 401:
+ description: "Failed authentication or no image found"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: someimage (tag: latest)"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or id"
+ type: "string"
+ required: true
+ tags: ["Distribution"]
+ /session:
+ post:
+ summary: "Initialize interactive session"
+ description: |
+ Start a new interactive session with a server. Session allows server to call back to the client for advanced capabilities.
+
+ > **Note**: This endpoint is *experimental* and only available if the daemon is started with experimental
+ > features enabled. The specifications for this endpoint may still change in a future version of the API.
+
+ ### Hijacking
+
+ This endpoint hijacks the HTTP connection to HTTP2 transport that allows the client to expose gRPC services on that connection.
+
+ For example, the client sends this request to upgrade the connection:
+
+ ```
+ POST /session HTTP/1.1
+ Upgrade: h2c
+ Connection: Upgrade
+ ```
+
+ The Docker daemon will respond with a `101 UPGRADED` response followed by the raw stream:
+
+ ```
+ HTTP/1.1 101 UPGRADED
+ Connection: Upgrade
+ Upgrade: h2c
+ ```
+ operationId: "Session"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ responses:
+ 101:
+ description: "no error, hijacking successful"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Session (experimental)"]
diff --git a/unum/vendor/github.com/docker/docker/api/types/auth.go b/unum/vendor/github.com/docker/docker/api/types/auth.go
new file mode 100644
index 0000000..056af6b
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/auth.go
@@ -0,0 +1,22 @@
+package types
+
+// AuthConfig contains authorization information for connecting to a Registry
+type AuthConfig struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Auth string `json:"auth,omitempty"`
+
+ // Email is an optional value associated with the username.
+ // This field is deprecated and will be removed in a later
+ // version of docker.
+ Email string `json:"email,omitempty"`
+
+ ServerAddress string `json:"serveraddress,omitempty"`
+
+ // IdentityToken is used to authenticate the user and get
+ // an access token for the registry.
+ IdentityToken string `json:"identitytoken,omitempty"`
+
+ // RegistryToken is a bearer token to be sent to a registry
+ RegistryToken string `json:"registrytoken,omitempty"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/unum/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
new file mode 100644
index 0000000..931ae10
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
@@ -0,0 +1,23 @@
+package blkiodev
+
+import "fmt"
+
+// WeightDevice is a structure that holds device:weight pair
+type WeightDevice struct {
+ Path string
+ Weight uint16
+}
+
+func (w *WeightDevice) String() string {
+ return fmt.Sprintf("%s:%d", w.Path, w.Weight)
+}
+
+// ThrottleDevice is a structure that holds device:rate_per_second pair
+type ThrottleDevice struct {
+ Path string
+ Rate uint64
+}
+
+func (t *ThrottleDevice) String() string {
+ return fmt.Sprintf("%s:%d", t.Path, t.Rate)
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/client.go b/unum/vendor/github.com/docker/docker/api/types/client.go
new file mode 100644
index 0000000..18a1263
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/client.go
@@ -0,0 +1,389 @@
+package types
+
+import (
+ "bufio"
+ "io"
+ "net"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ units "github.com/docker/go-units"
+)
+
+// CheckpointCreateOptions holds parameters to create a checkpoint from a container
+type CheckpointCreateOptions struct {
+ CheckpointID string
+ CheckpointDir string
+ Exit bool
+}
+
+// CheckpointListOptions holds parameters to list checkpoints for a container
+type CheckpointListOptions struct {
+ CheckpointDir string
+}
+
+// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
+type CheckpointDeleteOptions struct {
+ CheckpointID string
+ CheckpointDir string
+}
+
+// ContainerAttachOptions holds parameters to attach to a container.
+type ContainerAttachOptions struct {
+ Stream bool
+ Stdin bool
+ Stdout bool
+ Stderr bool
+ DetachKeys string
+ Logs bool
+}
+
+// ContainerCommitOptions holds parameters to commit changes into a container.
+type ContainerCommitOptions struct {
+ Reference string
+ Comment string
+ Author string
+ Changes []string
+ Pause bool
+ Config *container.Config
+}
+
+// ContainerExecInspect holds information returned by exec inspect.
+type ContainerExecInspect struct {
+ ExecID string
+ ContainerID string
+ Running bool
+ ExitCode int
+ Pid int
+}
+
+// ContainerListOptions holds parameters to list containers with.
+type ContainerListOptions struct {
+ Quiet bool
+ Size bool
+ All bool
+ Latest bool
+ Since string
+ Before string
+ Limit int
+ Filters filters.Args
+}
+
+// ContainerLogsOptions holds parameters to filter logs with.
+type ContainerLogsOptions struct {
+ ShowStdout bool
+ ShowStderr bool
+ Since string
+ Timestamps bool
+ Follow bool
+ Tail string
+ Details bool
+}
+
+// ContainerRemoveOptions holds parameters to remove containers.
+type ContainerRemoveOptions struct {
+ RemoveVolumes bool
+ RemoveLinks bool
+ Force bool
+}
+
+// ContainerStartOptions holds parameters to start containers.
+type ContainerStartOptions struct {
+ CheckpointID string
+ CheckpointDir string
+}
+
+// CopyToContainerOptions holds information
+// about files to copy into a container
+type CopyToContainerOptions struct {
+ AllowOverwriteDirWithFile bool
+ CopyUIDGID bool
+}
+
+// EventsOptions holds parameters to filter events with.
+type EventsOptions struct {
+ Since string
+ Until string
+ Filters filters.Args
+}
+
+// NetworkListOptions holds parameters to filter the list of networks with.
+type NetworkListOptions struct {
+ Filters filters.Args
+}
+
+// HijackedResponse holds connection information for a hijacked request.
+type HijackedResponse struct {
+ Conn net.Conn
+ Reader *bufio.Reader
+}
+
+// Close closes the hijacked connection and reader.
+func (h *HijackedResponse) Close() {
+ h.Conn.Close()
+}
+
+// CloseWriter is an interface that implements structs
+// that close input streams to prevent from writing.
+type CloseWriter interface {
+ CloseWrite() error
+}
+
+// CloseWrite closes a readWriter for writing.
+func (h *HijackedResponse) CloseWrite() error {
+ if conn, ok := h.Conn.(CloseWriter); ok {
+ return conn.CloseWrite()
+ }
+ return nil
+}
+
+// ImageBuildOptions holds the information
+// necessary to build images.
+type ImageBuildOptions struct {
+ Tags []string
+ SuppressOutput bool
+ RemoteContext string
+ NoCache bool
+ Remove bool
+ ForceRemove bool
+ PullParent bool
+ Isolation container.Isolation
+ CPUSetCPUs string
+ CPUSetMems string
+ CPUShares int64
+ CPUQuota int64
+ CPUPeriod int64
+ Memory int64
+ MemorySwap int64
+ CgroupParent string
+ NetworkMode string
+ ShmSize int64
+ Dockerfile string
+ Ulimits []*units.Ulimit
+ // BuildArgs needs to be a *string instead of just a string so that
+ // we can tell the difference between "" (empty string) and no value
+ // at all (nil). See the parsing of buildArgs in
+ // api/server/router/build/build_routes.go for even more info.
+ BuildArgs map[string]*string
+ AuthConfigs map[string]AuthConfig
+ Context io.Reader
+ Labels map[string]string
+ // squash the resulting image's layers to the parent
+ // preserves the original image and creates a new one from the parent with all
+ // the changes applied to a single layer
+ Squash bool
+ // CacheFrom specifies images that are used for matching cache. Images
+ // specified here do not need to have a valid parent chain to match cache.
+ CacheFrom []string
+ SecurityOpt []string
+ ExtraHosts []string // List of extra hosts
+ Target string
+ SessionID string
+
+ // TODO @jhowardmsft LCOW Support: This will require extending to include
+ // `Platform string`, but is omitted for now as it's hard-coded temporarily
+ // to avoid API changes.
+}
+
+// ImageBuildResponse holds information
+// returned by a server after building
+// an image.
+type ImageBuildResponse struct {
+ Body io.ReadCloser
+ OSType string
+}
+
+// ImageCreateOptions holds information to create images.
+type ImageCreateOptions struct {
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+}
+
+// ImageImportSource holds source information for ImageImport
+type ImageImportSource struct {
+ Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
+ SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
+}
+
+// ImageImportOptions holds information to import images from the client host.
+type ImageImportOptions struct {
+ Tag string // Tag is the name to tag this image with. This attribute is deprecated.
+ Message string // Message is the message to tag the image with
+ Changes []string // Changes are the raw changes to apply to this image
+}
+
+// ImageListOptions holds parameters to filter the list of images with.
+type ImageListOptions struct {
+ All bool
+ Filters filters.Args
+}
+
+// ImageLoadResponse returns information to the client about a load process.
+type ImageLoadResponse struct {
+ // Body must be closed to avoid a resource leak
+ Body io.ReadCloser
+ JSON bool
+}
+
+// ImagePullOptions holds information to pull images.
+type ImagePullOptions struct {
+ All bool
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+ PrivilegeFunc RequestPrivilegeFunc
+}
+
+// RequestPrivilegeFunc is a function interface that
+// clients can supply to retry operations after
+// getting an authorization error.
+// This function returns the registry authentication
+// header value in base 64 format, or an error
+// if the privilege request fails.
+type RequestPrivilegeFunc func() (string, error)
+
+ // ImagePushOptions holds information to push images.
+type ImagePushOptions ImagePullOptions
+
+// ImageRemoveOptions holds parameters to remove images.
+type ImageRemoveOptions struct {
+ Force bool
+ PruneChildren bool
+}
+
+// ImageSearchOptions holds parameters to search images with.
+type ImageSearchOptions struct {
+ RegistryAuth string
+ PrivilegeFunc RequestPrivilegeFunc
+ Filters filters.Args
+ Limit int
+}
+
+// ResizeOptions holds parameters to resize a tty.
+// It can be used to resize container ttys and
+// exec process ttys too.
+type ResizeOptions struct {
+ Height uint
+ Width uint
+}
+
+// NodeListOptions holds parameters to list nodes with.
+type NodeListOptions struct {
+ Filters filters.Args
+}
+
+// NodeRemoveOptions holds parameters to remove nodes with.
+type NodeRemoveOptions struct {
+ Force bool
+}
+
+// ServiceCreateOptions contains the options to use when creating a service.
+type ServiceCreateOptions struct {
+ // EncodedRegistryAuth is the encoded registry authorization credentials to
+ // use when updating the service.
+ //
+ // This field follows the format of the X-Registry-Auth header.
+ EncodedRegistryAuth string
+
+ // QueryRegistry indicates whether the service update requires
+ // contacting a registry. A registry may be contacted to retrieve
+ // the image digest and manifest, which in turn can be used to update
+ // platform or other information about the service.
+ QueryRegistry bool
+}
+
+// ServiceCreateResponse contains the information returned to a client
+// on the creation of a new service.
+type ServiceCreateResponse struct {
+ // ID is the ID of the created service.
+ ID string
+ // Warnings is a set of non-fatal warning messages to pass on to the user.
+ Warnings []string `json:",omitempty"`
+}
+
+// Values for RegistryAuthFrom in ServiceUpdateOptions
+const (
+ RegistryAuthFromSpec = "spec"
+ RegistryAuthFromPreviousSpec = "previous-spec"
+)
+
+// ServiceUpdateOptions contains the options to be used for updating services.
+type ServiceUpdateOptions struct {
+ // EncodedRegistryAuth is the encoded registry authorization credentials to
+ // use when updating the service.
+ //
+ // This field follows the format of the X-Registry-Auth header.
+ EncodedRegistryAuth string
+
+ // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
+ // into this field. While it does open API users up to racy writes, most
+ // users may not need that level of consistency in practice.
+
+ // RegistryAuthFrom specifies where to find the registry authorization
+ // credentials if they are not given in EncodedRegistryAuth. Valid
+ // values are "spec" and "previous-spec".
+ RegistryAuthFrom string
+
+ // Rollback indicates whether a server-side rollback should be
+ // performed. When this is set, the provided spec will be ignored.
+ // The valid values are "previous" and "none". An empty value is the
+ // same as "none".
+ Rollback string
+
+ // QueryRegistry indicates whether the service update requires
+ // contacting a registry. A registry may be contacted to retrieve
+ // the image digest and manifest, which in turn can be used to update
+ // platform or other information about the service.
+ QueryRegistry bool
+}
+
+// ServiceListOptions holds parameters to list services with.
+type ServiceListOptions struct {
+ Filters filters.Args
+}
+
+// ServiceInspectOptions holds parameters related to the "service inspect"
+// operation.
+type ServiceInspectOptions struct {
+ InsertDefaults bool
+}
+
+// TaskListOptions holds parameters to list tasks with.
+type TaskListOptions struct {
+ Filters filters.Args
+}
+
+// PluginRemoveOptions holds parameters to remove plugins.
+type PluginRemoveOptions struct {
+ Force bool
+}
+
+// PluginEnableOptions holds parameters to enable plugins.
+type PluginEnableOptions struct {
+ Timeout int
+}
+
+// PluginDisableOptions holds parameters to disable plugins.
+type PluginDisableOptions struct {
+ Force bool
+}
+
+// PluginInstallOptions holds parameters to install a plugin.
+type PluginInstallOptions struct {
+ Disabled bool
+ AcceptAllPermissions bool
+ RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+ RemoteRef string // RemoteRef is the plugin name on the registry
+ PrivilegeFunc RequestPrivilegeFunc
+ AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
+ Args []string
+}
+
+// SwarmUnlockKeyResponse contains the response for Engine API:
+// GET /swarm/unlockkey
+type SwarmUnlockKeyResponse struct {
+ // UnlockKey is the unlock key in ASCII-armored format.
+ UnlockKey string
+}
+
+// PluginCreateOptions hold all options to plugin create.
+type PluginCreateOptions struct {
+ RepoName string
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/configs.go b/unum/vendor/github.com/docker/docker/api/types/configs.go
new file mode 100644
index 0000000..e4d2ce6
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/configs.go
@@ -0,0 +1,70 @@
+package types
+
+import (
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+)
+
+// configs holds structs used for internal communication between the
+// frontend (such as an http server) and the backend (such as the
+// docker daemon).
+
+// ContainerCreateConfig is the parameter set to ContainerCreate()
+type ContainerCreateConfig struct {
+ Name string
+ Config *container.Config
+ HostConfig *container.HostConfig
+ NetworkingConfig *network.NetworkingConfig
+ AdjustCPUShares bool
+ Platform string
+}
+
+// ContainerRmConfig holds arguments for the container remove
+// operation. This struct is used to tell the backend what operations
+// to perform.
+type ContainerRmConfig struct {
+ ForceRemove, RemoveVolume, RemoveLink bool
+}
+
+// ContainerCommitConfig contains build configs for commit operation,
+// and is used when making a commit with the current state of the container.
+type ContainerCommitConfig struct {
+ Pause bool
+ Repo string
+ Tag string
+ Author string
+ Comment string
+ // merge container config into commit config before commit
+ MergeConfigs bool
+ Config *container.Config
+}
+
+// ExecConfig is a small subset of the Config struct that holds the configuration
+// for the exec feature of docker.
+type ExecConfig struct {
+ User string // User that will run the command
+ Privileged bool // Is the container in privileged mode
+ Tty bool // Attach standard streams to a tty.
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStderr bool // Attach the standard error
+ AttachStdout bool // Attach the standard output
+ Detach bool // Execute in detach mode
+ DetachKeys string // Escape keys for detach
+ Env []string // Environment variables
+ Cmd []string // Execution commands and args
+}
+
+// PluginRmConfig holds arguments for plugin remove.
+type PluginRmConfig struct {
+ ForceRemove bool
+}
+
+// PluginEnableConfig holds arguments for plugin enable
+type PluginEnableConfig struct {
+ Timeout int
+}
+
+// PluginDisableConfig holds arguments for plugin disable.
+type PluginDisableConfig struct {
+ ForceDisable bool
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/container/config.go b/unum/vendor/github.com/docker/docker/api/types/container/config.go
new file mode 100644
index 0000000..55a03fc
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/container/config.go
@@ -0,0 +1,69 @@
+package container
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/go-connections/nat"
+)
+
+// MinimumDuration puts a minimum on user configured duration.
+// This is to prevent API error on time unit. For example, API may
+// set 3 as healthcheck interval with intention of 3 seconds, but
+// Docker interprets it as 3 nanoseconds.
+const MinimumDuration = 1 * time.Millisecond
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+ StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
+
+// Config contains the configuration data about a container.
+// It should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
+type Config struct {
+ Hostname string // Hostname
+ Domainname string // Domainname
+ User string // User that will run the command(s) inside the container, also support user:group
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStdout bool // Attach the standard output
+ AttachStderr bool // Attach the standard error
+ ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
+ Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+ OpenStdin bool // Open stdin
+ StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
+ Env []string // List of environment variable to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+ Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in which the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
+ Labels map[string]string // List of labels set to this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/container/container_changes.go b/unum/vendor/github.com/docker/docker/api/types/container/container_changes.go
new file mode 100644
index 0000000..767945a
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/container/container_changes.go
@@ -0,0 +1,21 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerChangeResponseItem container change response item
+// swagger:model ContainerChangeResponseItem
+type ContainerChangeResponseItem struct {
+
+ // Kind of change
+ // Required: true
+ Kind uint8 `json:"Kind"`
+
+ // Path to file that has changed
+ // Required: true
+ Path string `json:"Path"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/container/container_create.go b/unum/vendor/github.com/docker/docker/api/types/container/container_create.go
new file mode 100644
index 0000000..c95023b
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/container/container_create.go
@@ -0,0 +1,21 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerCreateCreatedBody container create created body
+// swagger:model ContainerCreateCreatedBody
+type ContainerCreateCreatedBody struct {
+
+ // The ID of the created container
+ // Required: true
+ ID string `json:"Id"`
+
+ // Warnings encountered when creating the container
+ // Required: true
+ Warnings []string `json:"Warnings"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/container/container_top.go b/unum/vendor/github.com/docker/docker/api/types/container/container_top.go
new file mode 100644
index 0000000..78bc37e
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/container/container_top.go
@@ -0,0 +1,21 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerTopOKBody container top o k body
+// swagger:model ContainerTopOKBody
+type ContainerTopOKBody struct {
+
+ // Each process running in the container, where each process is an array of values corresponding to the titles
+ // Required: true
+ Processes [][]string `json:"Processes"`
+
+ // The ps column titles
+ // Required: true
+ Titles []string `json:"Titles"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/container/container_update.go b/unum/vendor/github.com/docker/docker/api/types/container/container_update.go
new file mode 100644
index 0000000..2339366
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/container/container_update.go
@@ -0,0 +1,17 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerUpdateOKBody container update o k body
+// swagger:model ContainerUpdateOKBody
+type ContainerUpdateOKBody struct {
+
+ // warnings
+ // Required: true
+ Warnings []string `json:"Warnings"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/container/container_wait.go b/unum/vendor/github.com/docker/docker/api/types/container/container_wait.go
new file mode 100644
index 0000000..77ecdba
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/container/container_wait.go
@@ -0,0 +1,17 @@
+package container
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerWaitOKBody container wait o k body
+// swagger:model ContainerWaitOKBody
+type ContainerWaitOKBody struct {
+
+ // Exit code of the container
+ // Required: true
+ StatusCode int64 `json:"StatusCode"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/container/host_config.go b/unum/vendor/github.com/docker/docker/api/types/container/host_config.go
new file mode 100644
index 0000000..bb421b3
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/container/host_config.go
@@ -0,0 +1,385 @@
+package container
+
+import (
+ "strings"
+
+ "github.com/docker/docker/api/types/blkiodev"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/go-connections/nat"
+ "github.com/docker/go-units"
+)
+
+// Isolation represents the isolation technology of a container. The supported
+// values are platform specific
+type Isolation string
+
+// IsDefault indicates the default isolation technology of a container. On Linux this
+// is the native driver. On Windows, this is a Windows Server Container.
+func (i Isolation) IsDefault() bool {
+ return strings.ToLower(string(i)) == "default" || string(i) == ""
+}
+
+// IpcMode represents the container ipc stack.
+type IpcMode string
+
+// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared.
+func (n IpcMode) IsPrivate() bool {
+ return n == "private"
+}
+
+// IsHost indicates whether the container shares the host's ipc namespace.
+func (n IpcMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsShareable indicates whether the container's ipc namespace can be shared with another container.
+func (n IpcMode) IsShareable() bool {
+ return n == "shareable"
+}
+
+// IsContainer indicates whether the container uses another container's ipc namespace.
+func (n IpcMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// IsNone indicates whether container IpcMode is set to "none".
+func (n IpcMode) IsNone() bool {
+ return n == "none"
+}
+
+// IsEmpty indicates whether container IpcMode is empty
+func (n IpcMode) IsEmpty() bool {
+ return n == ""
+}
+
+// Valid indicates whether the ipc mode is valid.
+func (n IpcMode) Valid() bool {
+ return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer()
+}
+
+// Container returns the name of the container ipc stack is going to be used.
+func (n IpcMode) Container() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 && parts[0] == "container" {
+ return parts[1]
+ }
+ return ""
+}
+
+// NetworkMode represents the container network stack.
+type NetworkMode string
+
+// IsNone indicates whether container isn't using a network stack.
+func (n NetworkMode) IsNone() bool {
+ return n == "none"
+}
+
+// IsDefault indicates whether container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+ return n == "default"
+}
+
+// IsPrivate indicates whether container uses its private network stack.
+func (n NetworkMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsContainer indicates whether container uses a container network stack.
+func (n NetworkMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// ConnectedContainer is the id of the container which network this container is connected to.
+func (n NetworkMode) ConnectedContainer() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+//UserDefined indicates user-created network
+func (n NetworkMode) UserDefined() string {
+ if n.IsUserDefined() {
+ return string(n)
+ }
+ return ""
+}
+
+// UsernsMode represents userns mode in the container.
+type UsernsMode string
+
+// IsHost indicates whether the container uses the host's userns.
+func (n UsernsMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsPrivate indicates whether the container uses the a private userns.
+func (n UsernsMode) IsPrivate() bool {
+ return !(n.IsHost())
+}
+
+// Valid indicates whether the userns is valid.
+func (n UsernsMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ default:
+ return false
+ }
+ return true
+}
+
+// CgroupSpec represents the cgroup to use for the container.
+type CgroupSpec string
+
+// IsContainer indicates whether the container is using another container cgroup
+func (c CgroupSpec) IsContainer() bool {
+ parts := strings.SplitN(string(c), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the cgroup spec is valid.
+func (c CgroupSpec) Valid() bool {
+ return c.IsContainer() || c == ""
+}
+
+// Container returns the name of the container whose cgroup will be used.
+func (c CgroupSpec) Container() string {
+ parts := strings.SplitN(string(c), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// UTSMode represents the UTS namespace of the container.
+type UTSMode string
+
+// IsPrivate indicates whether the container uses its private UTS namespace.
+func (n UTSMode) IsPrivate() bool {
+ return !(n.IsHost())
+}
+
+// IsHost indicates whether the container uses the host's UTS namespace.
+func (n UTSMode) IsHost() bool {
+ return n == "host"
+}
+
+// Valid indicates whether the UTS namespace is valid.
+func (n UTSMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ default:
+ return false
+ }
+ return true
+}
+
+// PidMode represents the pid namespace of the container.
+type PidMode string
+
+// IsPrivate indicates whether the container uses its own new pid namespace.
+func (n PidMode) IsPrivate() bool {
+ return !(n.IsHost() || n.IsContainer())
+}
+
+// IsHost indicates whether the container uses the host's pid namespace.
+func (n PidMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsContainer indicates whether the container uses a container's pid namespace.
+func (n PidMode) IsContainer() bool {
+ parts := strings.SplitN(string(n), ":", 2)
+ return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the pid namespace is valid.
+func (n PidMode) Valid() bool {
+ parts := strings.Split(string(n), ":")
+ switch mode := parts[0]; mode {
+ case "", "host":
+ case "container":
+ if len(parts) != 2 || parts[1] == "" {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
+
+// Container returns the name of the container whose pid namespace is going to be used.
+func (n PidMode) Container() string {
+ parts := strings.SplitN(string(n), ":", 2)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// DeviceMapping represents the device mapping between the host and the container.
+type DeviceMapping struct {
+ PathOnHost string
+ PathInContainer string
+ CgroupPermissions string
+}
+
+// RestartPolicy represents the restart policies of the container.
+type RestartPolicy struct {
+ Name string
+ MaximumRetryCount int
+}
+
+// IsNone indicates whether the container has the "no" restart policy.
+// This means the container will not automatically restart when exiting.
+func (rp *RestartPolicy) IsNone() bool {
+ return rp.Name == "no" || rp.Name == ""
+}
+
+// IsAlways indicates whether the container has the "always" restart policy.
+// This means the container will automatically restart regardless of the exit status.
+func (rp *RestartPolicy) IsAlways() bool {
+ return rp.Name == "always"
+}
+
+// IsOnFailure indicates whether the container has the "on-failure" restart policy.
+// This means the container will automatically restart of exiting with a non-zero exit status.
+func (rp *RestartPolicy) IsOnFailure() bool {
+ return rp.Name == "on-failure"
+}
+
+// IsUnlessStopped indicates whether the container has the
+// "unless-stopped" restart policy. This means the container will
+// automatically restart unless user has put it to stopped state.
+func (rp *RestartPolicy) IsUnlessStopped() bool {
+ return rp.Name == "unless-stopped"
+}
+
+// IsSame compares two RestartPolicy to see if they are the same
+func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
+ return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
+}
+
+// LogMode is a type to define the available modes for logging
+// These modes affect how logs are handled when log messages start piling up.
+type LogMode string
+
+// Available logging modes
+const (
+ LogModeUnset = ""
+ LogModeBlocking LogMode = "blocking"
+ LogModeNonBlock LogMode = "non-blocking"
+)
+
+// LogConfig represents the logging configuration of the container.
+type LogConfig struct {
+ Type string
+ Config map[string]string
+}
+
+// Resources contains container's resources (cgroups config, ulimits...)
+type Resources struct {
+ // Applicable to all platforms
+ CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
+ Memory int64 // Memory limit (in bytes)
+ NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10<sup>-9</sup> CPUs.
+
+ // Applicable to UNIX platforms
+ CgroupParent string // Parent cgroup.
+ BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
+ BlkioWeightDevice []*blkiodev.WeightDevice
+ BlkioDeviceReadBps []*blkiodev.ThrottleDevice
+ BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
+ BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
+ BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
+ CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
+ CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
+ CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
+ CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
+ CpusetCpus string // CpusetCpus 0-2, 0,1
+ CpusetMems string // CpusetMems 0-2, 0,1
+ Devices []DeviceMapping // List of devices to map inside the container
+ DeviceCgroupRules []string // List of rule to be added to the device cgroup
+ DiskQuota int64 // Disk limit (in bytes)
+ KernelMemory int64 // Kernel memory limit (in bytes)
+ MemoryReservation int64 // Memory soft limit (in bytes)
+ MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+ MemorySwappiness *int64 // Tuning container memory swappiness behaviour
+ OomKillDisable *bool // Whether to disable OOM Killer or not
+ PidsLimit int64 // Setting pids limit for a container
+ Ulimits []*units.Ulimit // List of ulimits to be set in the container
+
+ // Applicable to Windows
+ CPUCount int64 `json:"CpuCount"` // CPU count
+ CPUPercent int64 `json:"CpuPercent"` // CPU percent
+ IOMaximumIOps uint64 // Maximum IOps for the container system drive
+ IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
+}
+
+// UpdateConfig holds the mutable attributes of a Container.
+// Those attributes can be updated at runtime.
+type UpdateConfig struct {
+ // Contains container's resources (cgroups, ulimits)
+ Resources
+ RestartPolicy RestartPolicy
+}
+
+// HostConfig the non-portable Config structure of a container.
+// Here, "non-portable" means "dependent of the host we are running on".
+// Portable information *should* appear in Config.
+type HostConfig struct {
+ // Applicable to all platforms
+ Binds []string // List of volume bindings for this container
+ ContainerIDFile string // File (path) where the containerId is written
+ LogConfig LogConfig // Configuration of the logs for this container
+ NetworkMode NetworkMode // Network mode to use for the container
+ PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
+ RestartPolicy RestartPolicy // Restart policy to be used for the container
+ AutoRemove bool // Automatically remove container when it exits
+ VolumeDriver string // Name of the volume driver used to mount volumes
+ VolumesFrom []string // List of volumes to take from other container
+
+ // Applicable to UNIX platforms
+ CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
+ CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
+ DNS []string `json:"Dns"` // List of DNS server to lookup
+ DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
+ DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
+ ExtraHosts []string // List of extra hosts
+ GroupAdd []string // List of additional groups that the container process will run as
+ IpcMode IpcMode // IPC namespace to use for the container
+ Cgroup CgroupSpec // Cgroup to use for the container
+ Links []string // List of links (in the name:alias form)
+ OomScoreAdj int // Container preference for OOM-killing
+ PidMode PidMode // PID namespace to use for the container
+ Privileged bool // Is the container in privileged mode
+ PublishAllPorts bool // Should docker publish all exposed port for the container
+ ReadonlyRootfs bool // Is the container root filesystem in read-only
+ SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
+ StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
+ Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
+ UTSMode UTSMode // UTS namespace to use for the container
+ UsernsMode UsernsMode // The user namespace to use for the container
+ ShmSize int64 // Total shm memory usage
+ Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
+ Runtime string `json:",omitempty"` // Runtime to use with this container
+
+ // Applicable to Windows
+ ConsoleSize [2]uint // Initial console size (height,width)
+ Isolation Isolation // Isolation technology of the container (e.g. default, hyperv)
+
+ // Contains container's resources (cgroups, ulimits)
+ Resources
+
+ // Mounts specs used by the container
+ Mounts []mount.Mount `json:",omitempty"`
+
+ // Run a custom init inside the container, if null, use the daemon's configured settings
+ Init *bool `json:",omitempty"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/unum/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
new file mode 100644
index 0000000..2d664d1
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
@@ -0,0 +1,41 @@
+// +build !windows
+
+package container
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+ return i.IsDefault()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+ if n.IsBridge() {
+ return "bridge"
+ } else if n.IsHost() {
+ return "host"
+ } else if n.IsContainer() {
+ return "container"
+ } else if n.IsNone() {
+ return "none"
+ } else if n.IsDefault() {
+ return "default"
+ } else if n.IsUserDefined() {
+ return n.UserDefined()
+ }
+ return ""
+}
+
+// IsBridge indicates whether container uses the bridge network stack
+func (n NetworkMode) IsBridge() bool {
+ return n == "bridge"
+}
+
+// IsHost indicates whether container uses the host network stack.
+func (n NetworkMode) IsHost() bool {
+ return n == "host"
+}
+
+// IsUserDefined indicates user-created network
+func (n NetworkMode) IsUserDefined() bool {
+ return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/unum/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
new file mode 100644
index 0000000..469923f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
@@ -0,0 +1,54 @@
+package container
+
+import (
+ "strings"
+)
+
+// IsBridge indicates whether container uses the bridge network stack
+// in windows it is given the name NAT
+func (n NetworkMode) IsBridge() bool {
+ return n == "nat"
+}
+
+// IsHost indicates whether container uses the host network stack.
+// returns false as this is not supported by windows
+func (n NetworkMode) IsHost() bool {
+ return false
+}
+
+// IsUserDefined indicates user-created network
+func (n NetworkMode) IsUserDefined() bool {
+ return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
+}
+
+// IsHyperV indicates the use of a Hyper-V partition for isolation
+func (i Isolation) IsHyperV() bool {
+ return strings.ToLower(string(i)) == "hyperv"
+}
+
+// IsProcess indicates the use of process isolation
+func (i Isolation) IsProcess() bool {
+ return strings.ToLower(string(i)) == "process"
+}
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+ return i.IsDefault() || i.IsHyperV() || i.IsProcess()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+ if n.IsDefault() {
+ return "default"
+ } else if n.IsBridge() {
+ return "nat"
+ } else if n.IsNone() {
+ return "none"
+ } else if n.IsContainer() {
+ return "container"
+ } else if n.IsUserDefined() {
+ return n.UserDefined()
+ }
+
+ return ""
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/unum/vendor/github.com/docker/docker/api/types/container/waitcondition.go
new file mode 100644
index 0000000..64820fe
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/container/waitcondition.go
@@ -0,0 +1,22 @@
+package container
+
+// WaitCondition is a type used to specify a container state for which
+// to wait.
+type WaitCondition string
+
+// Possible WaitCondition Values.
+//
+// WaitConditionNotRunning (default) is used to wait for any of the non-running
+// states: "created", "exited", "dead", "removing", or "removed".
+//
+// WaitConditionNextExit is used to wait for the next time the state changes
+// to a non-running state. If the state is currently "created" or "exited",
+// this would cause Wait() to block until either the container runs and exits
+// or is removed.
+//
+// WaitConditionRemoved is used to wait for the container to be removed.
+const (
+ WaitConditionNotRunning WaitCondition = "not-running"
+ WaitConditionNextExit WaitCondition = "next-exit"
+ WaitConditionRemoved WaitCondition = "removed"
+)
diff --git a/unum/vendor/github.com/docker/docker/api/types/error_response.go b/unum/vendor/github.com/docker/docker/api/types/error_response.go
new file mode 100644
index 0000000..dc942d9
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/error_response.go
@@ -0,0 +1,13 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ErrorResponse Represents an error.
+// swagger:model ErrorResponse
+type ErrorResponse struct {
+
+ // The error message.
+ // Required: true
+ Message string `json:"message"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/events/events.go b/unum/vendor/github.com/docker/docker/api/types/events/events.go
new file mode 100644
index 0000000..e292565
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/events/events.go
@@ -0,0 +1,52 @@
+package events
+
+const (
+ // ContainerEventType is the event type that containers generate
+ ContainerEventType = "container"
+ // DaemonEventType is the event type that daemon generate
+ DaemonEventType = "daemon"
+ // ImageEventType is the event type that images generate
+ ImageEventType = "image"
+ // NetworkEventType is the event type that networks generate
+ NetworkEventType = "network"
+ // PluginEventType is the event type that plugins generate
+ PluginEventType = "plugin"
+ // VolumeEventType is the event type that volumes generate
+ VolumeEventType = "volume"
+ // ServiceEventType is the event type that services generate
+ ServiceEventType = "service"
+ // NodeEventType is the event type that nodes generate
+ NodeEventType = "node"
+ // SecretEventType is the event type that secrets generate
+ SecretEventType = "secret"
+ // ConfigEventType is the event type that configs generate
+ ConfigEventType = "config"
+)
+
+// Actor describes something that generates events,
+// like a container, or a network, or a volume.
+// It has a defined name and a set or attributes.
+// The container attributes are its labels, other actors
+// can generate these attributes from other properties.
+type Actor struct {
+ ID string
+ Attributes map[string]string
+}
+
+// Message represents the information an event contains
+type Message struct {
+ // Deprecated information from JSONMessage.
+ // With data only in container events.
+ Status string `json:"status,omitempty"`
+ ID string `json:"id,omitempty"`
+ From string `json:"from,omitempty"`
+
+ Type string
+ Action string
+ Actor Actor
+ // Engine events are local scope. Cluster events are swarm scope.
+ Scope string `json:"scope,omitempty"`
+
+ Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/filters/parse.go b/unum/vendor/github.com/docker/docker/api/types/filters/parse.go
new file mode 100644
index 0000000..363d454
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -0,0 +1,317 @@
+// Package filters provides helper function to parse and handle command line
+// filter, used for example in docker ps or docker images commands.
+package filters
+
+import (
+ "encoding/json"
+ "errors"
+ "regexp"
+ "strings"
+
+ "github.com/docker/docker/api/types/versions"
+)
+
+// Args stores filter arguments as map key:{map key: bool}.
+// It contains an aggregation of the map of arguments (which are in the form
+// of -f 'key=value') based on the key, and stores values for the same key
+// in a map with string keys and boolean values.
+// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
+// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
+type Args struct {
+ fields map[string]map[string]bool
+}
+
+// NewArgs initializes a new Args struct.
+func NewArgs() Args {
+ return Args{fields: map[string]map[string]bool{}}
+}
+
+// ParseFlag parses the argument to the filter flag. Like
+//
+// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
+//
+// If prev map is provided, then it is appended to, and returned. By default a new
+// map is created.
+func ParseFlag(arg string, prev Args) (Args, error) {
+ filters := prev
+ if len(arg) == 0 {
+ return filters, nil
+ }
+
+ if !strings.Contains(arg, "=") {
+ return filters, ErrBadFormat
+ }
+
+ f := strings.SplitN(arg, "=", 2)
+
+ name := strings.ToLower(strings.TrimSpace(f[0]))
+ value := strings.TrimSpace(f[1])
+
+ filters.Add(name, value)
+
+ return filters, nil
+}
+
+// ErrBadFormat is an error returned in case of bad format for a filter.
+var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
+
+// ToParam packs the Args into a string for easy transport from client to server.
+func ToParam(a Args) (string, error) {
+ // this way we don't URL encode {}, just empty space
+ if a.Len() == 0 {
+ return "", nil
+ }
+
+ buf, err := json.Marshal(a.fields)
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
+
+// ToParamWithVersion packs the Args into a string for easy transport from client to server.
+// The generated string will depend on the specified version (corresponding to the API version).
+func ToParamWithVersion(version string, a Args) (string, error) {
+ // this way we don't URL encode {}, just empty space
+ if a.Len() == 0 {
+ return "", nil
+ }
+
+ // for daemons older than v1.10, filter must be of the form map[string][]string
+ var buf []byte
+ var err error
+ if version != "" && versions.LessThan(version, "1.22") {
+ buf, err = json.Marshal(convertArgsToSlice(a.fields))
+ } else {
+ buf, err = json.Marshal(a.fields)
+ }
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
+
+// FromParam unpacks the filter Args.
+func FromParam(p string) (Args, error) {
+ if len(p) == 0 {
+ return NewArgs(), nil
+ }
+
+ r := strings.NewReader(p)
+ d := json.NewDecoder(r)
+
+ m := map[string]map[string]bool{}
+ if err := d.Decode(&m); err != nil {
+ r.Seek(0, 0)
+
+ // Allow parsing old arguments in slice format.
+ // Because other libraries might be sending them in this format.
+ deprecated := map[string][]string{}
+ if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
+ m = deprecatedArgs(deprecated)
+ } else {
+ return NewArgs(), err
+ }
+ }
+ return Args{m}, nil
+}
+
+// Get returns the list of values associates with a field.
+// It returns a slice of strings to keep backwards compatibility with old code.
+func (filters Args) Get(field string) []string {
+ values := filters.fields[field]
+ if values == nil {
+ return make([]string, 0)
+ }
+ slice := make([]string, 0, len(values))
+ for key := range values {
+ slice = append(slice, key)
+ }
+ return slice
+}
+
+// Add adds a new value to a filter field.
+func (filters Args) Add(name, value string) {
+ if _, ok := filters.fields[name]; ok {
+ filters.fields[name][value] = true
+ } else {
+ filters.fields[name] = map[string]bool{value: true}
+ }
+}
+
+// Del removes a value from a filter field.
+func (filters Args) Del(name, value string) {
+ if _, ok := filters.fields[name]; ok {
+ delete(filters.fields[name], value)
+ if len(filters.fields[name]) == 0 {
+ delete(filters.fields, name)
+ }
+ }
+}
+
+// Len returns the number of fields in the arguments.
+func (filters Args) Len() int {
+ return len(filters.fields)
+}
+
+// MatchKVList returns true if the values for the specified field matches the ones
+// from the sources.
+// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
+// field is 'label' and sources are {'label1': '1', 'label2': '2'}
+// it returns true.
+func (filters Args) MatchKVList(field string, sources map[string]string) bool {
+ fieldValues := filters.fields[field]
+
+ //do not filter if there is no filter set or cannot determine filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+
+ if len(sources) == 0 {
+ return false
+ }
+
+ for name2match := range fieldValues {
+ testKV := strings.SplitN(name2match, "=", 2)
+
+ v, ok := sources[testKV[0]]
+ if !ok {
+ return false
+ }
+ if len(testKV) == 2 && testKV[1] != v {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Match returns true if the values for the specified field matches the source string
+// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
+// field is 'image.name' and source is 'ubuntu'
+// it returns true.
+func (filters Args) Match(field, source string) bool {
+ if filters.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := filters.fields[field]
+ for name2match := range fieldValues {
+ match, err := regexp.MatchString(name2match, source)
+ if err != nil {
+ continue
+ }
+ if match {
+ return true
+ }
+ }
+ return false
+}
+
+// ExactMatch returns true if the source matches exactly one of the filters.
+func (filters Args) ExactMatch(field, source string) bool {
+ fieldValues, ok := filters.fields[field]
+ //do not filter if there is no filter set or cannot determine filter
+ if !ok || len(fieldValues) == 0 {
+ return true
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
+func (filters Args) UniqueExactMatch(field, source string) bool {
+ fieldValues := filters.fields[field]
+ //do not filter if there is no filter set or cannot determine filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+ if len(filters.fields[field]) != 1 {
+ return false
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// FuzzyMatch returns true if the source matches exactly one of the filters,
+// or the source has one of the filters as a prefix.
+func (filters Args) FuzzyMatch(field, source string) bool {
+ if filters.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := filters.fields[field]
+ for prefix := range fieldValues {
+ if strings.HasPrefix(source, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+// Include returns true if the name of the field to filter is in the filters.
+func (filters Args) Include(field string) bool {
+ _, ok := filters.fields[field]
+ return ok
+}
+
+type invalidFilter string
+
+func (e invalidFilter) Error() string {
+ return "Invalid filter '" + string(e) + "'"
+}
+
+func (invalidFilter) InvalidParameter() {}
+
+// Validate ensures that all the fields in the filter are valid.
+// It returns an error as soon as it finds an invalid field.
+func (filters Args) Validate(accepted map[string]bool) error {
+ for name := range filters.fields {
+ if !accepted[name] {
+ return invalidFilter(name)
+ }
+ }
+ return nil
+}
+
+// WalkValues iterates over the list of filtered values for a field.
+// It stops the iteration if it finds an error and it returns that error.
+func (filters Args) WalkValues(field string, op func(value string) error) error {
+ if _, ok := filters.fields[field]; !ok {
+ return nil
+ }
+ for v := range filters.fields[field] {
+ if err := op(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
+ m := map[string]map[string]bool{}
+ for k, v := range d {
+ values := map[string]bool{}
+ for _, vv := range v {
+ values[vv] = true
+ }
+ m[k] = values
+ }
+ return m
+}
+
+func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
+ m := map[string][]string{}
+ for k, v := range f {
+ values := []string{}
+ for kk := range v {
+ if v[kk] {
+ values = append(values, kk)
+ }
+ }
+ m[k] = values
+ }
+ return m
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/unum/vendor/github.com/docker/docker/api/types/graph_driver_data.go
new file mode 100644
index 0000000..4d9bf1c
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/graph_driver_data.go
@@ -0,0 +1,17 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// GraphDriverData Information about a container's graph driver.
+// swagger:model GraphDriverData
+type GraphDriverData struct {
+
+ // data
+ // Required: true
+ Data map[string]string `json:"Data"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/id_response.go b/unum/vendor/github.com/docker/docker/api/types/id_response.go
new file mode 100644
index 0000000..7592d2f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/id_response.go
@@ -0,0 +1,13 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// IDResponse Response to an API call that returns just an Id
+// swagger:model IdResponse
+type IDResponse struct {
+
+ // The id of the newly created object.
+ // Required: true
+ ID string `json:"Id"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/image/image_history.go b/unum/vendor/github.com/docker/docker/api/types/image/image_history.go
new file mode 100644
index 0000000..0dd30c7
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/image/image_history.go
@@ -0,0 +1,37 @@
+package image
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// HistoryResponseItem history response item
+// swagger:model HistoryResponseItem
+type HistoryResponseItem struct {
+
+ // comment
+ // Required: true
+ Comment string `json:"Comment"`
+
+ // created
+ // Required: true
+ Created int64 `json:"Created"`
+
+ // created by
+ // Required: true
+ CreatedBy string `json:"CreatedBy"`
+
+ // Id
+ // Required: true
+ ID string `json:"Id"`
+
+ // size
+ // Required: true
+ Size int64 `json:"Size"`
+
+ // tags
+ // Required: true
+ Tags []string `json:"Tags"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/unum/vendor/github.com/docker/docker/api/types/image_delete_response_item.go
new file mode 100644
index 0000000..b9a65a0
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/image_delete_response_item.go
@@ -0,0 +1,15 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ImageDeleteResponseItem image delete response item
+// swagger:model ImageDeleteResponseItem
+type ImageDeleteResponseItem struct {
+
+ // The image ID of an image that was deleted
+ Deleted string `json:"Deleted,omitempty"`
+
+ // The image ID of an image that was untagged
+ Untagged string `json:"Untagged,omitempty"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/image_summary.go b/unum/vendor/github.com/docker/docker/api/types/image_summary.go
new file mode 100644
index 0000000..e145b3d
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/image_summary.go
@@ -0,0 +1,49 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ImageSummary image summary
+// swagger:model ImageSummary
+type ImageSummary struct {
+
+ // containers
+ // Required: true
+ Containers int64 `json:"Containers"`
+
+ // created
+ // Required: true
+ Created int64 `json:"Created"`
+
+ // Id
+ // Required: true
+ ID string `json:"Id"`
+
+ // labels
+ // Required: true
+ Labels map[string]string `json:"Labels"`
+
+ // parent Id
+ // Required: true
+ ParentID string `json:"ParentId"`
+
+ // repo digests
+ // Required: true
+ RepoDigests []string `json:"RepoDigests"`
+
+ // repo tags
+ // Required: true
+ RepoTags []string `json:"RepoTags"`
+
+ // shared size
+ // Required: true
+ SharedSize int64 `json:"SharedSize"`
+
+ // size
+ // Required: true
+ Size int64 `json:"Size"`
+
+ // virtual size
+ // Required: true
+ VirtualSize int64 `json:"VirtualSize"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/mount/mount.go b/unum/vendor/github.com/docker/docker/api/types/mount/mount.go
new file mode 100644
index 0000000..b7d133c
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/mount/mount.go
@@ -0,0 +1,130 @@
+package mount
+
+import (
+ "os"
+)
+
+// Type represents the type of a mount.
+type Type string
+
+// Type constants
+const (
+ // TypeBind is the type for mounting host dir
+ TypeBind Type = "bind"
+ // TypeVolume is the type for remote storage volumes
+ TypeVolume Type = "volume"
+ // TypeTmpfs is the type for mounting tmpfs
+ TypeTmpfs Type = "tmpfs"
+ // TypeNamedPipe is the type for mounting Windows named pipes
+ TypeNamedPipe Type = "npipe"
+)
+
+// Mount represents a mount (volume).
+type Mount struct {
+ Type Type `json:",omitempty"`
+ // Source specifies the name of the mount. Depending on mount type, this
+ // may be a volume name or a host path, or even ignored.
+ // Source is not supported for tmpfs (must be an empty value)
+ Source string `json:",omitempty"`
+ Target string `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+ Consistency Consistency `json:",omitempty"`
+
+ BindOptions *BindOptions `json:",omitempty"`
+ VolumeOptions *VolumeOptions `json:",omitempty"`
+ TmpfsOptions *TmpfsOptions `json:",omitempty"`
+}
+
+// Propagation represents the propagation of a mount.
+type Propagation string
+
+const (
+ // PropagationRPrivate RPRIVATE
+ PropagationRPrivate Propagation = "rprivate"
+ // PropagationPrivate PRIVATE
+ PropagationPrivate Propagation = "private"
+ // PropagationRShared RSHARED
+ PropagationRShared Propagation = "rshared"
+ // PropagationShared SHARED
+ PropagationShared Propagation = "shared"
+ // PropagationRSlave RSLAVE
+ PropagationRSlave Propagation = "rslave"
+ // PropagationSlave SLAVE
+ PropagationSlave Propagation = "slave"
+)
+
+// Propagations is the list of all valid mount propagations
+var Propagations = []Propagation{
+ PropagationRPrivate,
+ PropagationPrivate,
+ PropagationRShared,
+ PropagationShared,
+ PropagationRSlave,
+ PropagationSlave,
+}
+
+// Consistency represents the consistency requirements of a mount.
+type Consistency string
+
+const (
+ // ConsistencyFull guarantees bind mount-like consistency
+ ConsistencyFull Consistency = "consistent"
+ // ConsistencyCached mounts can cache read data and FS structure
+ ConsistencyCached Consistency = "cached"
+ // ConsistencyDelegated mounts can cache read and written data and structure
+ ConsistencyDelegated Consistency = "delegated"
+ // ConsistencyDefault provides "consistent" behavior unless overridden
+ ConsistencyDefault Consistency = "default"
+)
+
+// BindOptions defines options specific to mounts of type "bind".
+type BindOptions struct {
+ Propagation Propagation `json:",omitempty"`
+}
+
+// VolumeOptions represents the options for a mount of type volume.
+type VolumeOptions struct {
+ NoCopy bool `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ DriverConfig *Driver `json:",omitempty"`
+}
+
+// Driver represents a volume driver.
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TmpfsOptions defines options specific to mounts of type "tmpfs".
+type TmpfsOptions struct {
+ // Size sets the size of the tmpfs, in bytes.
+ //
+ // This will be converted to an operating system specific value
+ // depending on the host. For example, on linux, it will be converted to
+ // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
+ // docker, uses a straight byte value.
+ //
+ // Percentages are not supported.
+ SizeBytes int64 `json:",omitempty"`
+ // Mode of the tmpfs upon creation
+ Mode os.FileMode `json:",omitempty"`
+
+ // TODO(stevvooe): There are several more tmpfs flags, specified in the
+ // daemon, that are accepted. Only the most basic are added for now.
+ //
+ // From docker/docker/pkg/mount/flags.go:
+ //
+ // var validFlags = map[string]bool{
+ // "": true,
+ // "size": true, X
+ // "mode": true, X
+ // "uid": true,
+ // "gid": true,
+ // "nr_inodes": true,
+ // "nr_blocks": true,
+ // "mpol": true,
+ // }
+ //
+ // Some of these may be straightforward to add, but others, such as
+ // uid/gid have implications in a clustered system.
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/network/network.go b/unum/vendor/github.com/docker/docker/api/types/network/network.go
new file mode 100644
index 0000000..7c7dbac
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/network/network.go
@@ -0,0 +1,108 @@
+package network
+
+// Address represents an IP address
+type Address struct {
+ Addr string
+ PrefixLen int
+}
+
+// IPAM represents IP Address Management
+type IPAM struct {
+ Driver string
+ Options map[string]string //Per network IPAM driver options
+ Config []IPAMConfig
+}
+
+// IPAMConfig represents IPAM configurations
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ IPRange string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+ AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
+}
+
+// EndpointIPAMConfig represents IPAM configurations for the endpoint
+type EndpointIPAMConfig struct {
+ IPv4Address string `json:",omitempty"`
+ IPv6Address string `json:",omitempty"`
+ LinkLocalIPs []string `json:",omitempty"`
+}
+
+// Copy makes a copy of the endpoint ipam config
+func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig {
+ cfgCopy := *cfg
+ cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs))
+ cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...)
+ return &cfgCopy
+}
+
+// PeerInfo represents one peer of an overlay network
+type PeerInfo struct {
+ Name string
+ IP string
+}
+
+// EndpointSettings stores the network endpoint details
+type EndpointSettings struct {
+ // Configurations
+ IPAMConfig *EndpointIPAMConfig
+ Links []string
+ Aliases []string
+ // Operational data
+ NetworkID string
+ EndpointID string
+ Gateway string
+ IPAddress string
+ IPPrefixLen int
+ IPv6Gateway string
+ GlobalIPv6Address string
+ GlobalIPv6PrefixLen int
+ MacAddress string
+ DriverOpts map[string]string
+}
+
+// Task carries the information about one backend task
+type Task struct {
+ Name string
+ EndpointID string
+ EndpointIP string
+ Info map[string]string
+}
+
+// ServiceInfo represents service parameters with the list of service's tasks
+type ServiceInfo struct {
+ VIP string
+ Ports []string
+ LocalLBIndex int
+ Tasks []Task
+}
+
+// Copy makes a deep copy of `EndpointSettings`
+func (es *EndpointSettings) Copy() *EndpointSettings {
+ epCopy := *es
+ if es.IPAMConfig != nil {
+ epCopy.IPAMConfig = es.IPAMConfig.Copy()
+ }
+
+ if es.Links != nil {
+ links := make([]string, 0, len(es.Links))
+ epCopy.Links = append(links, es.Links...)
+ }
+
+ if es.Aliases != nil {
+ aliases := make([]string, 0, len(es.Aliases))
+ epCopy.Aliases = append(aliases, es.Aliases...)
+ }
+ return &epCopy
+}
+
+// NetworkingConfig represents the container's networking configuration for each of its interfaces
+// Carries the networking configs specified in the `docker run` and `docker network connect` commands
+type NetworkingConfig struct {
+ EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
+}
+
+// ConfigReference specifies the source which provides a network's configuration
+type ConfigReference struct {
+ Network string
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/plugin.go b/unum/vendor/github.com/docker/docker/api/types/plugin.go
new file mode 100644
index 0000000..cab333e
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/plugin.go
@@ -0,0 +1,200 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Plugin A plugin for the Engine API
+// swagger:model Plugin
+type Plugin struct {
+
+ // config
+ // Required: true
+ Config PluginConfig `json:"Config"`
+
+ // True if the plugin is running. False if the plugin is not running, only installed.
+ // Required: true
+ Enabled bool `json:"Enabled"`
+
+ // Id
+ ID string `json:"Id,omitempty"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // plugin remote reference used to push/pull the plugin
+ PluginReference string `json:"PluginReference,omitempty"`
+
+ // settings
+ // Required: true
+ Settings PluginSettings `json:"Settings"`
+}
+
+// PluginConfig The config of a plugin.
+// swagger:model PluginConfig
+type PluginConfig struct {
+
+ // args
+ // Required: true
+ Args PluginConfigArgs `json:"Args"`
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // Docker Version used to create the plugin
+ DockerVersion string `json:"DockerVersion,omitempty"`
+
+ // documentation
+ // Required: true
+ Documentation string `json:"Documentation"`
+
+ // entrypoint
+ // Required: true
+ Entrypoint []string `json:"Entrypoint"`
+
+ // env
+ // Required: true
+ Env []PluginEnv `json:"Env"`
+
+ // interface
+ // Required: true
+ Interface PluginConfigInterface `json:"Interface"`
+
+ // ipc host
+ // Required: true
+ IpcHost bool `json:"IpcHost"`
+
+ // linux
+ // Required: true
+ Linux PluginConfigLinux `json:"Linux"`
+
+ // mounts
+ // Required: true
+ Mounts []PluginMount `json:"Mounts"`
+
+ // network
+ // Required: true
+ Network PluginConfigNetwork `json:"Network"`
+
+ // pid host
+ // Required: true
+ PidHost bool `json:"PidHost"`
+
+ // propagated mount
+ // Required: true
+ PropagatedMount string `json:"PropagatedMount"`
+
+ // user
+ User PluginConfigUser `json:"User,omitempty"`
+
+ // work dir
+ // Required: true
+ WorkDir string `json:"WorkDir"`
+
+ // rootfs
+ Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
+}
+
+// PluginConfigArgs plugin config args
+// swagger:model PluginConfigArgs
+type PluginConfigArgs struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+
+ // value
+ // Required: true
+ Value []string `json:"Value"`
+}
+
+// PluginConfigInterface The interface between Docker and the plugin
+// swagger:model PluginConfigInterface
+type PluginConfigInterface struct {
+
+ // socket
+ // Required: true
+ Socket string `json:"Socket"`
+
+ // types
+ // Required: true
+ Types []PluginInterfaceType `json:"Types"`
+}
+
+// PluginConfigLinux plugin config linux
+// swagger:model PluginConfigLinux
+type PluginConfigLinux struct {
+
+ // allow all devices
+ // Required: true
+ AllowAllDevices bool `json:"AllowAllDevices"`
+
+ // capabilities
+ // Required: true
+ Capabilities []string `json:"Capabilities"`
+
+ // devices
+ // Required: true
+ Devices []PluginDevice `json:"Devices"`
+}
+
+// PluginConfigNetwork plugin config network
+// swagger:model PluginConfigNetwork
+type PluginConfigNetwork struct {
+
+ // type
+ // Required: true
+ Type string `json:"Type"`
+}
+
+// PluginConfigRootfs plugin config rootfs
+// swagger:model PluginConfigRootfs
+type PluginConfigRootfs struct {
+
+ // diff ids
+ DiffIds []string `json:"diff_ids"`
+
+ // type
+ Type string `json:"type,omitempty"`
+}
+
+// PluginConfigUser plugin config user
+// swagger:model PluginConfigUser
+type PluginConfigUser struct {
+
+ // g ID
+ GID uint32 `json:"GID,omitempty"`
+
+ // UID
+ UID uint32 `json:"UID,omitempty"`
+}
+
+// PluginSettings Settings that can be modified by users.
+// swagger:model PluginSettings
+type PluginSettings struct {
+
+ // args
+ // Required: true
+ Args []string `json:"Args"`
+
+ // devices
+ // Required: true
+ Devices []PluginDevice `json:"Devices"`
+
+ // env
+ // Required: true
+ Env []string `json:"Env"`
+
+ // mounts
+ // Required: true
+ Mounts []PluginMount `json:"Mounts"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/plugin_device.go b/unum/vendor/github.com/docker/docker/api/types/plugin_device.go
new file mode 100644
index 0000000..5699010
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/plugin_device.go
@@ -0,0 +1,25 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginDevice plugin device
+// swagger:model PluginDevice
+type PluginDevice struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // path
+ // Required: true
+ Path *string `json:"Path"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/plugin_env.go b/unum/vendor/github.com/docker/docker/api/types/plugin_env.go
new file mode 100644
index 0000000..32962dc
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/plugin_env.go
@@ -0,0 +1,25 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginEnv plugin env
+// swagger:model PluginEnv
+type PluginEnv struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+
+ // value
+ // Required: true
+ Value *string `json:"Value"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/unum/vendor/github.com/docker/docker/api/types/plugin_interface_type.go
new file mode 100644
index 0000000..c82f204
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/plugin_interface_type.go
@@ -0,0 +1,21 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginInterfaceType plugin interface type
+// swagger:model PluginInterfaceType
+type PluginInterfaceType struct {
+
+ // capability
+ // Required: true
+ Capability string `json:"Capability"`
+
+ // prefix
+ // Required: true
+ Prefix string `json:"Prefix"`
+
+ // version
+ // Required: true
+ Version string `json:"Version"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/plugin_mount.go b/unum/vendor/github.com/docker/docker/api/types/plugin_mount.go
new file mode 100644
index 0000000..5c031cf
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/plugin_mount.go
@@ -0,0 +1,37 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// PluginMount plugin mount
+// swagger:model PluginMount
+type PluginMount struct {
+
+ // description
+ // Required: true
+ Description string `json:"Description"`
+
+ // destination
+ // Required: true
+ Destination string `json:"Destination"`
+
+ // name
+ // Required: true
+ Name string `json:"Name"`
+
+ // options
+ // Required: true
+ Options []string `json:"Options"`
+
+ // settable
+ // Required: true
+ Settable []string `json:"Settable"`
+
+ // source
+ // Required: true
+ Source *string `json:"Source"`
+
+ // type
+ // Required: true
+ Type string `json:"Type"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/plugin_responses.go b/unum/vendor/github.com/docker/docker/api/types/plugin_responses.go
new file mode 100644
index 0000000..18f743f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/plugin_responses.go
@@ -0,0 +1,71 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+// PluginsListResponse contains the response for the Engine API
+type PluginsListResponse []*Plugin
+
+// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
+func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
+ versionIndex := len(p)
+ prefixIndex := 0
+ if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
+ return fmt.Errorf("%q is not a plugin interface type", p)
+ }
+ p = p[1 : len(p)-1]
+loop:
+ for i, b := range p {
+ switch b {
+ case '.':
+ prefixIndex = i
+ case '/':
+ versionIndex = i
+ break loop
+ }
+ }
+ t.Prefix = string(p[:prefixIndex])
+ t.Capability = string(p[prefixIndex+1 : versionIndex])
+ if versionIndex < len(p) {
+ t.Version = string(p[versionIndex+1:])
+ }
+ return nil
+}
+
+// MarshalJSON implements json.Marshaler for PluginInterfaceType
+func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(t.String())
+}
+
+// String implements fmt.Stringer for PluginInterfaceType
+func (t PluginInterfaceType) String() string {
+ return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
+}
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+type PluginPrivilege struct {
+ Name string
+ Description string
+ Value []string
+}
+
+// PluginPrivileges is a list of PluginPrivilege
+type PluginPrivileges []PluginPrivilege
+
+func (s PluginPrivileges) Len() int {
+ return len(s)
+}
+
+func (s PluginPrivileges) Less(i, j int) bool {
+ return s[i].Name < s[j].Name
+}
+
+func (s PluginPrivileges) Swap(i, j int) {
+ sort.Strings(s[i].Value)
+ sort.Strings(s[j].Value)
+ s[i], s[j] = s[j], s[i]
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/port.go b/unum/vendor/github.com/docker/docker/api/types/port.go
new file mode 100644
index 0000000..ad52d46
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/port.go
@@ -0,0 +1,23 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Port An open port on a container
+// swagger:model Port
+type Port struct {
+
+ // IP
+ IP string `json:"IP,omitempty"`
+
+ // Port on the container
+ // Required: true
+ PrivatePort uint16 `json:"PrivatePort"`
+
+ // Port exposed on the host
+ PublicPort uint16 `json:"PublicPort,omitempty"`
+
+ // type
+ // Required: true
+ Type string `json:"Type"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/unum/vendor/github.com/docker/docker/api/types/registry/authenticate.go
new file mode 100644
index 0000000..42cac44
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/registry/authenticate.go
@@ -0,0 +1,21 @@
+package registry
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// AuthenticateOKBody authenticate o k body
+// swagger:model AuthenticateOKBody
+type AuthenticateOKBody struct {
+
+ // An opaque token used to authenticate a user after a successful login
+ // Required: true
+ IdentityToken string `json:"IdentityToken"`
+
+ // The status of the authentication
+ // Required: true
+ Status string `json:"Status"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/registry/registry.go b/unum/vendor/github.com/docker/docker/api/types/registry/registry.go
new file mode 100644
index 0000000..b98a943
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/registry/registry.go
@@ -0,0 +1,119 @@
+package registry
+
+import (
+ "encoding/json"
+ "net"
+
+ "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ServiceConfig stores daemon registry services configuration.
+type ServiceConfig struct {
+ AllowNondistributableArtifactsCIDRs []*NetIPNet
+ AllowNondistributableArtifactsHostnames []string
+ InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
+ IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
+ Mirrors []string
+}
+
+// NetIPNet is the net.IPNet type, which can be marshalled and
+// unmarshalled to JSON
+type NetIPNet net.IPNet
+
+// String returns the CIDR notation of ipnet
+func (ipnet *NetIPNet) String() string {
+ return (*net.IPNet)(ipnet).String()
+}
+
+// MarshalJSON returns the JSON representation of the IPNet
+func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
+ return json.Marshal((*net.IPNet)(ipnet).String())
+}
+
+// UnmarshalJSON sets the IPNet from a byte array of JSON
+func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
+ var ipnetStr string
+ if err = json.Unmarshal(b, &ipnetStr); err == nil {
+ var cidr *net.IPNet
+ if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
+ *ipnet = NetIPNet(*cidr)
+ }
+ }
+ return
+}
+
+// IndexInfo contains information about a registry
+//
+// RepositoryInfo Examples:
+// {
+// "Index" : {
+// "Name" : "docker.io",
+// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
+// "Secure" : true,
+// "Official" : true,
+// },
+// "RemoteName" : "library/debian",
+// "LocalName" : "debian",
+// "CanonicalName" : "docker.io/debian"
+// "Official" : true,
+// }
+//
+// {
+// "Index" : {
+// "Name" : "127.0.0.1:5000",
+// "Mirrors" : [],
+// "Secure" : false,
+// "Official" : false,
+// },
+// "RemoteName" : "user/repo",
+// "LocalName" : "127.0.0.1:5000/user/repo",
+// "CanonicalName" : "127.0.0.1:5000/user/repo",
+// "Official" : false,
+// }
+type IndexInfo struct {
+ // Name is the name of the registry, such as "docker.io"
+ Name string
+ // Mirrors is a list of mirrors, expressed as URIs
+ Mirrors []string
+ // Secure is set to false if the registry is part of the list of
+ // insecure registries. Insecure registries accept HTTP and/or accept
+ // HTTPS with certificates from unknown CAs.
+ Secure bool
+ // Official indicates whether this is an official registry
+ Official bool
+}
+
+// SearchResult describes a search result returned from a registry
+type SearchResult struct {
+ // StarCount indicates the number of stars this repository has
+ StarCount int `json:"star_count"`
+ // IsOfficial is true if the result is from an official repository.
+ IsOfficial bool `json:"is_official"`
+ // Name is the name of the repository
+ Name string `json:"name"`
+ // IsAutomated indicates whether the result is automated
+ IsAutomated bool `json:"is_automated"`
+ // Description is a textual description of the repository
+ Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+ // Query contains the query string that generated the search results
+ Query string `json:"query"`
+ // NumResults indicates the number of results the query returned
+ NumResults int `json:"num_results"`
+ // Results is a slice containing the actual results for the search
+ Results []SearchResult `json:"results"`
+}
+
+// DistributionInspect describes the result obtained from contacting the
+// registry to retrieve image metadata
+type DistributionInspect struct {
+ // Descriptor contains information about the manifest, including
+ // the content addressable digest
+ Descriptor v1.Descriptor
+ // Platforms contains the list of platforms supported by the image,
+ // obtained by parsing the manifest
+ Platforms []v1.Platform
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/seccomp.go b/unum/vendor/github.com/docker/docker/api/types/seccomp.go
new file mode 100644
index 0000000..7d62c9a
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/seccomp.go
@@ -0,0 +1,93 @@
+package types
+
+// Seccomp represents the config for a seccomp profile for syscall restriction.
+type Seccomp struct {
+ DefaultAction Action `json:"defaultAction"`
+ // Architectures is kept to maintain backward compatibility with the old
+ // seccomp profile.
+ Architectures []Arch `json:"architectures,omitempty"`
+ ArchMap []Architecture `json:"archMap,omitempty"`
+ Syscalls []*Syscall `json:"syscalls"`
+}
+
+// Architecture is used to represent a specific architecture
+// and its sub-architectures
+type Architecture struct {
+ Arch Arch `json:"architecture"`
+ SubArches []Arch `json:"subArchitectures"`
+}
+
+// Arch used for architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls
+// By default only the native architecture of the kernel is permitted
+const (
+ ArchX86 Arch = "SCMP_ARCH_X86"
+ ArchX86_64 Arch = "SCMP_ARCH_X86_64"
+ ArchX32 Arch = "SCMP_ARCH_X32"
+ ArchARM Arch = "SCMP_ARCH_ARM"
+ ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
+ ArchMIPS Arch = "SCMP_ARCH_MIPS"
+ ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
+ ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
+ ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
+ ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
+ ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+ ArchPPC Arch = "SCMP_ARCH_PPC"
+ ArchPPC64 Arch = "SCMP_ARCH_PPC64"
+ ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
+ ArchS390 Arch = "SCMP_ARCH_S390"
+ ArchS390X Arch = "SCMP_ARCH_S390X"
+)
+
+// Action taken upon Seccomp rule match
+type Action string
+
+// Define actions for Seccomp rules
+const (
+ ActKill Action = "SCMP_ACT_KILL"
+ ActTrap Action = "SCMP_ACT_TRAP"
+ ActErrno Action = "SCMP_ACT_ERRNO"
+ ActTrace Action = "SCMP_ACT_TRACE"
+ ActAllow Action = "SCMP_ACT_ALLOW"
+)
+
+// Operator used to match syscall arguments in Seccomp
+type Operator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+ OpNotEqual Operator = "SCMP_CMP_NE"
+ OpLessThan Operator = "SCMP_CMP_LT"
+ OpLessEqual Operator = "SCMP_CMP_LE"
+ OpEqualTo Operator = "SCMP_CMP_EQ"
+ OpGreaterEqual Operator = "SCMP_CMP_GE"
+ OpGreaterThan Operator = "SCMP_CMP_GT"
+ OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
+)
+
+// Arg used for matching specific syscall arguments in Seccomp
+type Arg struct {
+ Index uint `json:"index"`
+ Value uint64 `json:"value"`
+ ValueTwo uint64 `json:"valueTwo"`
+ Op Operator `json:"op"`
+}
+
+// Filter is used to conditionally apply Seccomp rules
+type Filter struct {
+ Caps []string `json:"caps,omitempty"`
+ Arches []string `json:"arches,omitempty"`
+}
+
+// Syscall is used to match a group of syscalls in Seccomp
+type Syscall struct {
+ Name string `json:"name,omitempty"`
+ Names []string `json:"names,omitempty"`
+ Action Action `json:"action"`
+ Args []*Arg `json:"args"`
+ Comment string `json:"comment"`
+ Includes Filter `json:"includes"`
+ Excludes Filter `json:"excludes"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/service_update_response.go b/unum/vendor/github.com/docker/docker/api/types/service_update_response.go
new file mode 100644
index 0000000..74ea64b
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/service_update_response.go
@@ -0,0 +1,12 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ServiceUpdateResponse service update response
+// swagger:model ServiceUpdateResponse
+type ServiceUpdateResponse struct {
+
+ // Optional warning messages
+ Warnings []string `json:"Warnings"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/stats.go b/unum/vendor/github.com/docker/docker/api/types/stats.go
new file mode 100644
index 0000000..7ca76a5
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/stats.go
@@ -0,0 +1,181 @@
+// Package types is used for API stability in the types and response to the
+// consumers of the API stats endpoint.
+package types
+
+import "time"
+
+// ThrottlingData stores CPU throttling stats of one running container.
+// Not used on Windows.
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods"`
+ // Number of periods when the container hits its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// CPUUsage stores All CPU stats aggregated since container inception.
+type CPUUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds (Linux)
+ // Units: 100's of nanoseconds (Windows)
+ TotalUsage uint64 `json:"total_usage"`
+
+ // Total CPU time consumed per core (Linux). Not used on Windows.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+
+ // Time spent by tasks of the cgroup in kernel mode (Linux).
+ // Time spent by all container processes in kernel mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+
+ // Time spent by tasks of the cgroup in user mode (Linux).
+ // Time spent by all container processes in user mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+// CPUStats aggregates and wraps all CPU related info of container
+type CPUStats struct {
+ // CPU Usage. Linux and Windows.
+ CPUUsage CPUUsage `json:"cpu_usage"`
+
+ // System Usage. Linux only.
+ SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
+
+ // Online CPUs. Linux only.
+ OnlineCPUs uint32 `json:"online_cpus,omitempty"`
+
+ // Throttling Data. Linux only.
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+// MemoryStats aggregates all memory stats since container inception on Linux.
+// Windows returns stats for commit and private working set only.
+type MemoryStats struct {
+ // Linux Memory Stats
+
+ // current res_counter usage for memory
+ Usage uint64 `json:"usage,omitempty"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ // TODO(vishh): Export these as stronger types.
+ // all the stats exported via memory.stat.
+ Stats map[string]uint64 `json:"stats,omitempty"`
+ // number of times memory usage hits limits.
+ Failcnt uint64 `json:"failcnt,omitempty"`
+ Limit uint64 `json:"limit,omitempty"`
+
+ // Windows Memory Stats
+ // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
+
+ // committed bytes
+ Commit uint64 `json:"commitbytes,omitempty"`
+ // peak committed bytes
+ CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
+ // private working set
+ PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
+}
+
+// BlkioStatEntry is one small entity to store a piece of Blkio stats
+// Not used on Windows.
+type BlkioStatEntry struct {
+ Major uint64 `json:"major"`
+ Minor uint64 `json:"minor"`
+ Op string `json:"op"`
+ Value uint64 `json:"value"`
+}
+
+// BlkioStats stores All IO service stats for data read and write.
+// This is a Linux specific structure as the differences between expressing
+// block I/O on Windows and Linux are sufficiently significant to make
+// little sense attempting to morph into a combined structure.
+type BlkioStats struct {
+ // number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+// StorageStats is the disk I/O stats for read/write on Windows.
+type StorageStats struct {
+ ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
+ ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
+ WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
+ WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
+}
+
+// NetworkStats aggregates the network stats of one container
+type NetworkStats struct {
+ // Bytes received. Windows and Linux.
+ RxBytes uint64 `json:"rx_bytes"`
+ // Packets received. Windows and Linux.
+ RxPackets uint64 `json:"rx_packets"`
+ // Received errors. Not used on Windows. Note that we dont `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ RxErrors uint64 `json:"rx_errors"`
+ // Incoming packets dropped. Windows and Linux.
+ RxDropped uint64 `json:"rx_dropped"`
+ // Bytes sent. Windows and Linux.
+ TxBytes uint64 `json:"tx_bytes"`
+ // Packets sent. Windows and Linux.
+ TxPackets uint64 `json:"tx_packets"`
+ // Sent errors. Not used on Windows. Note that we dont `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ TxErrors uint64 `json:"tx_errors"`
+ // Outgoing packets dropped. Windows and Linux.
+ TxDropped uint64 `json:"tx_dropped"`
+ // Endpoint ID. Not used on Linux.
+ EndpointID string `json:"endpoint_id,omitempty"`
+ // Instance ID. Not used on Linux.
+ InstanceID string `json:"instance_id,omitempty"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+ // Current is the number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // Limit is the hard limit on the number of pids in the cgroup.
+ // A "Limit" of 0 means that there is no limit.
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats is Ultimate struct aggregating all types of stats of one container
+type Stats struct {
+ // Common stats
+ Read time.Time `json:"read"`
+ PreRead time.Time `json:"preread"`
+
+ // Linux specific stats, not populated on Windows.
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+ // Windows specific stats, not populated on Linux.
+ NumProcs uint32 `json:"num_procs"`
+ StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+ // Shared stats
+ CPUStats CPUStats `json:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+}
+
+// StatsJSON is newly used Networks
+type StatsJSON struct {
+ Stats
+
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+
+ // Networks request version >=1.21
+ Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/unum/vendor/github.com/docker/docker/api/types/strslice/strslice.go
new file mode 100644
index 0000000..bad493f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/common.go b/unum/vendor/github.com/docker/docker/api/types/swarm/common.go
new file mode 100644
index 0000000..2834cf2
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/common.go
@@ -0,0 +1,40 @@
+package swarm
+
+import "time"
+
+// Version represents the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// Meta is a base object inherited by most of the other once.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:"Labels"`
+}
+
+// Driver represents a driver (network, logging, secrets backend).
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TLSInfo represents the TLS information about what CA certificate is trusted,
+// and who the issuer for a TLS certificate is
+type TLSInfo struct {
+ // TrustRoot is the trusted CA root certificate in PEM format
+ TrustRoot string `json:",omitempty"`
+
+ // CertIssuer is the raw subject bytes of the issuer
+ CertIssuerSubject []byte `json:",omitempty"`
+
+ // CertIssuerPublicKey is the raw public key bytes of the issuer
+ CertIssuerPublicKey []byte `json:",omitempty"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/config.go b/unum/vendor/github.com/docker/docker/api/types/swarm/config.go
new file mode 100644
index 0000000..0fb021c
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/config.go
@@ -0,0 +1,31 @@
+package swarm
+
+import "os"
+
+// Config represents a config.
+type Config struct {
+ ID string
+ Meta
+ Spec ConfigSpec
+}
+
+// ConfigSpec represents a config specification from a config in swarm
+type ConfigSpec struct {
+ Annotations
+ Data []byte `json:",omitempty"`
+}
+
+// ConfigReferenceFileTarget is a file target in a config reference
+type ConfigReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// ConfigReference is a reference to a config in swarm
+type ConfigReference struct {
+ File *ConfigReferenceFileTarget
+ ConfigID string
+ ConfigName string
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/container.go b/unum/vendor/github.com/docker/docker/api/types/swarm/container.go
new file mode 100644
index 0000000..6f8b45f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/container.go
@@ -0,0 +1,72 @@
+package swarm
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/mount"
+)
+
+// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
+// Detailed documentation is available in:
+// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+// `nameserver`, `search`, `options` have been supported.
+// TODO: `domain` is not supported yet.
+type DNSConfig struct {
+ // Nameservers specifies the IP addresses of the name servers
+ Nameservers []string `json:",omitempty"`
+ // Search specifies the search list for host-name lookup
+ Search []string `json:",omitempty"`
+ // Options allows certain internal resolver variables to be modified
+ Options []string `json:",omitempty"`
+}
+
+// SELinuxContext contains the SELinux labels of the container.
+type SELinuxContext struct {
+ Disable bool
+
+ User string
+ Role string
+ Type string
+ Level string
+}
+
+// CredentialSpec for managed service account (Windows only)
+type CredentialSpec struct {
+ File string
+ Registry string
+}
+
+// Privileges defines the security options for the container.
+type Privileges struct {
+ CredentialSpec *CredentialSpec
+ SELinuxContext *SELinuxContext
+}
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+ Image string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Command []string `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Hostname string `json:",omitempty"`
+ Env []string `json:",omitempty"`
+ Dir string `json:",omitempty"`
+ User string `json:",omitempty"`
+ Groups []string `json:",omitempty"`
+ Privileges *Privileges `json:",omitempty"`
+ StopSignal string `json:",omitempty"`
+ TTY bool `json:",omitempty"`
+ OpenStdin bool `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+ Mounts []mount.Mount `json:",omitempty"`
+ StopGracePeriod *time.Duration `json:",omitempty"`
+ Healthcheck *container.HealthConfig `json:",omitempty"`
+ // The format of extra hosts on swarmkit is specified in:
+ // http://man7.org/linux/man-pages/man5/hosts.5.html
+ // IP_address canonical_hostname [aliases...]
+ Hosts []string `json:",omitempty"`
+ DNSConfig *DNSConfig `json:",omitempty"`
+ Secrets []*SecretReference `json:",omitempty"`
+ Configs []*ConfigReference `json:",omitempty"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/network.go b/unum/vendor/github.com/docker/docker/api/types/swarm/network.go
new file mode 100644
index 0000000..97c484e
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/network.go
@@ -0,0 +1,119 @@
+package swarm
+
+import (
+ "github.com/docker/docker/api/types/network"
+)
+
+// Endpoint represents an endpoint.
+type Endpoint struct {
+ Spec EndpointSpec `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+ VirtualIPs []EndpointVirtualIP `json:",omitempty"`
+}
+
+// EndpointSpec represents the spec of an endpoint.
+type EndpointSpec struct {
+ Mode ResolutionMode `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+}
+
+// ResolutionMode represents a resolution mode.
+type ResolutionMode string
+
+const (
+ // ResolutionModeVIP VIP
+ ResolutionModeVIP ResolutionMode = "vip"
+ // ResolutionModeDNSRR DNSRR
+ ResolutionModeDNSRR ResolutionMode = "dnsrr"
+)
+
+// PortConfig represents the config of a port.
+type PortConfig struct {
+ Name string `json:",omitempty"`
+ Protocol PortConfigProtocol `json:",omitempty"`
+ // TargetPort is the port inside the container
+ TargetPort uint32 `json:",omitempty"`
+ // PublishedPort is the port on the swarm hosts
+ PublishedPort uint32 `json:",omitempty"`
+ // PublishMode is the mode in which port is published
+ PublishMode PortConfigPublishMode `json:",omitempty"`
+}
+
+// PortConfigPublishMode represents the mode in which the port is to
+// be published.
+type PortConfigPublishMode string
+
+const (
+ // PortConfigPublishModeIngress is used for ports published
+ // for ingress load balancing using routing mesh.
+ PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
+ // PortConfigPublishModeHost is used for ports published
+ // for direct host level access on the host where the task is running.
+ PortConfigPublishModeHost PortConfigPublishMode = "host"
+)
+
+// PortConfigProtocol represents the protocol of a port.
+type PortConfigProtocol string
+
+const (
+ // TODO(stevvooe): These should be used generally, not just for PortConfig.
+
+ // PortConfigProtocolTCP TCP
+ PortConfigProtocolTCP PortConfigProtocol = "tcp"
+ // PortConfigProtocolUDP UDP
+ PortConfigProtocolUDP PortConfigProtocol = "udp"
+)
+
+// EndpointVirtualIP represents the virtual ip of a port.
+type EndpointVirtualIP struct {
+ NetworkID string `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// Network represents a network.
+type Network struct {
+ ID string
+ Meta
+ Spec NetworkSpec `json:",omitempty"`
+ DriverState Driver `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+}
+
+// NetworkSpec represents the spec of a network.
+type NetworkSpec struct {
+ Annotations
+ DriverConfiguration *Driver `json:",omitempty"`
+ IPv6Enabled bool `json:",omitempty"`
+ Internal bool `json:",omitempty"`
+ Attachable bool `json:",omitempty"`
+ Ingress bool `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+ ConfigFrom *network.ConfigReference `json:",omitempty"`
+ Scope string `json:",omitempty"`
+}
+
+// NetworkAttachmentConfig represents the configuration of a network attachment.
+type NetworkAttachmentConfig struct {
+ Target string `json:",omitempty"`
+ Aliases []string `json:",omitempty"`
+ DriverOpts map[string]string `json:",omitempty"`
+}
+
+// NetworkAttachment represents a network attachment.
+type NetworkAttachment struct {
+ Network Network `json:",omitempty"`
+ Addresses []string `json:",omitempty"`
+}
+
+// IPAMOptions represents ipam options.
+type IPAMOptions struct {
+ Driver Driver `json:",omitempty"`
+ Configs []IPAMConfig `json:",omitempty"`
+}
+
+// IPAMConfig represents ipam configuration.
+type IPAMConfig struct {
+ Subnet string `json:",omitempty"`
+ Range string `json:",omitempty"`
+ Gateway string `json:",omitempty"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/node.go b/unum/vendor/github.com/docker/docker/api/types/swarm/node.go
new file mode 100644
index 0000000..28c6851
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/node.go
@@ -0,0 +1,115 @@
+package swarm
+
+// Node represents a node.
+type Node struct {
+ ID string
+ Meta
+ // Spec defines the desired state of the node as specified by the user.
+ // The system will honor this and will *never* modify it.
+ Spec NodeSpec `json:",omitempty"`
+ // Description encapsulates the properties of the Node as reported by the
+ // agent.
+ Description NodeDescription `json:",omitempty"`
+ // Status provides the current status of the node, as seen by the manager.
+ Status NodeStatus `json:",omitempty"`
+ // ManagerStatus provides the current status of the node's manager
+ // component, if the node is a manager.
+ ManagerStatus *ManagerStatus `json:",omitempty"`
+}
+
+// NodeSpec represents the spec of a node.
+type NodeSpec struct {
+ Annotations
+ Role NodeRole `json:",omitempty"`
+ Availability NodeAvailability `json:",omitempty"`
+}
+
+// NodeRole represents the role of a node.
+type NodeRole string
+
+const (
+ // NodeRoleWorker WORKER
+ NodeRoleWorker NodeRole = "worker"
+ // NodeRoleManager MANAGER
+ NodeRoleManager NodeRole = "manager"
+)
+
+// NodeAvailability represents the availability of a node.
+type NodeAvailability string
+
+const (
+ // NodeAvailabilityActive ACTIVE
+ NodeAvailabilityActive NodeAvailability = "active"
+ // NodeAvailabilityPause PAUSE
+ NodeAvailabilityPause NodeAvailability = "pause"
+ // NodeAvailabilityDrain DRAIN
+ NodeAvailabilityDrain NodeAvailability = "drain"
+)
+
+// NodeDescription represents the description of a node.
+type NodeDescription struct {
+ Hostname string `json:",omitempty"`
+ Platform Platform `json:",omitempty"`
+ Resources Resources `json:",omitempty"`
+ Engine EngineDescription `json:",omitempty"`
+ TLSInfo TLSInfo `json:",omitempty"`
+}
+
+// Platform represents the platform (Arch/OS).
+type Platform struct {
+ Architecture string `json:",omitempty"`
+ OS string `json:",omitempty"`
+}
+
+// EngineDescription represents the description of an engine.
+type EngineDescription struct {
+ EngineVersion string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Plugins []PluginDescription `json:",omitempty"`
+}
+
+// PluginDescription represents the description of an engine plugin.
+type PluginDescription struct {
+ Type string `json:",omitempty"`
+ Name string `json:",omitempty"`
+}
+
+// NodeStatus represents the status of a node.
+type NodeStatus struct {
+ State NodeState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// Reachability represents the reachability of a node.
+type Reachability string
+
+const (
+ // ReachabilityUnknown UNKNOWN
+ ReachabilityUnknown Reachability = "unknown"
+ // ReachabilityUnreachable UNREACHABLE
+ ReachabilityUnreachable Reachability = "unreachable"
+ // ReachabilityReachable REACHABLE
+ ReachabilityReachable Reachability = "reachable"
+)
+
+// ManagerStatus represents the status of a manager.
+type ManagerStatus struct {
+ Leader bool `json:",omitempty"`
+ Reachability Reachability `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// NodeState represents the state of a node.
+type NodeState string
+
+const (
+ // NodeStateUnknown UNKNOWN
+ NodeStateUnknown NodeState = "unknown"
+ // NodeStateDown DOWN
+ NodeStateDown NodeState = "down"
+ // NodeStateReady READY
+ NodeStateReady NodeState = "ready"
+ // NodeStateDisconnected DISCONNECTED
+ NodeStateDisconnected NodeState = "disconnected"
+)
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/unum/vendor/github.com/docker/docker/api/types/swarm/runtime.go
new file mode 100644
index 0000000..c4c731d
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/runtime.go
@@ -0,0 +1,19 @@
+package swarm
+
+// RuntimeType is the type of runtime used for the TaskSpec
+type RuntimeType string
+
+// RuntimeURL is the proto type url
+type RuntimeURL string
+
+const (
+ // RuntimeContainer is the container based runtime
+ RuntimeContainer RuntimeType = "container"
+ // RuntimePlugin is the plugin based runtime
+ RuntimePlugin RuntimeType = "plugin"
+
+ // RuntimeURLContainer is the proto url for the container type
+ RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer"
+ // RuntimeURLPlugin is the proto url for the plugin type
+ RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin"
+)
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/unum/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
new file mode 100644
index 0000000..47ae234
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
@@ -0,0 +1,3 @@
+//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto
+
+package runtime
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/unum/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
new file mode 100644
index 0000000..1fdc9b0
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
@@ -0,0 +1,712 @@
+// Code generated by protoc-gen-gogo.
+// source: plugin.proto
+// DO NOT EDIT!
+
+/*
+ Package runtime is a generated protocol buffer package.
+
+ It is generated from these files:
+ plugin.proto
+
+ It has these top-level messages:
+ PluginSpec
+ PluginPrivilege
+*/
+package runtime
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// PluginSpec defines the base payload which clients can specify for creating
+// a service with the plugin runtime.
+type PluginSpec struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"`
+ Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"`
+ Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
+}
+
+func (m *PluginSpec) Reset() { *m = PluginSpec{} }
+func (m *PluginSpec) String() string { return proto.CompactTextString(m) }
+func (*PluginSpec) ProtoMessage() {}
+func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} }
+
+func (m *PluginSpec) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *PluginSpec) GetRemote() string {
+ if m != nil {
+ return m.Remote
+ }
+ return ""
+}
+
+func (m *PluginSpec) GetPrivileges() []*PluginPrivilege {
+ if m != nil {
+ return m.Privileges
+ }
+ return nil
+}
+
+func (m *PluginSpec) GetDisabled() bool {
+ if m != nil {
+ return m.Disabled
+ }
+ return false
+}
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+type PluginPrivilege struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"`
+}
+
+func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} }
+func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) }
+func (*PluginPrivilege) ProtoMessage() {}
+func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} }
+
+func (m *PluginPrivilege) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *PluginPrivilege) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *PluginPrivilege) GetValue() []string {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*PluginSpec)(nil), "PluginSpec")
+ proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege")
+}
+func (m *PluginSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Remote) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote)))
+ i += copy(dAtA[i:], m.Remote)
+ }
+ if len(m.Privileges) > 0 {
+ for _, msg := range m.Privileges {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.Disabled {
+ dAtA[i] = 0x20
+ i++
+ if m.Disabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Description) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description)))
+ i += copy(dAtA[i:], m.Description)
+ }
+ if len(m.Value) > 0 {
+ for _, s := range m.Value {
+ dAtA[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int {
+ dAtA[offset] = uint8(v)
+ dAtA[offset+1] = uint8(v >> 8)
+ dAtA[offset+2] = uint8(v >> 16)
+ dAtA[offset+3] = uint8(v >> 24)
+ dAtA[offset+4] = uint8(v >> 32)
+ dAtA[offset+5] = uint8(v >> 40)
+ dAtA[offset+6] = uint8(v >> 48)
+ dAtA[offset+7] = uint8(v >> 56)
+ return offset + 8
+}
+func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int {
+ dAtA[offset] = uint8(v)
+ dAtA[offset+1] = uint8(v >> 8)
+ dAtA[offset+2] = uint8(v >> 16)
+ dAtA[offset+3] = uint8(v >> 24)
+ return offset + 4
+}
+func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *PluginSpec) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ l = len(m.Remote)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ if len(m.Privileges) > 0 {
+ for _, e := range m.Privileges {
+ l = e.Size()
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ }
+ if m.Disabled {
+ n += 2
+ }
+ return n
+}
+
+func (m *PluginPrivilege) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ l = len(m.Description)
+ if l > 0 {
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ if len(m.Value) > 0 {
+ for _, s := range m.Value {
+ l = len(s)
+ n += 1 + l + sovPlugin(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovPlugin(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozPlugin(x uint64) (n int) {
+ return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *PluginSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Remote = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Privileges = append(m.Privileges, &PluginPrivilege{})
+ if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Disabled = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPlugin(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PluginPrivilege) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipPlugin(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthPlugin
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipPlugin(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthPlugin
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowPlugin
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipPlugin(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }
+
+var fileDescriptorPlugin = []byte{
+ // 196 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
+ 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b,
+ 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30,
+ 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12,
+ 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35,
+ 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c,
+ 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a,
+ 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab,
+ 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0,
+ 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33,
+ 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
+ 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79,
+ 0x0c, 0x01, 0x00, 0x00,
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/unum/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
new file mode 100644
index 0000000..06eb7ba
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+
+// PluginSpec defines the base payload which clients can specify for creating
+// a service with the plugin runtime.
+message PluginSpec {
+ string name = 1;
+ string remote = 2;
+ repeated PluginPrivilege privileges = 3;
+ bool disabled = 4;
+}
+
+// PluginPrivilege describes a permission the user has to accept
+// upon installing a plugin.
+message PluginPrivilege {
+ string name = 1;
+ string description = 2;
+ repeated string value = 3;
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/secret.go b/unum/vendor/github.com/docker/docker/api/types/swarm/secret.go
new file mode 100644
index 0000000..f9b1e92
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/secret.go
@@ -0,0 +1,32 @@
+package swarm
+
+import "os"
+
+// Secret represents a secret.
+type Secret struct {
+ ID string
+ Meta
+ Spec SecretSpec
+}
+
+// SecretSpec represents a secret specification from a secret in swarm
+type SecretSpec struct {
+ Annotations
+ Data []byte `json:",omitempty"`
+ Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store
+}
+
+// SecretReferenceFileTarget is a file target in a secret reference
+type SecretReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// SecretReference is a reference to a secret in swarm
+type SecretReference struct {
+ File *SecretReferenceFileTarget
+ SecretID string
+ SecretName string
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/service.go b/unum/vendor/github.com/docker/docker/api/types/swarm/service.go
new file mode 100644
index 0000000..fa31a7e
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/service.go
@@ -0,0 +1,124 @@
+package swarm
+
+import "time"
+
+// Service represents a service.
+type Service struct {
+ ID string
+ Meta
+ Spec ServiceSpec `json:",omitempty"`
+ PreviousSpec *ServiceSpec `json:",omitempty"`
+ Endpoint Endpoint `json:",omitempty"`
+ UpdateStatus *UpdateStatus `json:",omitempty"`
+}
+
+// ServiceSpec represents the spec of a service.
+type ServiceSpec struct {
+ Annotations
+
+ // TaskTemplate defines how the service should construct new tasks when
+ // orchestrating this service.
+ TaskTemplate TaskSpec `json:",omitempty"`
+ Mode ServiceMode `json:",omitempty"`
+ UpdateConfig *UpdateConfig `json:",omitempty"`
+ RollbackConfig *UpdateConfig `json:",omitempty"`
+
+ // Networks field in ServiceSpec is deprecated. The
+ // same field in TaskSpec should be used instead.
+ // This field will be removed in a future release.
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+ EndpointSpec *EndpointSpec `json:",omitempty"`
+}
+
+// ServiceMode represents the mode of a service.
+type ServiceMode struct {
+ Replicated *ReplicatedService `json:",omitempty"`
+ Global *GlobalService `json:",omitempty"`
+}
+
+// UpdateState is the state of a service update.
+type UpdateState string
+
+const (
+ // UpdateStateUpdating is the updating state.
+ UpdateStateUpdating UpdateState = "updating"
+ // UpdateStatePaused is the paused state.
+ UpdateStatePaused UpdateState = "paused"
+ // UpdateStateCompleted is the completed state.
+ UpdateStateCompleted UpdateState = "completed"
+ // UpdateStateRollbackStarted is the state with a rollback in progress.
+ UpdateStateRollbackStarted UpdateState = "rollback_started"
+ // UpdateStateRollbackPaused is the state with a rollback in progress.
+ UpdateStateRollbackPaused UpdateState = "rollback_paused"
+ // UpdateStateRollbackCompleted is the state with a rollback in progress.
+ UpdateStateRollbackCompleted UpdateState = "rollback_completed"
+)
+
+// UpdateStatus reports the status of a service update.
+type UpdateStatus struct {
+ State UpdateState `json:",omitempty"`
+ StartedAt *time.Time `json:",omitempty"`
+ CompletedAt *time.Time `json:",omitempty"`
+ Message string `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+ Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+const (
+ // UpdateFailureActionPause PAUSE
+ UpdateFailureActionPause = "pause"
+ // UpdateFailureActionContinue CONTINUE
+ UpdateFailureActionContinue = "continue"
+ // UpdateFailureActionRollback ROLLBACK
+ UpdateFailureActionRollback = "rollback"
+
+ // UpdateOrderStopFirst STOP_FIRST
+ UpdateOrderStopFirst = "stop-first"
+ // UpdateOrderStartFirst START_FIRST
+ UpdateOrderStartFirst = "start-first"
+)
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+ // Maximum number of tasks to be updated in one iteration.
+ // 0 means unlimited parallelism.
+ Parallelism uint64
+
+ // Amount of time between updates.
+ Delay time.Duration `json:",omitempty"`
+
+ // FailureAction is the action to take when an update fails.
+ FailureAction string `json:",omitempty"`
+
+ // Monitor indicates how long to monitor a task for failure after it is
+ // created. If the task fails by ending up in one of the states
+ // REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+ // this counts as a failure. If it fails after Monitor, it does not
+ // count as a failure. If Monitor is unspecified, a default value will
+ // be used.
+ Monitor time.Duration `json:",omitempty"`
+
+ // MaxFailureRatio is the fraction of tasks that may fail during
+ // an update before the failure action is invoked. Any task created by
+ // the current update which ends up in one of the states REJECTED,
+ // COMPLETED or FAILED within Monitor from its creation counts as a
+ // failure. The number of failures is divided by the number of tasks
+ // being updated, and if this fraction is greater than
+ // MaxFailureRatio, the failure action is invoked.
+ //
+ // If the failure action is CONTINUE, there is no effect.
+ // If the failure action is PAUSE, no more tasks will be updated until
+ // another update is started.
+ MaxFailureRatio float32
+
+ // Order indicates the order of operations when rolling out an updated
+ // task. Either the old task is shut down before the new task is
+ // started, or the new task is started before the old task is shut down.
+ Order string
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/unum/vendor/github.com/docker/docker/api/types/swarm/swarm.go
new file mode 100644
index 0000000..b65fa86
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/swarm.go
@@ -0,0 +1,217 @@
+package swarm
+
+import "time"
+
+// ClusterInfo represents info about the cluster for outputting in "info"
+// it contains the same information as "Swarm", but without the JoinTokens
+type ClusterInfo struct {
+ ID string
+ Meta
+ Spec Spec
+ TLSInfo TLSInfo
+ RootRotationInProgress bool
+}
+
+// Swarm represents a swarm.
+type Swarm struct {
+ ClusterInfo
+ JoinTokens JoinTokens
+}
+
+// JoinTokens contains the tokens workers and managers need to join the swarm.
+type JoinTokens struct {
+ // Worker is the join token workers may use to join the swarm.
+ Worker string
+ // Manager is the join token managers may use to join the swarm.
+ Manager string
+}
+
+// Spec represents the spec of a swarm.
+type Spec struct {
+ Annotations
+
+ Orchestration OrchestrationConfig `json:",omitempty"`
+ Raft RaftConfig `json:",omitempty"`
+ Dispatcher DispatcherConfig `json:",omitempty"`
+ CAConfig CAConfig `json:",omitempty"`
+ TaskDefaults TaskDefaults `json:",omitempty"`
+ EncryptionConfig EncryptionConfig `json:",omitempty"`
+}
+
+// OrchestrationConfig represents orchestration configuration.
+type OrchestrationConfig struct {
+ // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+ // node. If negative, never remove completed or failed tasks.
+ TaskHistoryRetentionLimit *int64 `json:",omitempty"`
+}
+
+// TaskDefaults parameterizes cluster-level task creation with default values.
+type TaskDefaults struct {
+ // LogDriver selects the log driver to use for tasks created in the
+ // orchestrator if unspecified by a service.
+ //
+ // Updating this value will only have an effect on new tasks. Old tasks
+ // will continue to use their previously configured log driver until
+ // recreated.
+ LogDriver *Driver `json:",omitempty"`
+}
+
+// EncryptionConfig controls at-rest encryption of data and keys.
+type EncryptionConfig struct {
+ // AutoLockManagers specifies whether or not managers TLS keys and raft data
+ // should be encrypted at rest in such a way that they must be unlocked
+ // before the manager node starts up again.
+ AutoLockManagers bool
+}
+
+// RaftConfig represents raft configuration.
+type RaftConfig struct {
+ // SnapshotInterval is the number of log entries between snapshots.
+ SnapshotInterval uint64 `json:",omitempty"`
+
+ // KeepOldSnapshots is the number of snapshots to keep beyond the
+ // current snapshot.
+ KeepOldSnapshots *uint64 `json:",omitempty"`
+
+ // LogEntriesForSlowFollowers is the number of log entries to keep
+ // around to sync up slow followers after a snapshot is created.
+ LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+
+ // ElectionTick is the number of ticks that a follower will wait for a message
+ // from the leader before becoming a candidate and starting an election.
+ // ElectionTick must be greater than HeartbeatTick.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ ElectionTick int
+
+ // HeartbeatTick is the number of ticks between heartbeats. Every
+ // HeartbeatTick ticks, the leader will send a heartbeat to the
+ // followers.
+ //
+ // A tick currently defaults to one second, so these translate directly to
+ // seconds currently, but this is NOT guaranteed.
+ HeartbeatTick int
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+ // HeartbeatPeriod defines how often agent should send heartbeats to
+ // dispatcher.
+ HeartbeatPeriod time.Duration `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+ // NodeCertExpiry is the duration certificates should be issued for
+ NodeCertExpiry time.Duration `json:",omitempty"`
+
+ // ExternalCAs is a list of CAs to which a manager node will make
+ // certificate signing requests for node certificates.
+ ExternalCAs []*ExternalCA `json:",omitempty"`
+
+ // SigningCACert and SigningCAKey specify the desired signing root CA and
+ // root CA key for the swarm. When inspecting the cluster, the key will
+ // be redacted.
+ SigningCACert string `json:",omitempty"`
+ SigningCAKey string `json:",omitempty"`
+
+ // If this value changes, and there is no specified signing cert and key,
+ // then the swarm is forced to generate a new root certificate and key.
+ ForceRotate uint64 `json:",omitempty"`
+}
+
+// ExternalCAProtocol represents type of external CA.
+type ExternalCAProtocol string
+
+// ExternalCAProtocolCFSSL CFSSL
+const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
+
+// ExternalCA defines external CA to be used by the cluster.
+type ExternalCA struct {
+ // Protocol is the protocol used by this external CA.
+ Protocol ExternalCAProtocol
+
+ // URL is the URL where the external CA can be reached.
+ URL string
+
+ // Options is a set of additional key/value pairs whose interpretation
+ // depends on the specified CA type.
+ Options map[string]string `json:",omitempty"`
+
+ // CACert specifies which root CA is used by this external CA. This certificate must
+ // be in PEM format.
+ CACert string
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ DataPathAddr string
+ ForceNewCluster bool
+ Spec Spec
+ AutoLockManagers bool
+ Availability NodeAvailability
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+ ListenAddr string
+ AdvertiseAddr string
+ DataPathAddr string
+ RemoteAddrs []string
+ JoinToken string // accept by secret
+ Availability NodeAvailability
+}
+
+// UnlockRequest is the request used to unlock a swarm.
+type UnlockRequest struct {
+ // UnlockKey is the unlock key in ASCII-armored format.
+ UnlockKey string
+}
+
+// LocalNodeState represents the state of the local node.
+type LocalNodeState string
+
+const (
+ // LocalNodeStateInactive INACTIVE
+ LocalNodeStateInactive LocalNodeState = "inactive"
+ // LocalNodeStatePending PENDING
+ LocalNodeStatePending LocalNodeState = "pending"
+ // LocalNodeStateActive ACTIVE
+ LocalNodeStateActive LocalNodeState = "active"
+ // LocalNodeStateError ERROR
+ LocalNodeStateError LocalNodeState = "error"
+ // LocalNodeStateLocked LOCKED
+ LocalNodeStateLocked LocalNodeState = "locked"
+)
+
+// Info represents generic information about swarm.
+type Info struct {
+ NodeID string
+ NodeAddr string
+
+ LocalNodeState LocalNodeState
+ ControlAvailable bool
+ Error string
+
+ RemoteManagers []Peer
+ Nodes int `json:",omitempty"`
+ Managers int `json:",omitempty"`
+
+ Cluster *ClusterInfo `json:",omitempty"`
+}
+
+// Peer represents a peer.
+type Peer struct {
+ NodeID string
+ Addr string
+}
+
+// UpdateFlags contains flags for SwarmUpdate.
+type UpdateFlags struct {
+ RotateWorkerToken bool
+ RotateManagerToken bool
+ RotateManagerUnlockKey bool
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/swarm/task.go b/unum/vendor/github.com/docker/docker/api/types/swarm/task.go
new file mode 100644
index 0000000..ff11b07
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/swarm/task.go
@@ -0,0 +1,184 @@
+package swarm
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/swarm/runtime"
+)
+
+// TaskState represents the state of a task.
+type TaskState string
+
+const (
+ // TaskStateNew NEW
+ TaskStateNew TaskState = "new"
+ // TaskStateAllocated ALLOCATED
+ TaskStateAllocated TaskState = "allocated"
+ // TaskStatePending PENDING
+ TaskStatePending TaskState = "pending"
+ // TaskStateAssigned ASSIGNED
+ TaskStateAssigned TaskState = "assigned"
+ // TaskStateAccepted ACCEPTED
+ TaskStateAccepted TaskState = "accepted"
+ // TaskStatePreparing PREPARING
+ TaskStatePreparing TaskState = "preparing"
+ // TaskStateReady READY
+ TaskStateReady TaskState = "ready"
+ // TaskStateStarting STARTING
+ TaskStateStarting TaskState = "starting"
+ // TaskStateRunning RUNNING
+ TaskStateRunning TaskState = "running"
+ // TaskStateComplete COMPLETE
+ TaskStateComplete TaskState = "complete"
+ // TaskStateShutdown SHUTDOWN
+ TaskStateShutdown TaskState = "shutdown"
+ // TaskStateFailed FAILED
+ TaskStateFailed TaskState = "failed"
+ // TaskStateRejected REJECTED
+ TaskStateRejected TaskState = "rejected"
+)
+
+// Task represents a task.
+type Task struct {
+ ID string
+ Meta
+ Annotations
+
+ Spec TaskSpec `json:",omitempty"`
+ ServiceID string `json:",omitempty"`
+ Slot int `json:",omitempty"`
+ NodeID string `json:",omitempty"`
+ Status TaskStatus `json:",omitempty"`
+ DesiredState TaskState `json:",omitempty"`
+ NetworksAttachments []NetworkAttachment `json:",omitempty"`
+ GenericResources []GenericResource `json:",omitempty"`
+}
+
+// TaskSpec represents the spec of a task.
+type TaskSpec struct {
+ // ContainerSpec and PluginSpec are mutually exclusive.
+ // PluginSpec will only be used when the `Runtime` field is set to `plugin`
+ ContainerSpec *ContainerSpec `json:",omitempty"`
+ PluginSpec *runtime.PluginSpec `json:",omitempty"`
+
+ Resources *ResourceRequirements `json:",omitempty"`
+ RestartPolicy *RestartPolicy `json:",omitempty"`
+ Placement *Placement `json:",omitempty"`
+ Networks []NetworkAttachmentConfig `json:",omitempty"`
+
+ // LogDriver specifies the LogDriver to use for tasks created from this
+ // spec. If not present, the one on cluster default on swarm.Spec will be
+ // used, finally falling back to the engine default if not specified.
+ LogDriver *Driver `json:",omitempty"`
+
+ // ForceUpdate is a counter that triggers an update even if no relevant
+ // parameters have been changed.
+ ForceUpdate uint64
+
+ Runtime RuntimeType `json:",omitempty"`
+}
+
+// Resources represents resources (CPU/Memory).
+type Resources struct {
+ NanoCPUs int64 `json:",omitempty"`
+ MemoryBytes int64 `json:",omitempty"`
+ GenericResources []GenericResource `json:",omitempty"`
+}
+
+// GenericResource represents a "user defined" resource which can
+// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1)
+type GenericResource struct {
+ NamedResourceSpec *NamedGenericResource `json:",omitempty"`
+ DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"`
+}
+
+// NamedGenericResource represents a "user defined" resource which is defined
+// as a string.
+// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
+// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...)
+type NamedGenericResource struct {
+ Kind string `json:",omitempty"`
+ Value string `json:",omitempty"`
+}
+
+// DiscreteGenericResource represents a "user defined" resource which is defined
+// as an integer
+// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
+// Value is used to count the resource (SSD=5, HDD=3, ...)
+type DiscreteGenericResource struct {
+ Kind string `json:",omitempty"`
+ Value int64 `json:",omitempty"`
+}
+
+// ResourceRequirements represents resources requirements.
+type ResourceRequirements struct {
+ Limits *Resources `json:",omitempty"`
+ Reservations *Resources `json:",omitempty"`
+}
+
+// Placement represents orchestration parameters.
+type Placement struct {
+ Constraints []string `json:",omitempty"`
+ Preferences []PlacementPreference `json:",omitempty"`
+
+ // Platforms stores all the platforms that the image can run on.
+ // This field is used in the platform filter for scheduling. If empty,
+ // then the platform filter is off, meaning there are no scheduling restrictions.
+ Platforms []Platform `json:",omitempty"`
+}
+
+// PlacementPreference provides a way to make the scheduler aware of factors
+// such as topology.
+type PlacementPreference struct {
+ Spread *SpreadOver
+}
+
+// SpreadOver is a scheduling preference that instructs the scheduler to spread
+// tasks evenly over groups of nodes identified by labels.
+type SpreadOver struct {
+ // label descriptor, such as engine.labels.az
+ SpreadDescriptor string
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+ Condition RestartPolicyCondition `json:",omitempty"`
+ Delay *time.Duration `json:",omitempty"`
+ MaxAttempts *uint64 `json:",omitempty"`
+ Window *time.Duration `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
+type RestartPolicyCondition string
+
+const (
+ // RestartPolicyConditionNone NONE
+ RestartPolicyConditionNone RestartPolicyCondition = "none"
+ // RestartPolicyConditionOnFailure ON_FAILURE
+ RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+ // RestartPolicyConditionAny ANY
+ RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+ Timestamp time.Time `json:",omitempty"`
+ State TaskState `json:",omitempty"`
+ Message string `json:",omitempty"`
+ Err string `json:",omitempty"`
+ ContainerStatus ContainerStatus `json:",omitempty"`
+ PortStatus PortStatus `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+ ContainerID string `json:",omitempty"`
+ PID int `json:",omitempty"`
+ ExitCode int `json:",omitempty"`
+}
+
+// PortStatus represents the port status of a task's host ports whose
+// service has published host ports
+type PortStatus struct {
+ Ports []PortConfig `json:",omitempty"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/unum/vendor/github.com/docker/docker/api/types/time/duration_convert.go
new file mode 100644
index 0000000..63e1eec
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/time/duration_convert.go
@@ -0,0 +1,12 @@
+package time
+
+import (
+ "strconv"
+ "time"
+)
+
+// DurationToSecondsString converts the specified duration to the number of
+// seconds it represents, formatted as a string.
+func DurationToSecondsString(duration time.Duration) string {
+ return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/time/timestamp.go b/unum/vendor/github.com/docker/docker/api/types/time/timestamp.go
new file mode 100644
index 0000000..9aa9702
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/time/timestamp.go
@@ -0,0 +1,124 @@
+package time
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with --since and --until parameters for `docker logs` and `docker events`
+const (
+ rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
+ rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+ dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
+ dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse given string as golang duration,
+// then RFC3339 time and finally as a Unix timestamp. If
+// any of these were successful, it returns a Unix timestamp
+// as string otherwise returns the given value back.
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
+func GetTimestamp(value string, reference time.Time) (string, error) {
+ if d, err := time.ParseDuration(value); value != "0" && err == nil {
+ return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+ }
+
+ var format string
+ var parseInLocation bool
+
+ // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
+ parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+ if strings.Contains(value, ".") {
+ if parseInLocation {
+ format = rFC3339NanoLocal
+ } else {
+ format = time.RFC3339Nano
+ }
+ } else if strings.Contains(value, "T") {
+ // we want the number of colons in the T portion of the timestamp
+ tcolons := strings.Count(value, ":")
+ // if parseInLocation is off and we have a +/- zone offset (not Z) then
+ // there will be an extra colon in the input for the tz offset subtract that
+ // colon from the tcolons count
+ if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+ tcolons--
+ }
+ if parseInLocation {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15"
+ case 1:
+ format = "2006-01-02T15:04"
+ default:
+ format = rFC3339Local
+ }
+ } else {
+ switch tcolons {
+ case 0:
+ format = "2006-01-02T15Z07:00"
+ case 1:
+ format = "2006-01-02T15:04Z07:00"
+ default:
+ format = time.RFC3339
+ }
+ }
+ } else if parseInLocation {
+ format = dateLocal
+ } else {
+ format = dateWithZone
+ }
+
+ var t time.Time
+ var err error
+
+ if parseInLocation {
+ t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+ } else {
+ t, err = time.Parse(format, value)
+ }
+
+ if err != nil {
+ // if there is a `-` then it's an RFC3339 like timestamp otherwise assume unixtimestamp
+ if strings.Contains(value, "-") {
+ return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
+ }
+ return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+ }
+
+ return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the
+// format "%d.%09d", time.Unix(), int64(time.Nanosecond()))
+// if the incoming nanosecond portion is longer or shorter than 9 digits it is
+// converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable. For example:
+// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0)
+// if err == nil since := time.Unix(seconds, nanoseconds)
+// returns seconds as def(aultSeconds) if value == ""
+func ParseTimestamps(value string, def int64) (int64, int64, error) {
+ if value == "" {
+ return def, 0, nil
+ }
+ sa := strings.SplitN(value, ".", 2)
+ s, err := strconv.ParseInt(sa[0], 10, 64)
+ if err != nil {
+ return s, 0, err
+ }
+ if len(sa) != 2 {
+ return s, 0, nil
+ }
+ n, err := strconv.ParseInt(sa[1], 10, 64)
+ if err != nil {
+ return s, n, err
+ }
+ // should already be in nanoseconds but just in case convert n to nanoseconds
+ n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
+ return s, n, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/types.go b/unum/vendor/github.com/docker/docker/api/types/types.go
new file mode 100644
index 0000000..f7ac772
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/types.go
@@ -0,0 +1,575 @@
+package types
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/go-connections/nat"
+)
+
+// RootFS returns Image's RootFS description including the layer IDs.
+type RootFS struct {
+ Type string
+ Layers []string `json:",omitempty"`
+ BaseLayer string `json:",omitempty"`
+}
+
+// ImageInspect contains response of Engine API:
+// GET "/images/{name:.*}/json"
+type ImageInspect struct {
+ ID string `json:"Id"`
+ RepoTags []string
+ RepoDigests []string
+ Parent string
+ Comment string
+ Created string
+ Container string
+ ContainerConfig *container.Config
+ DockerVersion string
+ Author string
+ Config *container.Config
+ Architecture string
+ Os string
+ OsVersion string `json:",omitempty"`
+ Size int64
+ VirtualSize int64
+ GraphDriver GraphDriverData
+ RootFS RootFS
+ Metadata ImageMetadata
+}
+
+// ImageMetadata contains engine-local data about the image
+type ImageMetadata struct {
+ LastTagTime time.Time `json:",omitempty"`
+}
+
// Container contains response of Engine API:
// GET "/containers/json"
type Container struct {
	// ID is the container's unique identifier.
	ID string `json:"Id"`
	// Names holds all names the daemon knows this container by.
	Names []string
	// Image is the image reference the container was created from.
	Image string
	// ImageID is the ID of that image.
	ImageID string
	// Command is the command the container runs.
	Command string
	// Created is the creation time as a Unix timestamp
	// (presumably seconds, per the Engine API — confirm against the daemon docs).
	Created int64
	// Ports lists the container's exposed/published ports.
	Ports []Port
	// SizeRw is the size of files created or changed by the container, in bytes.
	SizeRw int64 `json:",omitempty"`
	// SizeRootFs is the total size of the container's filesystem, in bytes.
	SizeRootFs int64 `json:",omitempty"`
	// Labels holds the container's user-defined key/value metadata.
	Labels map[string]string
	// State is the container state string (e.g. "running").
	State string
	// Status is a human-readable status (state plus timing information).
	Status string
	// HostConfig surfaces the subset of host configuration returned by the
	// list endpoint (only the network mode).
	HostConfig struct {
		NetworkMode string `json:",omitempty"`
	}
	// NetworkSettings summarizes the networks the container is attached to.
	NetworkSettings *SummaryNetworkSettings
	// Mounts lists the container's mount points.
	Mounts []MountPoint
}
+
+// CopyConfig contains request body of Engine API:
+// POST "/containers/"+containerID+"/copy"
+type CopyConfig struct {
+ Resource string
+}
+
+// ContainerPathStat is used to encode the header from
+// GET "/containers/{name:.*}/archive"
+// "Name" is the file or directory name.
+type ContainerPathStat struct {
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ Mode os.FileMode `json:"mode"`
+ Mtime time.Time `json:"mtime"`
+ LinkTarget string `json:"linkTarget"`
+}
+
+// ContainerStats contains response of Engine API:
+// GET "/stats"
+type ContainerStats struct {
+ Body io.ReadCloser `json:"body"`
+ OSType string `json:"ostype"`
+}
+
+// Ping contains response of Engine API:
+// GET "/_ping"
+type Ping struct {
+ APIVersion string
+ OSType string
+ Experimental bool
+}
+
+// Version contains response of Engine API:
+// GET "/version"
+type Version struct {
+ Version string
+ APIVersion string `json:"ApiVersion"`
+ MinAPIVersion string `json:"MinAPIVersion,omitempty"`
+ GitCommit string
+ GoVersion string
+ Os string
+ Arch string
+ KernelVersion string `json:",omitempty"`
+ Experimental bool `json:",omitempty"`
+ BuildTime string `json:",omitempty"`
+}
+
+// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
+// in the version-string of external tools, such as containerd, or runC.
+type Commit struct {
+ ID string // ID is the actual commit ID of external tool.
+ Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
+}
+
+// Info contains response of Engine API:
+// GET "/info"
+type Info struct {
+ ID string
+ Containers int
+ ContainersRunning int
+ ContainersPaused int
+ ContainersStopped int
+ Images int
+ Driver string
+ DriverStatus [][2]string
+ SystemStatus [][2]string
+ Plugins PluginsInfo
+ MemoryLimit bool
+ SwapLimit bool
+ KernelMemory bool
+ CPUCfsPeriod bool `json:"CpuCfsPeriod"`
+ CPUCfsQuota bool `json:"CpuCfsQuota"`
+ CPUShares bool
+ CPUSet bool
+ IPv4Forwarding bool
+ BridgeNfIptables bool
+ BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
+ Debug bool
+ NFd int
+ OomKillDisable bool
+ NGoroutines int
+ SystemTime string
+ LoggingDriver string
+ CgroupDriver string
+ NEventsListener int
+ KernelVersion string
+ OperatingSystem string
+ OSType string
+ Architecture string
+ IndexServerAddress string
+ RegistryConfig *registry.ServiceConfig
+ NCPU int
+ MemTotal int64
+ GenericResources []swarm.GenericResource
+ DockerRootDir string
+ HTTPProxy string `json:"HttpProxy"`
+ HTTPSProxy string `json:"HttpsProxy"`
+ NoProxy string
+ Name string
+ Labels []string
+ ExperimentalBuild bool
+ ServerVersion string
+ ClusterStore string
+ ClusterAdvertise string
+ Runtimes map[string]Runtime
+ DefaultRuntime string
+ Swarm swarm.Info
+ // LiveRestoreEnabled determines whether containers should be kept
+ // running when the daemon is shutdown or upon daemon start if
+ // running containers are detected
+ LiveRestoreEnabled bool
+ Isolation container.Isolation
+ InitBinary string
+ ContainerdCommit Commit
+ RuncCommit Commit
+ InitCommit Commit
+ SecurityOptions []string
+}
+
// KeyValue holds a key/value pair.
type KeyValue struct {
	Key, Value string
}

// SecurityOpt contains the name and options of a security option.
type SecurityOpt struct {
	Name    string
	Options []KeyValue
}

// DecodeSecurityOptions decodes a security options string slice to a type safe
// SecurityOpt.
func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
	decoded := make([]SecurityOpt, 0, len(opts))
	for _, opt := range opts {
		// Daemons older than 1.13 emit bare option names without "=";
		// accept them as a name-only security option.
		if !strings.Contains(opt, "=") {
			decoded = append(decoded, SecurityOpt{Name: opt})
			continue
		}
		var current SecurityOpt
		// Each option is a comma-separated list of key=value fields.
		for _, field := range strings.Split(opt, ",") {
			pair := strings.SplitN(field, "=", 2)
			if len(pair) != 2 {
				return nil, fmt.Errorf("invalid security option %q", field)
			}
			key, value := pair[0], pair[1]
			if key == "" || value == "" {
				return nil, errors.New("invalid empty security option")
			}
			// The "name" key identifies the option itself; everything else
			// is carried as an auxiliary key/value pair.
			if key == "name" {
				current.Name = value
				continue
			}
			current.Options = append(current.Options, KeyValue{Key: key, Value: value})
		}
		decoded = append(decoded, current)
	}
	return decoded, nil
}
+
+// PluginsInfo is a temp struct holding Plugins name
+// registered with docker daemon. It is used by Info struct
+type PluginsInfo struct {
+ // List of Volume plugins registered
+ Volume []string
+ // List of Network plugins registered
+ Network []string
+ // List of Authorization plugins registered
+ Authorization []string
+ // List of Log plugins registered
+ Log []string
+}
+
+// ExecStartCheck is a temp struct used by execStart
+// Config fields is part of ExecConfig in runconfig package
+type ExecStartCheck struct {
+ // ExecStart will first check if it's detached
+ Detach bool
+ // Check if there's a tty
+ Tty bool
+}
+
+// HealthcheckResult stores information about a single run of a healthcheck probe
+type HealthcheckResult struct {
+ Start time.Time // Start is the time this check started
+ End time.Time // End is the time this check ended
+ ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
+ Output string // Output from last check
+}
+
+// Health states
+const (
+ NoHealthcheck = "none" // Indicates there is no healthcheck
+ Starting = "starting" // Starting indicates that the container is not yet ready
+ Healthy = "healthy" // Healthy indicates that the container is running correctly
+ Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
+)
+
+// Health stores information about the container's healthcheck results
+type Health struct {
+ Status string // Status is one of Starting, Healthy or Unhealthy
+ FailingStreak int // FailingStreak is the number of consecutive failures
+ Log []*HealthcheckResult // Log contains the last few results (oldest first)
+}
+
// ContainerState stores container's running state
// it's part of ContainerJSONBase and will return by "inspect" command
type ContainerState struct {
	Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
	// Running reports whether the container process is currently running.
	Running bool
	// Paused reports whether the container has been paused.
	Paused bool
	// Restarting reports whether the container is being restarted.
	Restarting bool
	// OOMKilled reports whether the container was killed by the kernel OOM killer.
	OOMKilled bool
	// Dead reports whether the container is in the "dead" state.
	Dead bool
	// Pid is the process ID of the container's main process (0 when not running).
	Pid int
	// ExitCode is the exit code of the last run of the main process.
	ExitCode int
	// Error holds the last error reported by the daemon for this container, if any.
	Error string
	// StartedAt is the time the container was last started, as a string
	// (presumably RFC3339Nano as emitted by the daemon — confirm against the API docs).
	StartedAt string
	// FinishedAt is the time the container last exited, as a string
	// (same format caveat as StartedAt).
	FinishedAt string
	// Health holds healthcheck results; nil when no healthcheck is configured.
	Health *Health `json:",omitempty"`
}
+
+// ContainerNode stores information about the node that a container
+// is running on. It's only available in Docker Swarm
+type ContainerNode struct {
+ ID string
+ IPAddress string `json:"IP"`
+ Addr string
+ Name string
+ Cpus int
+ Memory int64
+ Labels map[string]string
+}
+
+// ContainerJSONBase contains response of Engine API:
+// GET "/containers/{name:.*}/json"
+type ContainerJSONBase struct {
+ ID string `json:"Id"`
+ Created string
+ Path string
+ Args []string
+ State *ContainerState
+ Image string
+ ResolvConfPath string
+ HostnamePath string
+ HostsPath string
+ LogPath string
+ Node *ContainerNode `json:",omitempty"`
+ Name string
+ RestartCount int
+ Driver string
+ Platform string
+ MountLabel string
+ ProcessLabel string
+ AppArmorProfile string
+ ExecIDs []string
+ HostConfig *container.HostConfig
+ GraphDriver GraphDriverData
+ SizeRw *int64 `json:",omitempty"`
+ SizeRootFs *int64 `json:",omitempty"`
+}
+
+// ContainerJSON is newly used struct along with MountPoint
+type ContainerJSON struct {
+ *ContainerJSONBase
+ Mounts []MountPoint
+ Config *container.Config
+ NetworkSettings *NetworkSettings
+}
+
+// NetworkSettings exposes the network settings in the api
+type NetworkSettings struct {
+ NetworkSettingsBase
+ DefaultNetworkSettings
+ Networks map[string]*network.EndpointSettings
+}
+
+// SummaryNetworkSettings provides a summary of container's networks
+// in /containers/json
+type SummaryNetworkSettings struct {
+ Networks map[string]*network.EndpointSettings
+}
+
+// NetworkSettingsBase holds basic information about networks
+type NetworkSettingsBase struct {
+ Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`)
+ SandboxID string // SandboxID uniquely represents a container's network stack
+ HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
+ LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
+ LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
+ Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
+ SandboxKey string // SandboxKey identifies the sandbox
+ SecondaryIPAddresses []network.Address
+ SecondaryIPv6Addresses []network.Address
+}
+
+// DefaultNetworkSettings holds network information
+// during the 2 release deprecation period.
+// It will be removed in Docker 1.11.
+type DefaultNetworkSettings struct {
+ EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
+ Gateway string // Gateway holds the gateway address for the network
+ GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
+ GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
+ IPAddress string // IPAddress holds the IPv4 address for the network
+ IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
+ IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
+ MacAddress string // MacAddress holds the MAC address for the network
+}
+
+// MountPoint represents a mount point configuration inside the container.
+// This is used for reporting the mountpoints in use by a container.
+type MountPoint struct {
+ Type mount.Type `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Source string
+ Destination string
+ Driver string `json:",omitempty"`
+ Mode string
+ RW bool
+ Propagation mount.Propagation
+}
+
+// NetworkResource is the body of the "get network" http response message
+type NetworkResource struct {
+ Name string // Name is the requested name of the network
+ ID string `json:"Id"` // ID uniquely identifies a network on a single machine
+ Created time.Time // Created is the time the network created
+ Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
+ Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
+ EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
+ IPAM network.IPAM // IPAM is the network's IP Address Management
+ Internal bool // Internal represents if the network is used internal only
+ Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
+ Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
+ ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
+ ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
+ Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
+ Options map[string]string // Options holds the network specific options to use for when creating the network
+ Labels map[string]string // Labels holds metadata specific to the network being created
+ Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
+ Services map[string]network.ServiceInfo `json:",omitempty"`
+}
+
+// EndpointResource contains network resources allocated and used for a container in a network
+type EndpointResource struct {
+ Name string
+ EndpointID string
+ MacAddress string
+ IPv4Address string
+ IPv6Address string
+}
+
+// NetworkCreate is the expected body of the "create network" http request message
+type NetworkCreate struct {
+ // Check for networks with duplicate names.
+ // Network is primarily keyed based on a random ID and not on the name.
+ // Network name is strictly a user-friendly alias to the network
+ // which is uniquely identified using ID.
+ // And there is no guaranteed way to check for duplicates.
+ // Option CheckDuplicate is there to provide a best effort checking of any networks
+ // which has the same name but it is not guaranteed to catch all name collisions.
+ CheckDuplicate bool
+ Driver string
+ Scope string
+ EnableIPv6 bool
+ IPAM *network.IPAM
+ Internal bool
+ Attachable bool
+ Ingress bool
+ ConfigOnly bool
+ ConfigFrom *network.ConfigReference
+ Options map[string]string
+ Labels map[string]string
+}
+
+// NetworkCreateRequest is the request message sent to the server for network create call.
+type NetworkCreateRequest struct {
+ NetworkCreate
+ Name string
+}
+
+// NetworkCreateResponse is the response message sent by the server for network create call
+type NetworkCreateResponse struct {
+ ID string `json:"Id"`
+ Warning string
+}
+
+// NetworkConnect represents the data to be used to connect a container to the network
+type NetworkConnect struct {
+ Container string
+ EndpointConfig *network.EndpointSettings `json:",omitempty"`
+}
+
+// NetworkDisconnect represents the data to be used to disconnect a container from the network
+type NetworkDisconnect struct {
+ Container string
+ Force bool
+}
+
+// NetworkInspectOptions holds parameters to inspect network
+type NetworkInspectOptions struct {
+ Scope string
+ Verbose bool
+}
+
+// Checkpoint represents the details of a checkpoint
+type Checkpoint struct {
+ Name string // Name is the name of the checkpoint
+}
+
+// Runtime describes an OCI runtime
+type Runtime struct {
+ Path string `json:"path"`
+ Args []string `json:"runtimeArgs,omitempty"`
+}
+
+// DiskUsage contains response of Engine API:
+// GET "/system/df"
+type DiskUsage struct {
+ LayersSize int64
+ Images []*ImageSummary
+ Containers []*Container
+ Volumes []*Volume
+ BuilderSize int64
+}
+
+// ContainersPruneReport contains the response for Engine API:
+// POST "/containers/prune"
+type ContainersPruneReport struct {
+ ContainersDeleted []string
+ SpaceReclaimed uint64
+}
+
+// VolumesPruneReport contains the response for Engine API:
+// POST "/volumes/prune"
+type VolumesPruneReport struct {
+ VolumesDeleted []string
+ SpaceReclaimed uint64
+}
+
+// ImagesPruneReport contains the response for Engine API:
+// POST "/images/prune"
+type ImagesPruneReport struct {
+ ImagesDeleted []ImageDeleteResponseItem
+ SpaceReclaimed uint64
+}
+
+// BuildCachePruneReport contains the response for Engine API:
+// POST "/build/prune"
+type BuildCachePruneReport struct {
+ SpaceReclaimed uint64
+}
+
+// NetworksPruneReport contains the response for Engine API:
+// POST "/networks/prune"
+type NetworksPruneReport struct {
+ NetworksDeleted []string
+}
+
+// SecretCreateResponse contains the information returned to a client
+// on the creation of a new secret.
+type SecretCreateResponse struct {
+ // ID is the id of the created secret.
+ ID string
+}
+
+// SecretListOptions holds parameters to list secrets
+type SecretListOptions struct {
+ Filters filters.Args
+}
+
+// ConfigCreateResponse contains the information returned to a client
+// on the creation of a new config.
+type ConfigCreateResponse struct {
+ // ID is the id of the created config.
+ ID string
+}
+
+// ConfigListOptions holds parameters to list configs
+type ConfigListOptions struct {
+ Filters filters.Args
+}
+
+// PushResult contains the tag, manifest digest, and manifest size from the
+// push. It's used to signal this information to the trust code in the client
+// so it can sign the manifest if necessary.
+type PushResult struct {
+ Tag string
+ Digest string
+ Size int
+}
+
+// BuildResult contains the image id of a successful build
+type BuildResult struct {
+ ID string
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/versions/README.md b/unum/vendor/github.com/docker/docker/api/types/versions/README.md
new file mode 100644
index 0000000..1ef911e
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/versions/README.md
@@ -0,0 +1,14 @@
+# Legacy API type versions
+
+This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`.
+
+Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.
+
+## Package name conventions
+
+The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
+
+1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`.
+2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`.
+
+For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`.
diff --git a/unum/vendor/github.com/docker/docker/api/types/versions/compare.go b/unum/vendor/github.com/docker/docker/api/types/versions/compare.go
new file mode 100644
index 0000000..611d4fe
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/versions/compare.go
@@ -0,0 +1,62 @@
+package versions
+
+import (
+ "strconv"
+ "strings"
+)
+
// compare compares two version strings
// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
func compare(v1, v2 string) int {
	left := strings.Split(v1, ".")
	right := strings.Split(v2, ".")

	// Walk the longer of the two component lists; a missing component
	// on either side is treated as 0 (so "1.0" == "1.0.0").
	length := len(left)
	if len(right) > length {
		length = len(right)
	}
	for i := 0; i < length; i++ {
		var l, r int
		if i < len(left) {
			// Parse failures yield 0, matching a missing component.
			l, _ = strconv.Atoi(left[i])
		}
		if i < len(right) {
			r, _ = strconv.Atoi(right[i])
		}
		switch {
		case l > r:
			return 1
		case l < r:
			return -1
		}
	}
	return 0
}

// LessThan checks if a version is less than another.
func LessThan(v, other string) bool {
	return compare(v, other) < 0
}

// LessThanOrEqualTo checks if a version is less than or equal to another.
func LessThanOrEqualTo(v, other string) bool {
	return compare(v, other) <= 0
}

// GreaterThan checks if a version is greater than another.
func GreaterThan(v, other string) bool {
	return compare(v, other) > 0
}

// GreaterThanOrEqualTo checks if a version is greater than or equal to another.
func GreaterThanOrEqualTo(v, other string) bool {
	return compare(v, other) >= 0
}

// Equal checks if a version is equal to another.
func Equal(v, other string) bool {
	return compare(v, other) == 0
}
diff --git a/unum/vendor/github.com/docker/docker/api/types/volume.go b/unum/vendor/github.com/docker/docker/api/types/volume.go
new file mode 100644
index 0000000..b5ee96a
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/volume.go
@@ -0,0 +1,69 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// Volume volume
+// swagger:model Volume
+type Volume struct {
+
+ // Date/Time the volume was created.
+ CreatedAt string `json:"CreatedAt,omitempty"`
+
+ // Name of the volume driver used by the volume.
+ // Required: true
+ Driver string `json:"Driver"`
+
+ // User-defined key/value metadata.
+ // Required: true
+ Labels map[string]string `json:"Labels"`
+
+ // Mount path of the volume on the host.
+ // Required: true
+ Mountpoint string `json:"Mountpoint"`
+
+ // Name of the volume.
+ // Required: true
+ Name string `json:"Name"`
+
+ // The driver specific options used when creating the volume.
+ // Required: true
+ Options map[string]string `json:"Options"`
+
+ // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
+ // Required: true
+ Scope string `json:"Scope"`
+
+ // Low-level details about the volume, provided by the volume driver.
+ // Details are returned as a map with key/value pairs:
+ // `{"key":"value","key2":"value2"}`.
+ //
+ // The `Status` field is optional, and is omitted if the volume driver
+ // does not support this feature.
+ //
+ Status map[string]interface{} `json:"Status,omitempty"`
+
+ // usage data
+ UsageData *VolumeUsageData `json:"UsageData,omitempty"`
+}
+
+// VolumeUsageData Usage details about the volume. This information is used by the
+// `GET /system/df` endpoint, and omitted in other endpoints.
+//
+// swagger:model VolumeUsageData
+type VolumeUsageData struct {
+
+ // The number of containers referencing this volume. This field
+ // is set to `-1` if the reference-count is not available.
+ //
+ // Required: true
+ RefCount int64 `json:"RefCount"`
+
+ // Amount of disk space used by the volume (in bytes). This information
+ // is only available for volumes created with the `"local"` volume
+ // driver. For volumes created with other volume drivers, this field
+ // is set to `-1` ("not available")
+ //
+ // Required: true
+ Size int64 `json:"Size"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/volume/volumes_create.go b/unum/vendor/github.com/docker/docker/api/types/volume/volumes_create.go
new file mode 100644
index 0000000..9f70e43
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/volume/volumes_create.go
@@ -0,0 +1,29 @@
+package volume
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// VolumesCreateBody volumes create body
+// swagger:model VolumesCreateBody
+type VolumesCreateBody struct {
+
+ // Name of the volume driver to use.
+ // Required: true
+ Driver string `json:"Driver"`
+
+ // A mapping of driver options and values. These options are passed directly to the driver and are driver specific.
+ // Required: true
+ DriverOpts map[string]string `json:"DriverOpts"`
+
+ // User-defined key/value metadata.
+ // Required: true
+ Labels map[string]string `json:"Labels"`
+
+ // The new volume's name. If not specified, Docker generates a name.
+ // Required: true
+ Name string `json:"Name"`
+}
diff --git a/unum/vendor/github.com/docker/docker/api/types/volume/volumes_list.go b/unum/vendor/github.com/docker/docker/api/types/volume/volumes_list.go
new file mode 100644
index 0000000..833dad9
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/api/types/volume/volumes_list.go
@@ -0,0 +1,23 @@
+package volume
+
+// ----------------------------------------------------------------------------
+// DO NOT EDIT THIS FILE
+// This file was generated by `swagger generate operation`
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+import "github.com/docker/docker/api/types"
+
+// VolumesListOKBody volumes list o k body
+// swagger:model VolumesListOKBody
+type VolumesListOKBody struct {
+
+ // List of volumes
+ // Required: true
+ Volumes []*types.Volume `json:"Volumes"`
+
+ // Warnings that occurred when fetching the list of volumes
+ // Required: true
+ Warnings []string `json:"Warnings"`
+}
diff --git a/unum/vendor/github.com/docker/docker/client/README.md b/unum/vendor/github.com/docker/docker/client/README.md
new file mode 100644
index 0000000..059dfb3
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/README.md
@@ -0,0 +1,35 @@
+# Go client for the Docker Engine API
+
+The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc.
+
+For example, to list running containers (the equivalent of `docker ps`):
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/client"
+)
+
+func main() {
+ cli, err := client.NewEnvClient()
+ if err != nil {
+ panic(err)
+ }
+
+ containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
+ if err != nil {
+ panic(err)
+ }
+
+ for _, container := range containers {
+ fmt.Printf("%s %s\n", container.ID[:10], container.Image)
+ }
+}
+```
+
+[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client)
diff --git a/unum/vendor/github.com/docker/docker/client/build_prune.go b/unum/vendor/github.com/docker/docker/client/build_prune.go
new file mode 100644
index 0000000..ccab115
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/build_prune.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// BuildCachePrune requests the daemon to delete unused cache data
+func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) {
+ if err := cli.NewVersionError("1.31", "build prune"); err != nil {
+ return nil, err
+ }
+
+ report := types.BuildCachePruneReport{}
+
+ serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+ return nil, fmt.Errorf("Error retrieving disk usage: %v", err)
+ }
+
+ return &report, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/checkpoint_create.go b/unum/vendor/github.com/docker/docker/client/checkpoint_create.go
new file mode 100644
index 0000000..0effe49
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/checkpoint_create.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
// CheckpointCreate creates a checkpoint from the given container with the given name
func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error {
	// POST /containers/{name}/checkpoints with options JSON-encoded as the
	// request body; the daemon returns no payload on success.
	resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil)
	// Drain and close the response body so the underlying connection can be reused.
	ensureReaderClosed(resp)
	return err
}
diff --git a/unum/vendor/github.com/docker/docker/client/checkpoint_delete.go b/unum/vendor/github.com/docker/docker/client/checkpoint_delete.go
new file mode 100644
index 0000000..e6e7558
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/checkpoint_delete.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
// CheckpointDelete deletes the checkpoint with the given name from the given container
func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error {
	query := url.Values{}
	// An empty CheckpointDir means the daemon's default checkpoint directory.
	if options.CheckpointDir != "" {
		query.Set("dir", options.CheckpointDir)
	}

	// DELETE /containers/{id}/checkpoints/{checkpoint}
	resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil)
	// Drain and close the response body so the underlying connection can be reused.
	ensureReaderClosed(resp)
	return err
}
diff --git a/unum/vendor/github.com/docker/docker/client/checkpoint_list.go b/unum/vendor/github.com/docker/docker/client/checkpoint_list.go
new file mode 100644
index 0000000..ffe44bc
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/checkpoint_list.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
// CheckpointList returns the checkpoints of the given container in the docker host
func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) {
	var checkpoints []types.Checkpoint

	query := url.Values{}
	// An empty CheckpointDir means the daemon's default checkpoint directory.
	if options.CheckpointDir != "" {
		query.Set("dir", options.CheckpointDir)
	}

	resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
	if err != nil {
		// Map a 404 to the typed container-not-found error so callers can
		// distinguish a missing container from other transport failures.
		if resp.statusCode == http.StatusNotFound {
			return checkpoints, containerNotFoundError{container}
		}
		return checkpoints, err
	}

	// Decode the checkpoint list, then drain/close the body so the
	// underlying connection can be reused.
	err = json.NewDecoder(resp.body).Decode(&checkpoints)
	ensureReaderClosed(resp)
	return checkpoints, err
}
diff --git a/unum/vendor/github.com/docker/docker/client/client.go b/unum/vendor/github.com/docker/docker/client/client.go
new file mode 100644
index 0000000..f7a8c07
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/client.go
@@ -0,0 +1,315 @@
+/*
+Package client is a Go client for the Docker Engine API.
+
+The "docker" command uses this package to communicate with the daemon. It can also
+be used by your own Go applications to do anything the command-line interface does
+- running containers, pulling images, managing swarms, etc.
+
+For more information about the Engine API, see the documentation:
+https://docs.docker.com/engine/reference/api/
+
+Usage
+
+You use the library by creating a client object and calling methods on it. The
+client can be created either from environment variables with NewEnvClient, or
+configured manually with NewClient.
+
+For example, to list running containers (the equivalent of "docker ps"):
+
+ package main
+
+ import (
+ "context"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/client"
+ )
+
+ func main() {
+ cli, err := client.NewEnvClient()
+ if err != nil {
+ panic(err)
+ }
+
+ containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
+ if err != nil {
+ panic(err)
+ }
+
+ for _, container := range containers {
+ fmt.Printf("%s %s\n", container.ID[:10], container.Image)
+ }
+ }
+
+*/
+package client
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/api"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/docker/go-connections/sockets"
+ "github.com/docker/go-connections/tlsconfig"
+ "golang.org/x/net/context"
+)
+
+// ErrRedirect is the error returned by checkRedirect when the request is non-GET.
+var ErrRedirect = errors.New("unexpected redirect in response")
+
+// Client is the API client that performs all operations
+// against a docker server.
+type Client struct {
+ // scheme sets the scheme for the client
+ scheme string
+ // host holds the server address to connect to
+ host string
+ // proto holds the client protocol i.e. unix.
+ proto string
+ // addr holds the client address.
+ addr string
+ // basePath holds the path to prepend to the requests.
+ basePath string
+ // client used to send and receive http requests.
+ client *http.Client
+ // version of the server to talk to.
+ version string
+ // custom http headers configured by users.
+ customHTTPHeaders map[string]string
+ // manualOverride is set to true when the version was set by users.
+ manualOverride bool
+}
+
+// CheckRedirect specifies the policy for dealing with redirect responses:
+// If the request is non-GET return `ErrRedirect`. Otherwise use the last response.
+//
+// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client.
+// The Docker client (and by extension docker API client) can be made to send a request
+// like POST /containers//start where what would normally be in the name section of the URL is empty.
+// This triggers an HTTP 301 from the daemon.
+// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon.
+// This behavior change manifests in the client in that before the 301 was not followed and
+// the client did not generate an error, but now results in a message like Error response from daemon: page not found.
+func CheckRedirect(req *http.Request, via []*http.Request) error {
+ if via[0].Method == http.MethodGet {
+ return http.ErrUseLastResponse
+ }
+ return ErrRedirect
+}
+
+// NewEnvClient initializes a new API client based on environment variables.
+// Use DOCKER_HOST to set the url to the docker server.
+// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
+// Use DOCKER_CERT_PATH to load the TLS certificates from.
+// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
+func NewEnvClient() (*Client, error) {
+ var client *http.Client
+ if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
+ options := tlsconfig.Options{
+ CAFile: filepath.Join(dockerCertPath, "ca.pem"),
+ CertFile: filepath.Join(dockerCertPath, "cert.pem"),
+ KeyFile: filepath.Join(dockerCertPath, "key.pem"),
+ InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
+ }
+ tlsc, err := tlsconfig.Client(options)
+ if err != nil {
+ return nil, err
+ }
+
+ client = &http.Client{
+ Transport: &http.Transport{
+ TLSClientConfig: tlsc,
+ },
+ CheckRedirect: CheckRedirect,
+ }
+ }
+
+ host := os.Getenv("DOCKER_HOST")
+ if host == "" {
+ host = DefaultDockerHost
+ }
+ version := os.Getenv("DOCKER_API_VERSION")
+ if version == "" {
+ version = api.DefaultVersion
+ }
+
+ cli, err := NewClient(host, version, client, nil)
+ if err != nil {
+ return cli, err
+ }
+ if os.Getenv("DOCKER_API_VERSION") != "" {
+ cli.manualOverride = true
+ }
+ return cli, nil
+}
+
+// NewClient initializes a new API client for the given host and API version.
+// It uses the given http client as transport.
+// It also initializes the custom http headers to add to each request.
+//
+// It won't send any version information if the version number is empty. It is
+// highly recommended that you set a version or your client may break if the
+// server is upgraded.
+func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
+ proto, addr, basePath, err := ParseHost(host)
+ if err != nil {
+ return nil, err
+ }
+
+ if client != nil {
+ if _, ok := client.Transport.(http.RoundTripper); !ok {
+ return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport)
+ }
+ } else {
+ transport := new(http.Transport)
+ sockets.ConfigureTransport(transport, proto, addr)
+ client = &http.Client{
+ Transport: transport,
+ CheckRedirect: CheckRedirect,
+ }
+ }
+
+ scheme := "http"
+ tlsConfig := resolveTLSConfig(client.Transport)
+ if tlsConfig != nil {
+ // TODO(stevvooe): This isn't really the right way to write clients in Go.
+ // `NewClient` should probably only take an `*http.Client` and work from there.
+ // Unfortunately, the model of having a host-ish/url-thingy as the connection
+ // string has us confusing protocol and transport layers. We continue doing
+ // this to avoid breaking existing clients but this should be addressed.
+ scheme = "https"
+ }
+
+ return &Client{
+ scheme: scheme,
+ host: host,
+ proto: proto,
+ addr: addr,
+ basePath: basePath,
+ client: client,
+ version: version,
+ customHTTPHeaders: httpHeaders,
+ }, nil
+}
+
+// Close ensures that transport.Client is closed
+// especially needed while using NewClient with *http.Client = nil
+// for example
+// client.NewClient("unix:///var/run/docker.sock", "v1.18", nil, map[string]string{"User-Agent": "engine-api-cli-1.0"})
+func (cli *Client) Close() error {
+
+ if t, ok := cli.client.Transport.(*http.Transport); ok {
+ t.CloseIdleConnections()
+ }
+
+ return nil
+}
+
+// getAPIPath returns the versioned request path to call the api.
+// It appends the query parameters to the path if they are not empty.
+func (cli *Client) getAPIPath(p string, query url.Values) string {
+ var apiPath string
+ if cli.version != "" {
+ v := strings.TrimPrefix(cli.version, "v")
+ apiPath = path.Join(cli.basePath, "/v"+v+p)
+ } else {
+ apiPath = path.Join(cli.basePath, p)
+ }
+
+ u := &url.URL{
+ Path: apiPath,
+ }
+ if len(query) > 0 {
+ u.RawQuery = query.Encode()
+ }
+ return u.String()
+}
+
+// ClientVersion returns the version string associated with this
+// instance of the Client. Note that this value can be changed
+// via the DOCKER_API_VERSION env var.
+// This operation doesn't acquire a mutex.
+func (cli *Client) ClientVersion() string {
+ return cli.version
+}
+
+// NegotiateAPIVersion updates the version string associated with this
+// instance of the Client to match the latest version the server supports
+func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
+ ping, _ := cli.Ping(ctx)
+ cli.NegotiateAPIVersionPing(ping)
+}
+
+// NegotiateAPIVersionPing updates the version string associated with this
+// instance of the Client to match the latest version the server supports
+func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
+ if cli.manualOverride {
+ return
+ }
+
+ // try the latest version before versioning headers existed
+ if p.APIVersion == "" {
+ p.APIVersion = "1.24"
+ }
+
+ // if the client is not initialized with a version, start with the latest supported version
+ if cli.version == "" {
+ cli.version = api.DefaultVersion
+ }
+
+ // if server version is lower than the maximum version supported by the Client, downgrade
+ if versions.LessThan(p.APIVersion, api.DefaultVersion) {
+ cli.version = p.APIVersion
+ }
+}
+
+// DaemonHost returns the host associated with this instance of the Client.
+// This operation doesn't acquire a mutex.
+func (cli *Client) DaemonHost() string {
+ return cli.host
+}
+
+// ParseHost verifies that the given host strings is valid.
+func ParseHost(host string) (string, string, string, error) {
+ protoAddrParts := strings.SplitN(host, "://", 2)
+ if len(protoAddrParts) == 1 {
+ return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
+ }
+
+ var basePath string
+ proto, addr := protoAddrParts[0], protoAddrParts[1]
+ if proto == "tcp" {
+ parsed, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return "", "", "", err
+ }
+ addr = parsed.Host
+ basePath = parsed.Path
+ }
+ return proto, addr, basePath, nil
+}
+
+// CustomHTTPHeaders returns the custom http headers associated with this
+// instance of the Client. This operation doesn't acquire a mutex.
+func (cli *Client) CustomHTTPHeaders() map[string]string {
+ m := make(map[string]string)
+ for k, v := range cli.customHTTPHeaders {
+ m[k] = v
+ }
+ return m
+}
+
+// SetCustomHTTPHeaders updates the custom http headers associated with this
+// instance of the Client. This operation doesn't acquire a mutex.
+func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
+ cli.customHTTPHeaders = headers
+}
diff --git a/unum/vendor/github.com/docker/docker/client/client_unix.go b/unum/vendor/github.com/docker/docker/client/client_unix.go
new file mode 100644
index 0000000..89de892
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/client_unix.go
@@ -0,0 +1,6 @@
+// +build linux freebsd solaris openbsd darwin
+
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/unum/vendor/github.com/docker/docker/client/client_windows.go b/unum/vendor/github.com/docker/docker/client/client_windows.go
new file mode 100644
index 0000000..07c0c7a
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/client_windows.go
@@ -0,0 +1,4 @@
+package client
+
+// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/unum/vendor/github.com/docker/docker/client/config_create.go b/unum/vendor/github.com/docker/docker/client/config_create.go
new file mode 100644
index 0000000..bc4a952
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/config_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ConfigCreate creates a new Config.
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
+ var response types.ConfigCreateResponse
+ if err := cli.NewVersionError("1.30", "config create"); err != nil {
+ return response, err
+ }
+ resp, err := cli.post(ctx, "/configs/create", nil, config, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/config_inspect.go b/unum/vendor/github.com/docker/docker/client/config_inspect.go
new file mode 100644
index 0000000..ebb6d63
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/config_inspect.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ConfigInspectWithRaw returns the config information with raw data
+func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) {
+ if err := cli.NewVersionError("1.30", "config inspect"); err != nil {
+ return swarm.Config{}, nil, err
+ }
+ resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return swarm.Config{}, nil, configNotFoundError{id}
+ }
+ return swarm.Config{}, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return swarm.Config{}, nil, err
+ }
+
+ var config swarm.Config
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&config)
+
+ return config, body, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/config_list.go b/unum/vendor/github.com/docker/docker/client/config_list.go
new file mode 100644
index 0000000..8483ca1
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/config_list.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ConfigList returns the list of configs.
+func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) {
+ if err := cli.NewVersionError("1.30", "config list"); err != nil {
+ return nil, err
+ }
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/configs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var configs []swarm.Config
+ err = json.NewDecoder(resp.body).Decode(&configs)
+ ensureReaderClosed(resp)
+ return configs, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/config_remove.go b/unum/vendor/github.com/docker/docker/client/config_remove.go
new file mode 100644
index 0000000..726b5c8
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/config_remove.go
@@ -0,0 +1,13 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ConfigRemove removes a Config.
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
+ if err := cli.NewVersionError("1.30", "config remove"); err != nil {
+ return err
+ }
+ resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/config_update.go b/unum/vendor/github.com/docker/docker/client/config_update.go
new file mode 100644
index 0000000..823751b
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/config_update.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ConfigUpdate attempts to update a Config
+func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error {
+ if err := cli.NewVersionError("1.30", "config update"); err != nil {
+ return err
+ }
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_attach.go b/unum/vendor/github.com/docker/docker/client/container_attach.go
new file mode 100644
index 0000000..0fdf3ed
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_attach.go
@@ -0,0 +1,57 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedConnection with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
+// This is the size of OUTPUT.
+//
+// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
+// stream.
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
+ query := url.Values{}
+ if options.Stream {
+ query.Set("stream", "1")
+ }
+ if options.Stdin {
+ query.Set("stdin", "1")
+ }
+ if options.Stdout {
+ query.Set("stdout", "1")
+ }
+ if options.Stderr {
+ query.Set("stderr", "1")
+ }
+ if options.DetachKeys != "" {
+ query.Set("detachKeys", options.DetachKeys)
+ }
+ if options.Logs {
+ query.Set("logs", "1")
+ }
+
+ headers := map[string][]string{"Content-Type": {"text/plain"}}
+ return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers)
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_commit.go b/unum/vendor/github.com/docker/docker/client/container_commit.go
new file mode 100644
index 0000000..531d796
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_commit.go
@@ -0,0 +1,55 @@
+package client
+
+import (
+ "encoding/json"
+ "errors"
+ "net/url"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerCommit applies changes into a container and creates a new tagged image.
+func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) {
+ var repository, tag string
+ if options.Reference != "" {
+ ref, err := reference.ParseNormalizedNamed(options.Reference)
+ if err != nil {
+ return types.IDResponse{}, err
+ }
+
+ if _, isCanonical := ref.(reference.Canonical); isCanonical {
+ return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference")
+ }
+ ref = reference.TagNameOnly(ref)
+
+ if tagged, ok := ref.(reference.Tagged); ok {
+ tag = tagged.Tag()
+ }
+ repository = reference.FamiliarName(ref)
+ }
+
+ query := url.Values{}
+ query.Set("container", container)
+ query.Set("repo", repository)
+ query.Set("tag", tag)
+ query.Set("comment", options.Comment)
+ query.Set("author", options.Author)
+ for _, change := range options.Changes {
+ query.Add("changes", change)
+ }
+ if options.Pause != true {
+ query.Set("pause", "0")
+ }
+
+ var response types.IDResponse
+ resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_copy.go b/unum/vendor/github.com/docker/docker/client/container_copy.go
new file mode 100644
index 0000000..30ba680
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_copy.go
@@ -0,0 +1,102 @@
+package client
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path/filepath"
+ "strings"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+)
+
+// ContainerStatPath returns Stat information about a path inside the container filesystem.
+func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
+ query := url.Values{}
+ query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+
+ urlStr := "/containers/" + containerID + "/archive"
+ response, err := cli.head(ctx, urlStr, query, nil)
+ if err != nil {
+ return types.ContainerPathStat{}, err
+ }
+ defer ensureReaderClosed(response)
+ return getContainerPathStatFromHeader(response.header)
+}
+
+// CopyToContainer copies content into the container filesystem.
+// Note that `content` must be a Reader for a TAR
+func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error {
+ query := url.Values{}
+ query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+ // Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
+ if !options.AllowOverwriteDirWithFile {
+ query.Set("noOverwriteDirNonDir", "true")
+ }
+
+ if options.CopyUIDGID {
+ query.Set("copyUIDGID", "true")
+ }
+
+ apiPath := "/containers/" + container + "/archive"
+
+ response, err := cli.putRaw(ctx, apiPath, query, content, nil)
+ if err != nil {
+ return err
+ }
+ defer ensureReaderClosed(response)
+
+ if response.statusCode != http.StatusOK {
+ return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
+ }
+
+ return nil
+}
+
+// CopyFromContainer gets the content from the container and returns it as a Reader
+// to manipulate it in the host. It's up to the caller to close the reader.
+func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
+ query := make(url.Values, 1)
+ query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
+
+ apiPath := "/containers/" + container + "/archive"
+ response, err := cli.get(ctx, apiPath, query, nil)
+ if err != nil {
+ return nil, types.ContainerPathStat{}, err
+ }
+
+ if response.statusCode != http.StatusOK {
+ return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
+ }
+
+ // In order to get the copy behavior right, we need to know information
+ // about both the source and the destination. The response headers include
+ // stat info about the source that we can use in deciding exactly how to
+ // copy it locally. Along with the stat info about the local destination,
+ // we have everything we need to handle the multiple possibilities there
+ // can be when copying a file/dir from one location to another file/dir.
+ stat, err := getContainerPathStatFromHeader(response.header)
+ if err != nil {
+ return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
+ }
+ return response.body, stat, err
+}
+
+func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
+ var stat types.ContainerPathStat
+
+ encodedStat := header.Get("X-Docker-Container-Path-Stat")
+ statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
+
+ err := json.NewDecoder(statDecoder).Decode(&stat)
+ if err != nil {
+ err = fmt.Errorf("unable to decode container path stat header: %s", err)
+ }
+
+ return stat, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_create.go b/unum/vendor/github.com/docker/docker/client/container_create.go
new file mode 100644
index 0000000..6841b0b
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_create.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strings"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/versions"
+ "golang.org/x/net/context"
+)
+
+type configWrapper struct {
+ *container.Config
+ HostConfig *container.HostConfig
+ NetworkingConfig *network.NetworkingConfig
+}
+
+// ContainerCreate creates a new container based on the given configuration.
+// It can be associated with a name, but it's not mandatory.
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) {
+ var response container.ContainerCreateCreatedBody
+
+ if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
+ return response, err
+ }
+
+ // When using API 1.24 and under, the client is responsible for removing the container
+ if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") {
+ hostConfig.AutoRemove = false
+ }
+
+ query := url.Values{}
+ if containerName != "" {
+ query.Set("name", containerName)
+ }
+
+ body := configWrapper{
+ Config: config,
+ HostConfig: hostConfig,
+ NetworkingConfig: networkingConfig,
+ }
+
+ serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
+ if err != nil {
+ if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
+ return response, imageNotFoundError{config.Image}
+ }
+ return response, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_diff.go b/unum/vendor/github.com/docker/docker/client/container_diff.go
new file mode 100644
index 0000000..884dc9f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_diff.go
@@ -0,0 +1,23 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/container"
+ "golang.org/x/net/context"
+)
+
+// ContainerDiff shows differences in a container filesystem since it was started.
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) {
+ var changes []container.ContainerChangeResponseItem
+
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
+ if err != nil {
+ return changes, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&changes)
+ ensureReaderClosed(serverResp)
+ return changes, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_exec.go b/unum/vendor/github.com/docker/docker/client/container_exec.go
new file mode 100644
index 0000000..0665c54
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_exec.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerExecCreate creates a new exec configuration to run an exec process.
+func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
+ var response types.IDResponse
+
+ if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil {
+ return response, err
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
+ if err != nil {
+ return response, err
+ }
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
+
+// ContainerExecStart starts an exec process already created in the docker host.
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+ resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
+ ensureReaderClosed(resp)
+ return err
+}
+
+// ContainerExecAttach attaches a connection to an exec process in the server.
+// It returns a types.HijackedConnection with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
+ headers := map[string][]string{"Content-Type": {"application/json"}}
+ return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+}
+
+// ContainerExecInspect returns information about a specific exec process on the docker host.
+func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
+ var response types.ContainerExecInspect
+ resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_export.go b/unum/vendor/github.com/docker/docker/client/container_export.go
new file mode 100644
index 0000000..52194f3
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_export.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerExport retrieves the raw contents of a container
+// and returns them as an io.ReadCloser. It's up to the caller
+// to close the stream.
+func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return serverResp.body, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_inspect.go b/unum/vendor/github.com/docker/docker/client/container_inspect.go
new file mode 100644
index 0000000..17f1809
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_inspect.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerInspect returns the container information.
+func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return types.ContainerJSON{}, containerNotFoundError{containerID}
+ }
+ return types.ContainerJSON{}, err
+ }
+
+ var response types.ContainerJSON
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
+
+// ContainerInspectWithRaw returns the container information and its raw representation.
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
+ query := url.Values{}
+ if getSize {
+ query.Set("size", "1")
+ }
+ serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
+ }
+ return types.ContainerJSON{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return types.ContainerJSON{}, nil, err
+ }
+
+ var response types.ContainerJSON
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_kill.go b/unum/vendor/github.com/docker/docker/client/container_kill.go
new file mode 100644
index 0000000..29f80c7
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_kill.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerKill terminates the container process but does not remove the container from the docker host.
+func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
+ query := url.Values{}
+ query.Set("signal", signal)
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_list.go b/unum/vendor/github.com/docker/docker/client/container_list.go
new file mode 100644
index 0000000..4398912
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_list.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// ContainerList returns the list of containers in the docker host.
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
+ query := url.Values{}
+
+ if options.All {
+ query.Set("all", "1")
+ }
+
+ if options.Limit != -1 {
+ query.Set("limit", strconv.Itoa(options.Limit))
+ }
+
+ if options.Since != "" {
+ query.Set("since", options.Since)
+ }
+
+ if options.Before != "" {
+ query.Set("before", options.Before)
+ }
+
+ if options.Size {
+ query.Set("size", "1")
+ }
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/containers/json", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var containers []types.Container
+ err = json.NewDecoder(resp.body).Decode(&containers)
+ ensureReaderClosed(resp)
+ return containers, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_logs.go b/unum/vendor/github.com/docker/docker/client/container_logs.go
new file mode 100644
index 0000000..0f32e9f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_logs.go
@@ -0,0 +1,72 @@
+package client
+
+import (
+ "io"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
+// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
+// It's up to the caller to close the stream.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
+// This is the size of OUTPUT.
+//
+// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
+// stream.
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
+ query := url.Values{}
+ if options.ShowStdout {
+ query.Set("stdout", "1")
+ }
+
+ if options.ShowStderr {
+ query.Set("stderr", "1")
+ }
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Timestamps {
+ query.Set("timestamps", "1")
+ }
+
+ if options.Details {
+ query.Set("details", "1")
+ }
+
+ if options.Follow {
+ query.Set("follow", "1")
+ }
+ query.Set("tail", options.Tail)
+
+ resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_pause.go b/unum/vendor/github.com/docker/docker/client/container_pause.go
new file mode 100644
index 0000000..412067a
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_pause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerPause pauses the main process of a given container without terminating it.
+func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_prune.go b/unum/vendor/github.com/docker/docker/client/container_prune.go
new file mode 100644
index 0000000..b582170
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_prune.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// ContainersPrune requests the daemon to delete unused data
+func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) {
+ var report types.ContainersPruneReport
+
+ if err := cli.NewVersionError("1.25", "container prune"); err != nil {
+ return report, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return report, err
+ }
+
+ serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil)
+ if err != nil {
+ return report, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+ return report, fmt.Errorf("Error retrieving disk usage: %v", err)
+ }
+
+ return report, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_remove.go b/unum/vendor/github.com/docker/docker/client/container_remove.go
new file mode 100644
index 0000000..3a79590
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_remove.go
@@ -0,0 +1,27 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerRemove kills and removes a container from the docker host.
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
+ query := url.Values{}
+ if options.RemoveVolumes {
+ query.Set("v", "1")
+ }
+ if options.RemoveLinks {
+ query.Set("link", "1")
+ }
+
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_rename.go b/unum/vendor/github.com/docker/docker/client/container_rename.go
new file mode 100644
index 0000000..0e718da
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_rename.go
@@ -0,0 +1,16 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ContainerRename changes the name of a given container.
+func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
+ query := url.Values{}
+ query.Set("name", newContainerName)
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_resize.go b/unum/vendor/github.com/docker/docker/client/container_resize.go
new file mode 100644
index 0000000..66c3cc1
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_resize.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerResize changes the size of the tty for a container.
+func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error {
+ return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
+}
+
+// ContainerExecResize changes the size of the tty for an exec process running inside a container.
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
+ return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
+}
+
+func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error {
+ query := url.Values{}
+ query.Set("h", strconv.Itoa(int(height)))
+ query.Set("w", strconv.Itoa(int(width)))
+
+ resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_restart.go b/unum/vendor/github.com/docker/docker/client/container_restart.go
new file mode 100644
index 0000000..74d7455
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_restart.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "net/url"
+ "time"
+
+ timetypes "github.com/docker/docker/api/types/time"
+ "golang.org/x/net/context"
+)
+
+// ContainerRestart stops and starts a container again.
+// It makes the daemon to wait for the container to be up again for
+// a specific amount of time, given the timeout.
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error {
+ query := url.Values{}
+ if timeout != nil {
+ query.Set("t", timetypes.DurationToSecondsString(*timeout))
+ }
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_start.go b/unum/vendor/github.com/docker/docker/client/container_start.go
new file mode 100644
index 0000000..b1f08de
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_start.go
@@ -0,0 +1,24 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+)
+
+// ContainerStart sends a request to the docker daemon to start a container.
+func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error {
+ query := url.Values{}
+ if len(options.CheckpointID) != 0 {
+ query.Set("checkpoint", options.CheckpointID)
+ }
+ if len(options.CheckpointDir) != 0 {
+ query.Set("checkpoint-dir", options.CheckpointDir)
+ }
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_stats.go b/unum/vendor/github.com/docker/docker/client/container_stats.go
new file mode 100644
index 0000000..4758c66
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_stats.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ContainerStats returns near realtime stats for a given container.
+// It's up to the caller to close the io.ReadCloser returned.
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
+ query := url.Values{}
+ query.Set("stream", "0")
+ if stream {
+ query.Set("stream", "1")
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
+ if err != nil {
+ return types.ContainerStats{}, err
+ }
+
+ osType := getDockerOS(resp.header.Get("Server"))
+ return types.ContainerStats{Body: resp.body, OSType: osType}, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_stop.go b/unum/vendor/github.com/docker/docker/client/container_stop.go
new file mode 100644
index 0000000..b5418ae
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_stop.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+ "time"
+
+ timetypes "github.com/docker/docker/api/types/time"
+ "golang.org/x/net/context"
+)
+
+// ContainerStop stops a container without terminating the process.
+// The process is blocked until the container stops or the timeout expires.
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error {
+ query := url.Values{}
+ if timeout != nil {
+ query.Set("t", timetypes.DurationToSecondsString(*timeout))
+ }
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_top.go b/unum/vendor/github.com/docker/docker/client/container_top.go
new file mode 100644
index 0000000..9689123
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_top.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strings"
+
+ "github.com/docker/docker/api/types/container"
+ "golang.org/x/net/context"
+)
+
+// ContainerTop shows process information from within a container.
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) {
+ var response container.ContainerTopOKBody
+ query := url.Values{}
+ if len(arguments) > 0 {
+ query.Set("ps_args", strings.Join(arguments, " "))
+ }
+
+ resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_unpause.go b/unum/vendor/github.com/docker/docker/client/container_unpause.go
new file mode 100644
index 0000000..5c76211
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_unpause.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// ContainerUnpause resumes the process execution within a container
+func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_update.go b/unum/vendor/github.com/docker/docker/client/container_update.go
new file mode 100644
index 0000000..5082f22
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_update.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/container"
+ "golang.org/x/net/context"
+)
+
+// ContainerUpdate updates resources of a container
+func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) {
+ var response container.ContainerUpdateOKBody
+ serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&response)
+
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/container_wait.go b/unum/vendor/github.com/docker/docker/client/container_wait.go
new file mode 100644
index 0000000..854c6c0
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/container_wait.go
@@ -0,0 +1,84 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/versions"
+)
+
+// ContainerWait waits until the specified container is in a certain state
+// indicated by the given condition, either "not-running" (default),
+// "next-exit", or "removed".
+//
+// If this client's API version is before 1.30, condition is ignored and
+// ContainerWait will return immediately with the two channels, as the server
+// will wait as if the condition were "not-running".
+//
+// If this client's API version is at least 1.30, ContainerWait blocks until
+// the request has been acknowledged by the server (with a response header),
+// then returns two channels on which the caller can wait for the exit status
+// of the container or an error if there was a problem either beginning the
+// wait request or in getting the response. This allows the caller to
+// synchronize ContainerWait with other calls, such as specifying a
+// "next-exit" condition before issuing a ContainerStart request.
+func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) {
+ if versions.LessThan(cli.ClientVersion(), "1.30") {
+ return cli.legacyContainerWait(ctx, containerID)
+ }
+
+ resultC := make(chan container.ContainerWaitOKBody)
+ errC := make(chan error, 1)
+
+ query := url.Values{}
+ query.Set("condition", string(condition))
+
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil)
+ if err != nil {
+ defer ensureReaderClosed(resp)
+ errC <- err
+ return resultC, errC
+ }
+
+ go func() {
+ defer ensureReaderClosed(resp)
+ var res container.ContainerWaitOKBody
+ if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
+ errC <- err
+ return
+ }
+
+ resultC <- res
+ }()
+
+ return resultC, errC
+}
+
+// legacyContainerWait returns immediately and doesn't have an option to wait
+// until the container is removed.
+func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) {
+ resultC := make(chan container.ContainerWaitOKBody)
+ errC := make(chan error)
+
+ go func() {
+ resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
+ if err != nil {
+ errC <- err
+ return
+ }
+ defer ensureReaderClosed(resp)
+
+ var res container.ContainerWaitOKBody
+ if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
+ errC <- err
+ return
+ }
+
+ resultC <- res
+ }()
+
+ return resultC, errC
+}
diff --git a/unum/vendor/github.com/docker/docker/client/disk_usage.go b/unum/vendor/github.com/docker/docker/client/disk_usage.go
new file mode 100644
index 0000000..03c80b3
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/disk_usage.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// DiskUsage requests the current data usage from the daemon
+func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) {
+ var du types.DiskUsage
+
+ serverResp, err := cli.get(ctx, "/system/df", nil, nil)
+ if err != nil {
+ return du, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil {
+ return du, fmt.Errorf("Error retrieving disk usage: %v", err)
+ }
+
+ return du, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/distribution_inspect.go b/unum/vendor/github.com/docker/docker/client/distribution_inspect.go
new file mode 100644
index 0000000..aa5bc6a
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/distribution_inspect.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ registrytypes "github.com/docker/docker/api/types/registry"
+ "golang.org/x/net/context"
+)
+
+// DistributionInspect returns the image digest with full Manifest
+func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) {
+ // Contact the registry to retrieve digest and platform information
+ var distributionInspect registrytypes.DistributionInspect
+
+ if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil {
+ return distributionInspect, err
+ }
+ var headers map[string][]string
+
+ if encodedRegistryAuth != "" {
+ headers = map[string][]string{
+ "X-Registry-Auth": {encodedRegistryAuth},
+ }
+ }
+
+ resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers)
+ if err != nil {
+ return distributionInspect, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&distributionInspect)
+ ensureReaderClosed(resp)
+ return distributionInspect, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/errors.go b/unum/vendor/github.com/docker/docker/client/errors.go
new file mode 100644
index 0000000..fc7df9f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/errors.go
@@ -0,0 +1,300 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/api/types/versions"
+ "github.com/pkg/errors"
+)
+
+// errConnectionFailed implements an error returned when connection failed.
+type errConnectionFailed struct {
+ host string
+}
+
+// Error returns a string representation of an errConnectionFailed
+func (err errConnectionFailed) Error() string {
+ if err.host == "" {
+ return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?"
+ }
+ return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host)
+}
+
+// IsErrConnectionFailed returns true if the error is caused by connection failed.
+func IsErrConnectionFailed(err error) bool {
+ _, ok := errors.Cause(err).(errConnectionFailed)
+ return ok
+}
+
+// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed.
+func ErrorConnectionFailed(host string) error {
+ return errConnectionFailed{host: host}
+}
+
+type notFound interface {
+ error
+ NotFound() bool // Is the error a NotFound error
+}
+
+// IsErrNotFound returns true if the error is caused with an
+// object (image, container, network, volume, …) is not found in the docker host.
+func IsErrNotFound(err error) bool {
+ te, ok := err.(notFound)
+ return ok && te.NotFound()
+}
+
+// imageNotFoundError implements an error returned when an image is not in the docker host.
+type imageNotFoundError struct {
+ imageID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e imageNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of an imageNotFoundError
+func (e imageNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such image: %s", e.imageID)
+}
+
+// IsErrImageNotFound returns true if the error is caused
+// when an image is not found in the docker host.
+func IsErrImageNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// containerNotFoundError implements an error returned when a container is not in the docker host.
+type containerNotFoundError struct {
+ containerID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e containerNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a containerNotFoundError
+func (e containerNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such container: %s", e.containerID)
+}
+
+// IsErrContainerNotFound returns true if the error is caused
+// when a container is not found in the docker host.
+func IsErrContainerNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// networkNotFoundError implements an error returned when a network is not in the docker host.
+type networkNotFoundError struct {
+ networkID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e networkNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a networkNotFoundError
+func (e networkNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such network: %s", e.networkID)
+}
+
+// IsErrNetworkNotFound returns true if the error is caused
+// when a network is not found in the docker host.
+func IsErrNetworkNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// volumeNotFoundError implements an error returned when a volume is not in the docker host.
+type volumeNotFoundError struct {
+ volumeID string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e volumeNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a volumeNotFoundError
+func (e volumeNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
+}
+
+// IsErrVolumeNotFound returns true if the error is caused
+// when a volume is not found in the docker host.
+func IsErrVolumeNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
+
+// unauthorizedError represents an authorization error in a remote registry.
+type unauthorizedError struct {
+ cause error
+}
+
+// Error returns a string representation of an unauthorizedError
+func (u unauthorizedError) Error() string {
+ return u.cause.Error()
+}
+
+// IsErrUnauthorized returns true if the error is caused
+// when a remote registry authentication fails
+func IsErrUnauthorized(err error) bool {
+ _, ok := err.(unauthorizedError)
+ return ok
+}
+
+// nodeNotFoundError implements an error returned when a node is not found.
+type nodeNotFoundError struct {
+ nodeID string
+}
+
+// Error returns a string representation of a nodeNotFoundError
+func (e nodeNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such node: %s", e.nodeID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e nodeNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrNodeNotFound returns true if the error is caused
+// when a node is not found.
+func IsErrNodeNotFound(err error) bool {
+ _, ok := err.(nodeNotFoundError)
+ return ok
+}
+
+// serviceNotFoundError implements an error returned when a service is not found.
+type serviceNotFoundError struct {
+ serviceID string
+}
+
+// Error returns a string representation of a serviceNotFoundError
+func (e serviceNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such service: %s", e.serviceID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e serviceNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrServiceNotFound returns true if the error is caused
+// when a service is not found.
+func IsErrServiceNotFound(err error) bool {
+ _, ok := err.(serviceNotFoundError)
+ return ok
+}
+
+// taskNotFoundError implements an error returned when a task is not found.
+type taskNotFoundError struct {
+ taskID string
+}
+
+// Error returns a string representation of a taskNotFoundError
+func (e taskNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such task: %s", e.taskID)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e taskNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrTaskNotFound returns true if the error is caused
+// when a task is not found.
+func IsErrTaskNotFound(err error) bool {
+ _, ok := err.(taskNotFoundError)
+ return ok
+}
+
+type pluginPermissionDenied struct {
+ name string
+}
+
+func (e pluginPermissionDenied) Error() string {
+ return "Permission denied while installing plugin " + e.name
+}
+
+// IsErrPluginPermissionDenied returns true if the error is caused
+// when a user denies a plugin's permissions
+func IsErrPluginPermissionDenied(err error) bool {
+ _, ok := err.(pluginPermissionDenied)
+ return ok
+}
+
+// NewVersionError returns an error if the APIVersion required
+// if less than the current supported version
+func (cli *Client) NewVersionError(APIrequired, feature string) error {
+ if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
+ return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
+ }
+ return nil
+}
+
+// secretNotFoundError implements an error returned when a secret is not found.
+type secretNotFoundError struct {
+ name string
+}
+
+// Error returns a string representation of a secretNotFoundError
+func (e secretNotFoundError) Error() string {
+ return fmt.Sprintf("Error: no such secret: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e secretNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrSecretNotFound returns true if the error is caused
+// when a secret is not found.
+func IsErrSecretNotFound(err error) bool {
+ _, ok := err.(secretNotFoundError)
+ return ok
+}
+
+// configNotFoundError implements an error returned when a config is not found.
+type configNotFoundError struct {
+ name string
+}
+
+// Error returns a string representation of a configNotFoundError
+func (e configNotFoundError) Error() string {
+ return fmt.Sprintf("Error: no such config: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e configNotFoundError) NotFound() bool {
+ return true
+}
+
+// IsErrConfigNotFound returns true if the error is caused
+// when a config is not found.
+func IsErrConfigNotFound(err error) bool {
+ _, ok := err.(configNotFoundError)
+ return ok
+}
+
+// pluginNotFoundError implements an error returned when a plugin is not in the docker host.
+type pluginNotFoundError struct {
+ name string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e pluginNotFoundError) NotFound() bool {
+ return true
+}
+
+// Error returns a string representation of a pluginNotFoundError
+func (e pluginNotFoundError) Error() string {
+ return fmt.Sprintf("Error: No such plugin: %s", e.name)
+}
+
+// IsErrPluginNotFound returns true if the error is caused
+// when a plugin is not found in the docker host.
+func IsErrPluginNotFound(err error) bool {
+ return IsErrNotFound(err)
+}
diff --git a/unum/vendor/github.com/docker/docker/client/events.go b/unum/vendor/github.com/docker/docker/client/events.go
new file mode 100644
index 0000000..af47aef
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/events.go
@@ -0,0 +1,102 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/events"
+ "github.com/docker/docker/api/types/filters"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
+// Events returns a stream of events in the daemon. It's up to the caller to close the stream
+// by cancelling the context. Once the stream has been completely read an io.EOF error will
+// be sent over the error channel. If an error is sent all processing will be stopped. It's up
+// to the caller to reopen the stream in the event of an error by reinvoking this method.
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) {
+
+ messages := make(chan events.Message)
+ errs := make(chan error, 1)
+
+ started := make(chan struct{})
+ go func() {
+ defer close(errs)
+
+ query, err := buildEventsQueryParams(cli.version, options)
+ if err != nil {
+ close(started)
+ errs <- err
+ return
+ }
+
+ resp, err := cli.get(ctx, "/events", query, nil)
+ if err != nil {
+ close(started)
+ errs <- err
+ return
+ }
+ defer resp.body.Close()
+
+ decoder := json.NewDecoder(resp.body)
+
+ close(started)
+ for {
+ select {
+ case <-ctx.Done():
+ errs <- ctx.Err()
+ return
+ default:
+ var event events.Message
+ if err := decoder.Decode(&event); err != nil {
+ errs <- err
+ return
+ }
+
+ select {
+ case messages <- event:
+ case <-ctx.Done():
+ errs <- ctx.Err()
+ return
+ }
+ }
+ }
+ }()
+ <-started
+
+ return messages, errs
+}
+
+func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) {
+ query := url.Values{}
+ ref := time.Now()
+
+ if options.Since != "" {
+ ts, err := timetypes.GetTimestamp(options.Since, ref)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("since", ts)
+ }
+
+ if options.Until != "" {
+ ts, err := timetypes.GetTimestamp(options.Until, ref)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("until", ts)
+ }
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters)
+ if err != nil {
+ return nil, err
+ }
+ query.Set("filters", filterJSON)
+ }
+
+ return query, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/hijack.go b/unum/vendor/github.com/docker/docker/client/hijack.go
new file mode 100644
index 0000000..8cf0119
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/hijack.go
@@ -0,0 +1,208 @@
+package client
+
+import (
+ "bufio"
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/pkg/tlsconfig"
+ "github.com/docker/go-connections/sockets"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// tlsClientCon holds tls information and a dialed connection.
+type tlsClientCon struct {
+ *tls.Conn
+ rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+ // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
+ // on its underlying connection.
+ if conn, ok := c.rawConn.(types.CloseWriter); ok {
+ return conn.CloseWrite()
+ }
+ return nil
+}
+
+// postHijacked sends a POST request and hijacks the connection.
+func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
+ bodyEncoded, err := encodeData(body)
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+
+ apiPath := cli.getAPIPath(path, query)
+ req, err := http.NewRequest("POST", apiPath, bodyEncoded)
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+ req = cli.addHeaders(req, headers)
+
+ conn, err := cli.setupHijackConn(req, "tcp")
+ if err != nil {
+ return types.HijackedResponse{}, err
+ }
+
+ return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+ return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
+
+// We need to copy Go's implementation of tls.Dial (pkg/crypto/tls/tls.go) in
+// order to return our custom tlsClientCon struct which holds both the tls.Conn
+// object _and_ its underlying raw connection. The rationale for this is that
+// we need to be able to close the write end of the connection when attaching,
+// which tls.Conn does not provide.
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+ // We want the Timeout and Deadline values from dialer to cover the
+ // whole process: TCP connection and TLS handshake. This means that we
+ // also need to start our own timers now.
+ timeout := dialer.Timeout
+
+ if !dialer.Deadline.IsZero() {
+ deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ if timeout == 0 || deadlineTimeout < timeout {
+ timeout = deadlineTimeout
+ }
+ }
+
+ var errChannel chan error
+
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- errors.New("")
+ })
+ }
+
+ proxyDialer, err := sockets.DialerFromEnvironment(dialer)
+ if err != nil {
+ return nil, err
+ }
+
+ rawConn, err := proxyDialer.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ // When we set up a TCP connection for hijack, there could be long periods
+ // of inactivity (a long running command with no output) that in certain
+ // network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+ // state. Setting TCP KeepAlive on the socket connection will prohibit
+ // ECONNTIMEOUT unless the socket connection truly is broken
+ if tcpConn, ok := rawConn.(*net.TCPConn); ok {
+ tcpConn.SetKeepAlive(true)
+ tcpConn.SetKeepAlivePeriod(30 * time.Second)
+ }
+
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ hostname := addr[:colonPos]
+
+ // If no ServerName is set, infer the ServerName
+ // from the hostname we're connecting to.
+ if config.ServerName == "" {
+ // Make a copy to avoid polluting argument or default.
+ config = tlsconfig.Clone(config)
+ config.ServerName = hostname
+ }
+
+ conn := tls.Client(rawConn, config)
+
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+
+ err = <-errChannel
+ }
+
+ if err != nil {
+ rawConn.Close()
+ return nil, err
+ }
+
+ // This is where Docker differs from the standard crypto/tls package: we
+ // return a wrapper which holds both the TLS and raw connections.
+ return &tlsClientCon{conn, rawConn}, nil
+}
+
+func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
+ if tlsConfig != nil && proto != "unix" && proto != "npipe" {
+ // Notice this isn't Go standard's tls.Dial function
+ return tlsDial(proto, addr, tlsConfig)
+ }
+ if proto == "npipe" {
+ return sockets.DialPipe(addr, 32*time.Second)
+ }
+ return net.Dial(proto, addr)
+}
+
+func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) {
+ req.Host = cli.addr
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", proto)
+
+ conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport))
+ if err != nil {
+ return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
+ }
+
+ // When we set up a TCP connection for hijack, there could be long periods
+ // of inactivity (a long running command with no output) that in certain
+ // network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+ // state. Setting TCP KeepAlive on the socket connection will prohibit
+ // ECONNTIMEOUT unless the socket connection truly is broken
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ tcpConn.SetKeepAlive(true)
+ tcpConn.SetKeepAlivePeriod(30 * time.Second)
+ }
+
+ clientconn := httputil.NewClientConn(conn, nil)
+ defer clientconn.Close()
+
+ // Server hijacks the connection, error 'connection closed' expected
+ resp, err := clientconn.Do(req)
+ if err != httputil.ErrPersistEOF {
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode != http.StatusSwitchingProtocols {
+ resp.Body.Close()
+ return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
+ }
+ }
+
+ c, br := clientconn.Hijack()
+ if br.Buffered() > 0 {
+ // If there is buffered content, wrap the connection
+ c = &hijackedConn{c, br}
+ } else {
+ br.Reset(nil)
+ }
+
+ return c, nil
+}
+
+type hijackedConn struct {
+ net.Conn
+ r *bufio.Reader
+}
+
+func (c *hijackedConn) Read(b []byte) (int, error) {
+ return c.r.Read(b)
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_build.go b/unum/vendor/github.com/docker/docker/client/image_build.go
new file mode 100644
index 0000000..44a215f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_build.go
@@ -0,0 +1,128 @@
+package client
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+)
+
+// ImageBuild sends request to the daemon to build images.
+// The Body in the response implements an io.ReadCloser and it's up to the caller to
+// close it.
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+ query, err := cli.imageBuildOptionsToQuery(options)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+
+ headers := http.Header(make(map[string][]string))
+ buf, err := json.Marshal(options.AuthConfigs)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+ headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+ headers.Set("Content-Type", "application/x-tar")
+
+ serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
+ if err != nil {
+ return types.ImageBuildResponse{}, err
+ }
+
+ osType := getDockerOS(serverResp.header.Get("Server"))
+
+ return types.ImageBuildResponse{
+ Body: serverResp.body,
+ OSType: osType,
+ }, nil
+}
+
+func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
+ query := url.Values{
+ "t": options.Tags,
+ "securityopt": options.SecurityOpt,
+ "extrahosts": options.ExtraHosts,
+ }
+ if options.SuppressOutput {
+ query.Set("q", "1")
+ }
+ if options.RemoteContext != "" {
+ query.Set("remote", options.RemoteContext)
+ }
+ if options.NoCache {
+ query.Set("nocache", "1")
+ }
+ if options.Remove {
+ query.Set("rm", "1")
+ } else {
+ query.Set("rm", "0")
+ }
+
+ if options.ForceRemove {
+ query.Set("forcerm", "1")
+ }
+
+ if options.PullParent {
+ query.Set("pull", "1")
+ }
+
+ if options.Squash {
+ if err := cli.NewVersionError("1.25", "squash"); err != nil {
+ return query, err
+ }
+ query.Set("squash", "1")
+ }
+
+ if !container.Isolation.IsDefault(options.Isolation) {
+ query.Set("isolation", string(options.Isolation))
+ }
+
+ query.Set("cpusetcpus", options.CPUSetCPUs)
+ query.Set("networkmode", options.NetworkMode)
+ query.Set("cpusetmems", options.CPUSetMems)
+ query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
+ query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
+ query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
+ query.Set("memory", strconv.FormatInt(options.Memory, 10))
+ query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
+ query.Set("cgroupparent", options.CgroupParent)
+ query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
+ query.Set("dockerfile", options.Dockerfile)
+ query.Set("target", options.Target)
+
+ ulimitsJSON, err := json.Marshal(options.Ulimits)
+ if err != nil {
+ return query, err
+ }
+ query.Set("ulimits", string(ulimitsJSON))
+
+ buildArgsJSON, err := json.Marshal(options.BuildArgs)
+ if err != nil {
+ return query, err
+ }
+ query.Set("buildargs", string(buildArgsJSON))
+
+ labelsJSON, err := json.Marshal(options.Labels)
+ if err != nil {
+ return query, err
+ }
+ query.Set("labels", string(labelsJSON))
+
+ cacheFromJSON, err := json.Marshal(options.CacheFrom)
+ if err != nil {
+ return query, err
+ }
+ query.Set("cachefrom", string(cacheFromJSON))
+ if options.SessionID != "" {
+ query.Set("session", options.SessionID)
+ }
+
+ return query, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_create.go b/unum/vendor/github.com/docker/docker/client/image_create.go
new file mode 100644
index 0000000..4436abb
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_create.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+)
+
+// ImageCreate creates a new image based on the parent options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
+ ref, err := reference.ParseNormalizedNamed(parentReference)
+ if err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ query.Set("fromImage", reference.FamiliarName(ref))
+ query.Set("tag", getAPITagFromNamedRef(ref))
+ resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/images/create", query, nil, headers)
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_history.go b/unum/vendor/github.com/docker/docker/client/image_history.go
new file mode 100644
index 0000000..7b4babc
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_history.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/image"
+ "golang.org/x/net/context"
+)
+
+// ImageHistory returns the changes in an image in history format.
+func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) {
+ var history []image.HistoryResponseItem
+ serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil)
+ if err != nil {
+ return history, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&history)
+ ensureReaderClosed(serverResp)
+ return history, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_import.go b/unum/vendor/github.com/docker/docker/client/image_import.go
new file mode 100644
index 0000000..d7dedd8
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_import.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+)
+
+// ImageImport creates a new image based on the source options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
+ if ref != "" {
+ // Check if the given image name can be resolved
+ if _, err := reference.ParseNormalizedNamed(ref); err != nil {
+ return nil, err
+ }
+ }
+
+ query := url.Values{}
+ query.Set("fromSrc", source.SourceName)
+ query.Set("repo", ref)
+ query.Set("tag", options.Tag)
+ query.Set("message", options.Message)
+ for _, change := range options.Changes {
+ query.Add("changes", change)
+ }
+
+ resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_inspect.go b/unum/vendor/github.com/docker/docker/client/image_inspect.go
new file mode 100644
index 0000000..b3a64ce
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_inspect.go
@@ -0,0 +1,33 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ImageInspectWithRaw returns the image information and its raw representation.
+func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) {
+ serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return types.ImageInspect{}, nil, imageNotFoundError{imageID}
+ }
+ return types.ImageInspect{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return types.ImageInspect{}, nil, err
+ }
+
+ var response types.ImageInspect
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_list.go b/unum/vendor/github.com/docker/docker/client/image_list.go
new file mode 100644
index 0000000..f26464f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_list.go
@@ -0,0 +1,45 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/versions"
+ "golang.org/x/net/context"
+)
+
+// ImageList returns a list of images in the docker host.
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) {
+ var images []types.ImageSummary
+ query := url.Values{}
+
+ optionFilters := options.Filters
+ referenceFilters := optionFilters.Get("reference")
+ if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 {
+ query.Set("filter", referenceFilters[0])
+ for _, filterValue := range referenceFilters {
+ optionFilters.Del("reference", filterValue)
+ }
+ }
+ if optionFilters.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters)
+ if err != nil {
+ return images, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ if options.All {
+ query.Set("all", "1")
+ }
+
+ serverResp, err := cli.get(ctx, "/images/json", query, nil)
+ if err != nil {
+ return images, err
+ }
+
+ err = json.NewDecoder(serverResp.body).Decode(&images)
+ ensureReaderClosed(serverResp)
+ return images, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_load.go b/unum/vendor/github.com/docker/docker/client/image_load.go
new file mode 100644
index 0000000..77aaf1a
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_load.go
@@ -0,0 +1,30 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+)
+
+// ImageLoad loads an image in the docker host from the client host.
+// It's up to the caller to close the io.ReadCloser in the
+// ImageLoadResponse returned by this function.
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {
+ v := url.Values{}
+ v.Set("quiet", "0")
+ if quiet {
+ v.Set("quiet", "1")
+ }
+ headers := map[string][]string{"Content-Type": {"application/x-tar"}}
+ resp, err := cli.postRaw(ctx, "/images/load", v, input, headers)
+ if err != nil {
+ return types.ImageLoadResponse{}, err
+ }
+ return types.ImageLoadResponse{
+ Body: resp.body,
+ JSON: resp.header.Get("Content-Type") == "application/json",
+ }, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_prune.go b/unum/vendor/github.com/docker/docker/client/image_prune.go
new file mode 100644
index 0000000..5ef98b7
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_prune.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// ImagesPrune requests the daemon to delete unused data
+func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) {
+ var report types.ImagesPruneReport
+
+ if err := cli.NewVersionError("1.25", "image prune"); err != nil {
+ return report, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return report, err
+ }
+
+ serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil)
+ if err != nil {
+ return report, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+ return report, fmt.Errorf("Error retrieving disk usage: %v", err)
+ }
+
+ return report, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_pull.go b/unum/vendor/github.com/docker/docker/client/image_pull.go
new file mode 100644
index 0000000..a72b9bf
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_pull.go
@@ -0,0 +1,61 @@
+package client
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+)
+
+// ImagePull requests the docker host to pull an image from a remote registry.
+// It executes the privileged function if the operation is unauthorized
+// and it tries one more time.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+//
+// FIXME(vdemeester): this is currently used in a few ways in docker/docker
+// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
+// - if in trusted content, ref is used to pass the reference name, and tag for the digest
+func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) {
+ ref, err := reference.ParseNormalizedNamed(refStr)
+ if err != nil {
+ return nil, err
+ }
+
+ query := url.Values{}
+ query.Set("fromImage", reference.FamiliarName(ref))
+ if !options.All {
+ query.Set("tag", getAPITagFromNamedRef(ref))
+ }
+
+ resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ return nil, privilegeErr
+ }
+ resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+// getAPITagFromNamedRef returns a tag from the specified reference.
+// This function is necessary as long as the docker "server" api expects
+// digests to be sent as tags and makes a distinction between the name
+// and tag/digest part of a reference.
+func getAPITagFromNamedRef(ref reference.Named) string {
+ if digested, ok := ref.(reference.Digested); ok {
+ return digested.Digest().String()
+ }
+ ref = reference.TagNameOnly(ref)
+ if tagged, ok := ref.(reference.Tagged); ok {
+ return tagged.Tag()
+ }
+ return ""
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_push.go b/unum/vendor/github.com/docker/docker/client/image_push.go
new file mode 100644
index 0000000..410d2fb
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_push.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "net/url"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+)
+
+// ImagePush requests the docker host to push an image to a remote registry.
+// It executes the privileged function if the operation is unauthorized
+// and it tries one more time.
+// It's up to the caller to handle the io.ReadCloser and close it properly.
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) {
+ ref, err := reference.ParseNormalizedNamed(image)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, isCanonical := ref.(reference.Canonical); isCanonical {
+ return nil, errors.New("cannot push a digest reference")
+ }
+
+ tag := ""
+ name := reference.FamiliarName(ref)
+
+ if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged {
+ tag = nameTaggedRef.Tag()
+ }
+
+ query := url.Values{}
+ query.Set("tag", tag)
+
+ resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ return nil, privilegeErr
+ }
+ resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_remove.go b/unum/vendor/github.com/docker/docker/client/image_remove.go
new file mode 100644
index 0000000..94e4b74
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_remove.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// ImageRemove removes an image from the docker host.
+func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) {
+ query := url.Values{}
+
+ if options.Force {
+ query.Set("force", "1")
+ }
+ if !options.PruneChildren {
+ query.Set("noprune", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return nil, imageNotFoundError{imageID}
+ }
+ return nil, err
+ }
+
+ var dels []types.ImageDeleteResponseItem
+ err = json.NewDecoder(resp.body).Decode(&dels)
+ ensureReaderClosed(resp)
+ return dels, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_save.go b/unum/vendor/github.com/docker/docker/client/image_save.go
new file mode 100644
index 0000000..ecac880
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_save.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
+// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
+// It's up to the caller to store the images and close the stream.
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
+ query := url.Values{
+ "names": imageIDs,
+ }
+
+ resp, err := cli.get(ctx, "/images/get", query, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_search.go b/unum/vendor/github.com/docker/docker/client/image_search.go
new file mode 100644
index 0000000..b0fcd5c
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_search.go
@@ -0,0 +1,51 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/registry"
+ "golang.org/x/net/context"
+)
+
+// ImageSearch makes the docker host search for a term in a remote registry.
+// The list of results is not sorted in any fashion.
+func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) {
+ var results []registry.SearchResult
+ query := url.Values{}
+ query.Set("term", term)
+ query.Set("limit", fmt.Sprintf("%d", options.Limit))
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return results, err
+ }
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ return results, privilegeErr
+ }
+ resp, err = cli.tryImageSearch(ctx, query, newAuthHeader)
+ }
+ if err != nil {
+ return results, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&results)
+ ensureReaderClosed(resp)
+ return results, err
+}
+
+func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.get(ctx, "/images/search", query, headers)
+}
diff --git a/unum/vendor/github.com/docker/docker/client/image_tag.go b/unum/vendor/github.com/docker/docker/client/image_tag.go
new file mode 100644
index 0000000..8924f71
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/image_tag.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/distribution/reference"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// ImageTag tags an image in the docker host
+func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
+ if _, err := reference.ParseAnyReference(source); err != nil {
+ return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source)
+ }
+
+ ref, err := reference.ParseNormalizedNamed(target)
+ if err != nil {
+ return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target)
+ }
+
+ if _, isCanonical := ref.(reference.Canonical); isCanonical {
+ return errors.New("refusing to create a tag with a digest reference")
+ }
+
+ ref = reference.TagNameOnly(ref)
+
+ query := url.Values{}
+ query.Set("repo", reference.FamiliarName(ref))
+ if tagged, ok := ref.(reference.Tagged); ok {
+ query.Set("tag", tagged.Tag())
+ }
+
+ resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/info.go b/unum/vendor/github.com/docker/docker/client/info.go
new file mode 100644
index 0000000..ac07961
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/info.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// Info returns information about the docker server.
+func (cli *Client) Info(ctx context.Context) (types.Info, error) {
+ var info types.Info
+ serverResp, err := cli.get(ctx, "/info", url.Values{}, nil)
+ if err != nil {
+ return info, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil {
+ return info, fmt.Errorf("Error reading remote info: %v", err)
+ }
+
+ return info, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/interface.go b/unum/vendor/github.com/docker/docker/client/interface.go
new file mode 100644
index 0000000..acd4de1
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/interface.go
@@ -0,0 +1,194 @@
+package client
+
+import (
+ "io"
+ "net"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/events"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/image"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/registry"
+ "github.com/docker/docker/api/types/swarm"
+ volumetypes "github.com/docker/docker/api/types/volume"
+ "golang.org/x/net/context"
+)
+
+// CommonAPIClient is the common methods between stable and experimental versions of APIClient.
+type CommonAPIClient interface {
+ // One embedded sub-interface per API object kind (configs, containers,
+ // images, nodes, networks, plugins, services, swarm, secrets, system,
+ // volumes).
+ ConfigAPIClient
+ ContainerAPIClient
+ DistributionAPIClient
+ ImageAPIClient
+ NodeAPIClient
+ NetworkAPIClient
+ PluginAPIClient
+ ServiceAPIClient
+ SwarmAPIClient
+ SecretAPIClient
+ SystemAPIClient
+ VolumeAPIClient
+ // Client/daemon metadata and API-version negotiation helpers.
+ ClientVersion() string
+ DaemonHost() string
+ ServerVersion(ctx context.Context) (types.Version, error)
+ NegotiateAPIVersion(ctx context.Context)
+ NegotiateAPIVersionPing(types.Ping)
+ DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)
+}
+
+// ContainerAPIClient defines API client methods for the containers
+type ContainerAPIClient interface {
+ // Container lifecycle, exec sessions, inspection, logs/stats streaming and
+ // filesystem copy in/out of a container.
+ ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
+ ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error)
+ ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error)
+ ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error)
+ ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error)
+ ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error)
+ ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error)
+ ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error
+ ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error
+ ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
+ ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
+ ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error)
+ ContainerKill(ctx context.Context, container, signal string) error
+ ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
+ ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+ ContainerPause(ctx context.Context, container string) error
+ ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error
+ ContainerRename(ctx context.Context, container, newContainerName string) error
+ ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error
+ ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error
+ ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
+ ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error)
+ ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error
+ ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
+ ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error)
+ ContainerUnpause(ctx context.Context, container string) error
+ ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error)
+ ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error)
+ CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
+ CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
+ ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error)
+}
+
+// DistributionAPIClient defines API client methods for the registry
+type DistributionAPIClient interface {
+ // Inspect an image manifest/descriptor as known to its registry.
+ DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error)
+}
+
+// ImageAPIClient defines API client methods for the images
+type ImageAPIClient interface {
+ // Build, transfer (pull/push/load/save/import), inspect, tag and prune
+ // operations on images.
+ ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
+ BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error)
+ ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
+ ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
+ ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
+ ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error)
+ ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error)
+ ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error)
+ ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error)
+ ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error)
+ ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error)
+ ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
+ ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
+ ImageTag(ctx context.Context, image, ref string) error
+ ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error)
+}
+
+// NetworkAPIClient defines API client methods for the networks
+type NetworkAPIClient interface {
+ // CRUD plus container attach/detach operations on docker networks.
+ NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error
+ NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error)
+ NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error
+ NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error)
+ NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error)
+ NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
+ NetworkRemove(ctx context.Context, networkID string) error
+ NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error)
+}
+
+// NodeAPIClient defines API client methods for the nodes
+type NodeAPIClient interface {
+ // Swarm node inspection and management.
+ NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
+ NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
+ NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error
+ NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
+}
+
+// PluginAPIClient defines API client methods for the plugins
+type PluginAPIClient interface {
+ // Managed-plugin lifecycle: install/upgrade/enable/disable/configure,
+ // plus create/push for plugin authors.
+ PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error)
+ PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error
+ PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error
+ PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error
+ PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
+ PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
+ PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error)
+ PluginSet(ctx context.Context, name string, args []string) error
+ PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error)
+ PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error
+}
+
+// ServiceAPIClient defines API client methods for the services
+type ServiceAPIClient interface {
+ // Swarm services and their tasks, including log streaming for both.
+ ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error)
+ ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error)
+ ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
+ ServiceRemove(ctx context.Context, serviceID string) error
+ ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error)
+ ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+ TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
+ TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
+ TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
+}
+
+// SwarmAPIClient defines API client methods for the swarm
+type SwarmAPIClient interface {
+ // Cluster membership (init/join/leave), lock-key handling and
+ // cluster-level inspect/update.
+ SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
+ SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
+ SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error)
+ SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error
+ SwarmLeave(ctx context.Context, force bool) error
+ SwarmInspect(ctx context.Context) (swarm.Swarm, error)
+ SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error
+}
+
+// SystemAPIClient defines API client methods for the system
+type SystemAPIClient interface {
+ // Daemon-wide queries: event stream, info, registry login, disk usage
+ // and liveness ping.
+ Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
+ Info(ctx context.Context) (types.Info, error)
+ RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error)
+ DiskUsage(ctx context.Context) (types.DiskUsage, error)
+ Ping(ctx context.Context) (types.Ping, error)
+}
+
+// VolumeAPIClient defines API client methods for the volumes
+type VolumeAPIClient interface {
+ // Volume CRUD and pruning.
+ VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error)
+ VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error)
+ VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error)
+ VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error)
+ VolumeRemove(ctx context.Context, volumeID string, force bool) error
+ VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error)
+}
+
+// SecretAPIClient defines API client methods for secrets
+type SecretAPIClient interface {
+ // Swarm secret management (versioned updates like other swarm objects).
+ SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error)
+ SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error)
+ SecretRemove(ctx context.Context, id string) error
+ SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error)
+ SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error
+}
+
+// ConfigAPIClient defines API client methods for configs
+type ConfigAPIClient interface {
+ // Swarm config object management; mirrors the secret API shape.
+ ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error)
+ ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error)
+ ConfigRemove(ctx context.Context, id string) error
+ ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error)
+ ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error
+}
diff --git a/unum/vendor/github.com/docker/docker/client/interface_experimental.go b/unum/vendor/github.com/docker/docker/client/interface_experimental.go
new file mode 100644
index 0000000..51da98e
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/interface_experimental.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// apiClientExperimental groups the interfaces that are only available when
+// the daemon runs in experimental mode.
+type apiClientExperimental interface {
+ CheckpointAPIClient
+}
+
+// CheckpointAPIClient defines API client methods for the checkpoints
+// (an experimental-only feature, see apiClientExperimental).
+type CheckpointAPIClient interface {
+ CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
+ CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error
+ CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error)
+}
diff --git a/unum/vendor/github.com/docker/docker/client/interface_stable.go b/unum/vendor/github.com/docker/docker/client/interface_stable.go
new file mode 100644
index 0000000..cc90a3c
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/interface_stable.go
@@ -0,0 +1,10 @@
+package client
+
+// APIClient is an interface that clients that talk with a docker server must implement.
+// It is the union of the stable (CommonAPIClient) and experimental surfaces.
+type APIClient interface {
+ CommonAPIClient
+ apiClientExperimental
+}
+
+// Ensure that Client always implements APIClient.
+// (Compile-time assertion; the blank identifier discards the value.)
+var _ APIClient = &Client{}
diff --git a/unum/vendor/github.com/docker/docker/client/login.go b/unum/vendor/github.com/docker/docker/client/login.go
new file mode 100644
index 0000000..79219ff
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/login.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/registry"
+ "golang.org/x/net/context"
+)
+
+// RegistryLogin authenticates the docker server with a given docker registry.
+// It returns unauthorizedError when the authentication fails.
+func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) {
+ resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil)
+
+ // statusCode is deliberately inspected before err: post returns a
+ // serverResponse by value, so the field is readable even when err is
+ // non-nil, and a 401 is surfaced as a typed unauthorizedError that wraps
+ // the underlying error.
+ if resp.statusCode == http.StatusUnauthorized {
+ return registry.AuthenticateOKBody{}, unauthorizedError{err}
+ }
+ if err != nil {
+ return registry.AuthenticateOKBody{}, err
+ }
+
+ var response registry.AuthenticateOKBody
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/network_connect.go b/unum/vendor/github.com/docker/docker/client/network_connect.go
new file mode 100644
index 0000000..c022c17
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/network_connect.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/network"
+ "golang.org/x/net/context"
+)
+
+// NetworkConnect connects a container to an existent network in the docker host.
+// config may be nil to use the network's default endpoint settings.
+func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
+ nc := types.NetworkConnect{
+ Container: containerID,
+ EndpointConfig: config,
+ }
+ // POST /networks/{id}/connect; the response carries no payload.
+ resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/network_create.go b/unum/vendor/github.com/docker/docker/client/network_create.go
new file mode 100644
index 0000000..4067a54
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/network_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// NetworkCreate creates a new network in the docker host.
+func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
+ networkCreateRequest := types.NetworkCreateRequest{
+ NetworkCreate: options,
+ Name: name,
+ }
+ var response types.NetworkCreateResponse
+ serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
+ if err != nil {
+ return response, err
+ }
+
+ // NOTE(review): the Decode error is discarded; the err returned below is
+ // the (nil) error from post, so a malformed response body silently yields
+ // a zero-value response. Kept as-is to match upstream.
+ json.NewDecoder(serverResp.body).Decode(&response)
+ ensureReaderClosed(serverResp)
+ return response, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/network_disconnect.go b/unum/vendor/github.com/docker/docker/client/network_disconnect.go
new file mode 100644
index 0000000..24b58e3
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/network_disconnect.go
@@ -0,0 +1,14 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// NetworkDisconnect disconnects a container from an existent network in the docker host.
+// force detaches the container even if it appears active on the network.
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
+ nd := types.NetworkDisconnect{Container: containerID, Force: force}
+ resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/network_inspect.go b/unum/vendor/github.com/docker/docker/client/network_inspect.go
new file mode 100644
index 0000000..848c979
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/network_inspect.go
@@ -0,0 +1,50 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// NetworkInspect returns the information for a specific network configured in the docker host.
+// It is a convenience wrapper around NetworkInspectWithRaw that drops the raw body.
+func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) {
+ networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options)
+ return networkResource, err
+}
+
+// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation.
+//
+// A 404 from the daemon is converted into a typed networkNotFoundError. The
+// body is read fully so the raw JSON can be returned alongside the decoded
+// struct.
+func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) {
+ var (
+ networkResource types.NetworkResource
+ resp serverResponse
+ err error
+ )
+ // Optional query parameters: verbose output and a scope filter.
+ query := url.Values{}
+ if options.Verbose {
+ query.Set("verbose", "true")
+ }
+ if options.Scope != "" {
+ query.Set("scope", options.Scope)
+ }
+ resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return networkResource, nil, networkNotFoundError{networkID}
+ }
+ return networkResource, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return networkResource, nil, err
+ }
+ // Decode from an in-memory copy so both the struct and the raw bytes can
+ // be returned.
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&networkResource)
+ return networkResource, body, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/network_list.go b/unum/vendor/github.com/docker/docker/client/network_list.go
new file mode 100644
index 0000000..e566a93
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/network_list.go
@@ -0,0 +1,31 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// NetworkList returns the list of networks configured in the docker host.
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
+ query := url.Values{}
+ if options.Filters.Len() > 0 {
+ // Filters are serialized according to the negotiated API version
+ // (older daemons use a different encoding).
+ filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+ var networkResources []types.NetworkResource
+ resp, err := cli.get(ctx, "/networks", query, nil)
+ if err != nil {
+ return networkResources, err
+ }
+ err = json.NewDecoder(resp.body).Decode(&networkResources)
+ ensureReaderClosed(resp)
+ return networkResources, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/network_prune.go b/unum/vendor/github.com/docker/docker/client/network_prune.go
new file mode 100644
index 0000000..7352a7f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/network_prune.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// NetworksPrune requests the daemon to delete unused networks
+func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) {
+ var report types.NetworksPruneReport
+
+ // Pruning was added in API 1.25; fail fast on older daemons.
+ if err := cli.NewVersionError("1.25", "network prune"); err != nil {
+ return report, err
+ }
+
+ query, err := getFiltersQuery(pruneFilters)
+ if err != nil {
+ return report, err
+ }
+
+ serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil)
+ if err != nil {
+ return report, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
+ return report, fmt.Errorf("Error retrieving network prune report: %v", err)
+ }
+
+ return report, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/network_remove.go b/unum/vendor/github.com/docker/docker/client/network_remove.go
new file mode 100644
index 0000000..6bd6748
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/network_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
+// NetworkRemove removes an existent network from the docker host.
+// DELETE /networks/{id}; the response carries no payload.
+func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
+ resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/node_inspect.go b/unum/vendor/github.com/docker/docker/client/node_inspect.go
new file mode 100644
index 0000000..abf505d
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/node_inspect.go
@@ -0,0 +1,33 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeInspectWithRaw returns the node information.
+//
+// A 404 is converted into a typed nodeNotFoundError; on success both the
+// decoded swarm.Node and the raw JSON body are returned.
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
+ serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Node{}, nil, nodeNotFoundError{nodeID}
+ }
+ return swarm.Node{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Node{}, nil, err
+ }
+
+ // Decode from an in-memory copy so the raw bytes can be returned too.
+ var response swarm.Node
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/node_list.go b/unum/vendor/github.com/docker/docker/client/node_list.go
new file mode 100644
index 0000000..3e8440f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/node_list.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeList returns the list of nodes.
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ // Unlike NetworkList, this uses the version-independent filter
+ // encoding (filters.ToParam).
+ filterJSON, err := filters.ToParam(options.Filters)
+
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/nodes", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var nodes []swarm.Node
+ err = json.NewDecoder(resp.body).Decode(&nodes)
+ ensureReaderClosed(resp)
+ return nodes, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/node_remove.go b/unum/vendor/github.com/docker/docker/client/node_remove.go
new file mode 100644
index 0000000..0a77f3d
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/node_remove.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+
+ "golang.org/x/net/context"
+)
+
+// NodeRemove removes a Node.
+// options.Force removes the node even if the daemon would otherwise refuse.
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error {
+ query := url.Values{}
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/node_update.go b/unum/vendor/github.com/docker/docker/client/node_update.go
new file mode 100644
index 0000000..3ca9760
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/node_update.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// NodeUpdate updates a Node.
+// The swarm object version index is sent as a query parameter — presumably so
+// the daemon can reject concurrent modifications (optimistic locking); confirm
+// against the Engine API docs.
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/parse_logs.go b/unum/vendor/github.com/docker/docker/client/parse_logs.go
new file mode 100644
index 0000000..e427f80
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/parse_logs.go
@@ -0,0 +1,41 @@
+package client
+
+// parse_logs.go contains utility helpers for getting information out of docker
+// log lines. really, it only contains ParseDetails right now. maybe in the
+// future there will be some desire to parse log messages back into a struct?
+// that would go here if we did
+
+import (
+ "net/url"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// ParseLogDetails takes a details string of key value pairs in the form
+// "k=v,l=w", where the keys and values are url query escaped, and each pair
+// is separated by a comma, returns a map. returns an error if the details
+// string is not in a valid format
+// the exact form of details encoding is implemented in
+// api/server/httputils/write_log_stream.go
+func ParseLogDetails(details string) (map[string]string, error) {
+ pairs := strings.Split(details, ",")
+ detailsMap := make(map[string]string, len(pairs))
+ for _, pair := range pairs {
+ // Split on the first '=' only; values may themselves contain '='.
+ p := strings.SplitN(pair, "=", 2)
+ // if there is no equals sign, we will only get 1 part back
+ if len(p) != 2 {
+ return nil, errors.New("invalid details format")
+ }
+ // Both key and value are percent-encoded on the wire.
+ k, err := url.QueryUnescape(p[0])
+ if err != nil {
+ return nil, err
+ }
+ v, err := url.QueryUnescape(p[1])
+ if err != nil {
+ return nil, err
+ }
+ detailsMap[k] = v
+ }
+ return detailsMap, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/ping.go b/unum/vendor/github.com/docker/docker/client/ping.go
new file mode 100644
index 0000000..8501375
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/ping.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "path"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers
+//
+// It builds the request by hand (rather than using cli.get) so the response
+// headers can be read before the status is checked — the version/experimental
+// data is thus available even when the daemon answers with an error status.
+func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
+ var ping types.Ping
+ req, err := cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil)
+ if err != nil {
+ return ping, err
+ }
+ serverResp, err := cli.doRequest(ctx, req)
+ if err != nil {
+ return ping, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ if serverResp.header != nil {
+ ping.APIVersion = serverResp.header.Get("API-Version")
+
+ if serverResp.header.Get("Docker-Experimental") == "true" {
+ ping.Experimental = true
+ }
+ ping.OSType = serverResp.header.Get("OSType")
+ }
+
+ // The status is only validated after the headers were harvested.
+ err = cli.checkResponseErr(serverResp)
+ return ping, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/plugin_create.go b/unum/vendor/github.com/docker/docker/client/plugin_create.go
new file mode 100644
index 0000000..27954aa
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/plugin_create.go
@@ -0,0 +1,26 @@
+package client
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginCreate creates a plugin
+// createContext must be a tar stream of the plugin's rootfs and config.
+func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error {
+ headers := http.Header(make(map[string][]string))
+ headers.Set("Content-Type", "application/x-tar")
+
+ query := url.Values{}
+ query.Set("name", createOptions.RepoName)
+
+ // NOTE(review): on error the response is returned without ensureReaderClosed,
+ // and the final `return err` is always nil at that point — kept as-is to
+ // match upstream.
+ resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers)
+ if err != nil {
+ return err
+ }
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/plugin_disable.go b/unum/vendor/github.com/docker/docker/client/plugin_disable.go
new file mode 100644
index 0000000..30467db
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/plugin_disable.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginDisable disables a plugin
+// options.Force disables the plugin even while it is in use.
+func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error {
+ query := url.Values{}
+ if options.Force {
+ query.Set("force", "1")
+ }
+ resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/plugin_enable.go b/unum/vendor/github.com/docker/docker/client/plugin_enable.go
new file mode 100644
index 0000000..95517c4
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/plugin_enable.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginEnable enables a plugin
+// options.Timeout (seconds) is forwarded to the daemon as a query parameter.
+func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error {
+ query := url.Values{}
+ query.Set("timeout", strconv.Itoa(options.Timeout))
+
+ resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/plugin_inspect.go b/unum/vendor/github.com/docker/docker/client/plugin_inspect.go
new file mode 100644
index 0000000..89f39ee
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/plugin_inspect.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginInspectWithRaw inspects an existing plugin
+func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
+ resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return nil, nil, pluginNotFoundError{name}
+ }
+ return nil, nil, err
+ }
+
+ defer ensureReaderClosed(resp)
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return nil, nil, err
+ }
+ var p types.Plugin
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&p)
+ return &p, body, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/plugin_install.go b/unum/vendor/github.com/docker/docker/client/plugin_install.go
new file mode 100644
index 0000000..ce3e050
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/plugin_install.go
@@ -0,0 +1,113 @@
+package client
+
+import (
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// PluginInstall installs a plugin
+func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
+ query := url.Values{}
+ if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
+ return nil, errors.Wrap(err, "invalid remote reference")
+ }
+ query.Set("remote", options.RemoteRef)
+
+ privileges, err := cli.checkPluginPermissions(ctx, query, options)
+ if err != nil {
+ return nil, err
+ }
+
+ // set name for plugin pull, if empty should default to remote reference
+ query.Set("name", name)
+
+ resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+
+ name = resp.header.Get("Docker-Plugin-Name")
+
+ pr, pw := io.Pipe()
+ go func() { // todo: the client should probably be designed more around the actual api
+ _, err := io.Copy(pw, resp.body)
+ if err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ defer func() {
+ if err != nil {
+ delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
+ ensureReaderClosed(delResp)
+ }
+ }()
+ if len(options.Args) > 0 {
+ if err := cli.PluginSet(ctx, name, options.Args); err != nil {
+ pw.CloseWithError(err)
+ return
+ }
+ }
+
+ if options.Disabled {
+ pw.Close()
+ return
+ }
+
+ enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
+ pw.CloseWithError(enableErr)
+ }()
+ return pr, nil
+}
+
+func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.get(ctx, "/plugins/privileges", query, headers)
+}
+
+func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/plugins/pull", query, privileges, headers)
+}
+
+func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) {
+ resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
+ if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+ // todo: do inspect before to check existing name before checking privileges
+ newAuthHeader, privilegeErr := options.PrivilegeFunc()
+ if privilegeErr != nil {
+ ensureReaderClosed(resp)
+ return nil, privilegeErr
+ }
+ options.RegistryAuth = newAuthHeader
+ resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
+ }
+ if err != nil {
+ ensureReaderClosed(resp)
+ return nil, err
+ }
+
+ var privileges types.PluginPrivileges
+ if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
+ ensureReaderClosed(resp)
+ return nil, err
+ }
+ ensureReaderClosed(resp)
+
+ if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 {
+ accept, err := options.AcceptPermissionsFunc(privileges)
+ if err != nil {
+ return nil, err
+ }
+ if !accept {
+ return nil, pluginPermissionDenied{options.RemoteRef}
+ }
+ }
+ return privileges, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/plugin_list.go b/unum/vendor/github.com/docker/docker/client/plugin_list.go
new file mode 100644
index 0000000..3acde3b
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/plugin_list.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
+// PluginList returns the installed plugins
+func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) {
+ var plugins types.PluginsListResponse
+ query := url.Values{}
+
+ if filter.Len() > 0 {
+ filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
+ if err != nil {
+ return plugins, err
+ }
+ query.Set("filters", filterJSON)
+ }
+ resp, err := cli.get(ctx, "/plugins", query, nil)
+ if err != nil {
+ return plugins, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&plugins)
+ ensureReaderClosed(resp)
+ return plugins, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/plugin_push.go b/unum/vendor/github.com/docker/docker/client/plugin_push.go
new file mode 100644
index 0000000..1e5f963
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/plugin_push.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+ "io"
+
+ "golang.org/x/net/context"
+)
+
+// PluginPush pushes a plugin to a registry
+func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/plugin_remove.go b/unum/vendor/github.com/docker/docker/client/plugin_remove.go
new file mode 100644
index 0000000..b017e4d
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/plugin_remove.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
+// PluginRemove removes a plugin
+func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error {
+ query := url.Values{}
+ if options.Force {
+ query.Set("force", "1")
+ }
+
+ resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/plugin_set.go b/unum/vendor/github.com/docker/docker/client/plugin_set.go
new file mode 100644
index 0000000..3260d2a
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/plugin_set.go
@@ -0,0 +1,12 @@
+package client
+
+import (
+ "golang.org/x/net/context"
+)
+
+// PluginSet modifies settings for an existing plugin
+func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error {
+ resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/plugin_upgrade.go b/unum/vendor/github.com/docker/docker/client/plugin_upgrade.go
new file mode 100644
index 0000000..049ebfa
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/plugin_upgrade.go
@@ -0,0 +1,39 @@
+package client
+
+import (
+ "io"
+ "net/url"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// PluginUpgrade upgrades a plugin
+func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
+ if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil {
+ return nil, err
+ }
+ query := url.Values{}
+ if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
+ return nil, errors.Wrap(err, "invalid remote reference")
+ }
+ query.Set("remote", options.RemoteRef)
+
+ privileges, err := cli.checkPluginPermissions(ctx, query, options)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth)
+ if err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) {
+ headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+ return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers)
+}
diff --git a/unum/vendor/github.com/docker/docker/client/request.go b/unum/vendor/github.com/docker/docker/client/request.go
new file mode 100644
index 0000000..3e7d43f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/request.go
@@ -0,0 +1,262 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+ "golang.org/x/net/context/ctxhttp"
+)
+
+// serverResponse is a wrapper for http API responses.
+type serverResponse struct {
+ body io.ReadCloser
+ header http.Header
+ statusCode int
+ reqURL *url.URL
+}
+
+// head sends an http request to the docker API using the method HEAD.
+func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "HEAD", path, query, nil, headers)
+}
+
+// get sends an http request to the docker API using the method GET with a specific Go context.
+func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "GET", path, query, nil, headers)
+}
+
+// post sends an http request to the docker API using the method POST with a specific Go context.
+func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
+ body, headers, err := encodeBody(obj, headers)
+ if err != nil {
+ return serverResponse{}, err
+ }
+ return cli.sendRequest(ctx, "POST", path, query, body, headers)
+}
+
+func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "POST", path, query, body, headers)
+}
+
+// put sends an http request to the docker API using the method PUT.
+func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
+ body, headers, err := encodeBody(obj, headers)
+ if err != nil {
+ return serverResponse{}, err
+ }
+ return cli.sendRequest(ctx, "PUT", path, query, body, headers)
+}
+
+// putRaw sends an http request to the docker API using the method PUT.
+func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "PUT", path, query, body, headers)
+}
+
+// delete sends an http request to the docker API using the method DELETE.
+func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
+ return cli.sendRequest(ctx, "DELETE", path, query, nil, headers)
+}
+
+type headers map[string][]string
+
+func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) {
+ if obj == nil {
+ return nil, headers, nil
+ }
+
+ body, err := encodeData(obj)
+ if err != nil {
+ return nil, headers, err
+ }
+ if headers == nil {
+ headers = make(map[string][]string)
+ }
+ headers["Content-Type"] = []string{"application/json"}
+ return body, headers, nil
+}
+
+func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) {
+ expectedPayload := (method == "POST" || method == "PUT")
+ if expectedPayload && body == nil {
+ body = bytes.NewReader([]byte{})
+ }
+
+ req, err := http.NewRequest(method, path, body)
+ if err != nil {
+ return nil, err
+ }
+ req = cli.addHeaders(req, headers)
+
+ if cli.proto == "unix" || cli.proto == "npipe" {
+ // For local communications, it doesn't matter what the host is. We just
+ // need a valid and meaningful host name. (See #189)
+ req.Host = "docker"
+ }
+
+ req.URL.Host = cli.addr
+ req.URL.Scheme = cli.scheme
+
+ if expectedPayload && req.Header.Get("Content-Type") == "" {
+ req.Header.Set("Content-Type", "text/plain")
+ }
+ return req, nil
+}
+
+func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) {
+ req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers)
+ if err != nil {
+ return serverResponse{}, err
+ }
+ resp, err := cli.doRequest(ctx, req)
+ if err != nil {
+ return resp, err
+ }
+ if err := cli.checkResponseErr(resp); err != nil {
+ return resp, err
+ }
+ return resp, nil
+}
+
+func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) {
+ serverResp := serverResponse{statusCode: -1, reqURL: req.URL}
+
+ resp, err := ctxhttp.Do(ctx, cli.client, req)
+ if err != nil {
+ if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") {
+ return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
+ }
+
+ if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") {
+ return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err)
+ }
+
+ // Don't decorate context sentinel errors; users may be comparing to
+ // them directly.
+ switch err {
+ case context.Canceled, context.DeadlineExceeded:
+ return serverResp, err
+ }
+
+ if nErr, ok := err.(*url.Error); ok {
+ if nErr, ok := nErr.Err.(*net.OpError); ok {
+ if os.IsPermission(nErr.Err) {
+ return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host)
+ }
+ }
+ }
+
+ if err, ok := err.(net.Error); ok {
+ if err.Timeout() {
+ return serverResp, ErrorConnectionFailed(cli.host)
+ }
+ if !err.Temporary() {
+ if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
+ return serverResp, ErrorConnectionFailed(cli.host)
+ }
+ }
+ }
+
+ // Although there's not a strongly typed error for this in go-winio,
+ // lots of people are using the default configuration for the docker
+ // daemon on Windows where the daemon is listening on a named pipe
+ // `//./pipe/docker_engine`, and the client must be running elevated.
+ // Give users a clue rather than the not-overly useful message
+ // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info:
+ // open //./pipe/docker_engine: The system cannot find the file specified.`.
+ // Note we can't string compare "The system cannot find the file specified" as
+ // this is localised - for example in French the error would be
+ // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
+ if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
+ err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.")
+ }
+
+ return serverResp, errors.Wrap(err, "error during connect")
+ }
+
+ if resp != nil {
+ serverResp.statusCode = resp.StatusCode
+ serverResp.body = resp.Body
+ serverResp.header = resp.Header
+ }
+ return serverResp, nil
+}
+
+func (cli *Client) checkResponseErr(serverResp serverResponse) error {
+ if serverResp.statusCode >= 200 && serverResp.statusCode < 400 {
+ return nil
+ }
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return err
+ }
+ if len(body) == 0 {
+ return fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
+ }
+
+ var ct string
+ if serverResp.header != nil {
+ ct = serverResp.header.Get("Content-Type")
+ }
+
+ var errorMessage string
+ if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" {
+ var errorResponse types.ErrorResponse
+ if err := json.Unmarshal(body, &errorResponse); err != nil {
+ return fmt.Errorf("Error reading JSON: %v", err)
+ }
+ errorMessage = errorResponse.Message
+ } else {
+ errorMessage = string(body)
+ }
+
+ return fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage))
+}
+
+func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request {
+ // Add CLI Config's HTTP Headers BEFORE we set the Docker headers
+ // then the user can't change OUR headers
+ for k, v := range cli.customHTTPHeaders {
+ if versions.LessThan(cli.version, "1.25") && k == "User-Agent" {
+ continue
+ }
+ req.Header.Set(k, v)
+ }
+
+ if headers != nil {
+ for k, v := range headers {
+ req.Header[k] = v
+ }
+ }
+ return req
+}
+
+func encodeData(data interface{}) (*bytes.Buffer, error) {
+ params := bytes.NewBuffer(nil)
+ if data != nil {
+ if err := json.NewEncoder(params).Encode(data); err != nil {
+ return nil, err
+ }
+ }
+ return params, nil
+}
+
+func ensureReaderClosed(response serverResponse) {
+ if response.body != nil {
+ // Drain up to 512 bytes and close the body to let the Transport reuse the connection
+ io.CopyN(ioutil.Discard, response.body, 512)
+ response.body.Close()
+ }
+}
diff --git a/unum/vendor/github.com/docker/docker/client/secret_create.go b/unum/vendor/github.com/docker/docker/client/secret_create.go
new file mode 100644
index 0000000..4354afe
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/secret_create.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SecretCreate creates a new Secret.
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) {
+ var response types.SecretCreateResponse
+ if err := cli.NewVersionError("1.25", "secret create"); err != nil {
+ return response, err
+ }
+ resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/secret_inspect.go b/unum/vendor/github.com/docker/docker/client/secret_inspect.go
new file mode 100644
index 0000000..9b60297
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/secret_inspect.go
@@ -0,0 +1,37 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SecretInspectWithRaw returns the secret information with raw data
+func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) {
+ if err := cli.NewVersionError("1.25", "secret inspect"); err != nil {
+ return swarm.Secret{}, nil, err
+ }
+ resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
+ if err != nil {
+ if resp.statusCode == http.StatusNotFound {
+ return swarm.Secret{}, nil, secretNotFoundError{id}
+ }
+ return swarm.Secret{}, nil, err
+ }
+ defer ensureReaderClosed(resp)
+
+ body, err := ioutil.ReadAll(resp.body)
+ if err != nil {
+ return swarm.Secret{}, nil, err
+ }
+
+ var secret swarm.Secret
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&secret)
+
+ return secret, body, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/secret_list.go b/unum/vendor/github.com/docker/docker/client/secret_list.go
new file mode 100644
index 0000000..0d33ecf
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/secret_list.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SecretList returns the list of secrets.
+func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) {
+ if err := cli.NewVersionError("1.25", "secret list"); err != nil {
+ return nil, err
+ }
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/secrets", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var secrets []swarm.Secret
+ err = json.NewDecoder(resp.body).Decode(&secrets)
+ ensureReaderClosed(resp)
+ return secrets, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/secret_remove.go b/unum/vendor/github.com/docker/docker/client/secret_remove.go
new file mode 100644
index 0000000..c5e37af
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/secret_remove.go
@@ -0,0 +1,13 @@
+package client
+
+import "golang.org/x/net/context"
+
+// SecretRemove removes a Secret.
+func (cli *Client) SecretRemove(ctx context.Context, id string) error {
+ if err := cli.NewVersionError("1.25", "secret remove"); err != nil {
+ return err
+ }
+ resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/secret_update.go b/unum/vendor/github.com/docker/docker/client/secret_update.go
new file mode 100644
index 0000000..875a4c9
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/secret_update.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// SecretUpdate attempts to update a Secret
+func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error {
+ if err := cli.NewVersionError("1.25", "secret update"); err != nil {
+ return err
+ }
+ query := url.Values{}
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+ resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil)
+ ensureReaderClosed(resp)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/service_create.go b/unum/vendor/github.com/docker/docker/client/service_create.go
new file mode 100644
index 0000000..6b9364d
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/service_create.go
@@ -0,0 +1,156 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/distribution/reference"
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "golang.org/x/net/context"
+)
+
+// ServiceCreate creates a new Service.
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) {
+ var distErr error
+
+ headers := map[string][]string{
+ "version": {cli.version},
+ }
+
+ if options.EncodedRegistryAuth != "" {
+ headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
+ }
+
+ // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container
+ if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) {
+ service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
+ }
+
+ if err := validateServiceSpec(service); err != nil {
+ return types.ServiceCreateResponse{}, err
+ }
+
+ // ensure that the image is tagged
+ var imgPlatforms []swarm.Platform
+ if service.TaskTemplate.ContainerSpec != nil {
+ if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
+ service.TaskTemplate.ContainerSpec.Image = taggedImg
+ }
+ if options.QueryRegistry {
+ var img string
+ img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
+ if img != "" {
+ service.TaskTemplate.ContainerSpec.Image = img
+ }
+ }
+ }
+
+ // ensure that the image is tagged
+ if service.TaskTemplate.PluginSpec != nil {
+ if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
+ service.TaskTemplate.PluginSpec.Remote = taggedImg
+ }
+ if options.QueryRegistry {
+ var img string
+ img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
+ if img != "" {
+ service.TaskTemplate.PluginSpec.Remote = img
+ }
+ }
+ }
+
+ if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
+ service.TaskTemplate.Placement = &swarm.Placement{}
+ }
+ if len(imgPlatforms) > 0 {
+ service.TaskTemplate.Placement.Platforms = imgPlatforms
+ }
+
+ var response types.ServiceCreateResponse
+ resp, err := cli.post(ctx, "/services/create", nil, service, headers)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+
+ if distErr != nil {
+ response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image))
+ }
+
+ ensureReaderClosed(resp)
+ return response, err
+}
+
+func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
+ distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
+ imageWithDigest := image
+ var platforms []swarm.Platform
+ if err != nil {
+ return "", nil, err
+ }
+
+ imageWithDigest = imageWithDigestString(image, distributionInspect.Descriptor.Digest)
+
+ if len(distributionInspect.Platforms) > 0 {
+ platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
+ for _, p := range distributionInspect.Platforms {
+ platforms = append(platforms, swarm.Platform{
+ Architecture: p.Architecture,
+ OS: p.OS,
+ })
+ }
+ }
+ return imageWithDigest, platforms, err
+}
+
+// imageWithDigestString takes an image string and a digest, and updates
+// the image string if it didn't originally contain a digest. It returns
+// an empty string if there are no updates.
+func imageWithDigestString(image string, dgst digest.Digest) string {
+ namedRef, err := reference.ParseNormalizedNamed(image)
+ if err == nil {
+ if _, isCanonical := namedRef.(reference.Canonical); !isCanonical {
+ // ensure that image gets a default tag if none is provided
+ img, err := reference.WithDigest(namedRef, dgst)
+ if err == nil {
+ return reference.FamiliarString(img)
+ }
+ }
+ }
+ return ""
+}
+
+// imageWithTagString takes an image string, and returns a tagged image
+// string, adding a 'latest' tag if one was not provided. It returns an
+// empty string if a canonical reference was provided
+func imageWithTagString(image string) string {
+ namedRef, err := reference.ParseNormalizedNamed(image)
+ if err == nil {
+ return reference.FamiliarString(reference.TagNameOnly(namedRef))
+ }
+ return ""
+}
+
+// digestWarning constructs a formatted warning string using the
+// image name that could not be pinned by digest. The formatting
+// is hardcoded, but could be made smarter in the future
+func digestWarning(image string) string {
+ return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
+}
+
+func validateServiceSpec(s swarm.ServiceSpec) error {
+ if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
+ return errors.New("must not specify both a container spec and a plugin spec in the task template")
+ }
+ if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
+ return errors.New("mismatched runtime with plugin spec")
+ }
+ if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
+ return errors.New("mismatched runtime with container spec")
+ }
+ return nil
+}
diff --git a/unum/vendor/github.com/docker/docker/client/service_inspect.go b/unum/vendor/github.com/docker/docker/client/service_inspect.go
new file mode 100644
index 0000000..d7e051e
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/service_inspect.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceInspectWithRaw returns the service information and the raw data.
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) {
+ query := url.Values{}
+ query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
+ serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
+ if err != nil {
+ if serverResp.statusCode == http.StatusNotFound {
+ return swarm.Service{}, nil, serviceNotFoundError{serviceID}
+ }
+ return swarm.Service{}, nil, err
+ }
+ defer ensureReaderClosed(serverResp)
+
+ body, err := ioutil.ReadAll(serverResp.body)
+ if err != nil {
+ return swarm.Service{}, nil, err
+ }
+
+ var response swarm.Service
+ rdr := bytes.NewReader(body)
+ err = json.NewDecoder(rdr).Decode(&response)
+ return response, body, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/service_list.go b/unum/vendor/github.com/docker/docker/client/service_list.go
new file mode 100644
index 0000000..c29e6d4
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/service_list.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceList returns the list of services.
+func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
+ query := url.Values{}
+
+ if options.Filters.Len() > 0 {
+ filterJSON, err := filters.ToParam(options.Filters)
+ if err != nil {
+ return nil, err
+ }
+
+ query.Set("filters", filterJSON)
+ }
+
+ resp, err := cli.get(ctx, "/services", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var services []swarm.Service
+ err = json.NewDecoder(resp.body).Decode(&services)
+ ensureReaderClosed(resp)
+ return services, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/service_logs.go b/unum/vendor/github.com/docker/docker/client/service_logs.go
new file mode 100644
index 0000000..24384e3
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/service_logs.go
@@ -0,0 +1,52 @@
+package client
+
+import (
+ "io"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
// ServiceLogs returns the logs generated by a service in an io.ReadCloser.
// It's up to the caller to close the stream.
func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
	// Translate the options struct into the query parameters the daemon expects.
	query := url.Values{}
	if options.ShowStdout {
		query.Set("stdout", "1")
	}

	if options.ShowStderr {
		query.Set("stderr", "1")
	}

	if options.Since != "" {
		// GetTimestamp accepts absolute timestamps as well as relative durations.
		ts, err := timetypes.GetTimestamp(options.Since, time.Now())
		if err != nil {
			return nil, err
		}
		query.Set("since", ts)
	}

	if options.Timestamps {
		query.Set("timestamps", "1")
	}

	if options.Details {
		query.Set("details", "1")
	}

	if options.Follow {
		query.Set("follow", "1")
	}
	query.Set("tail", options.Tail)

	resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil)
	if err != nil {
		return nil, err
	}
	// The body is the log stream itself; ownership passes to the caller.
	return resp.body, nil
}
diff --git a/unum/vendor/github.com/docker/docker/client/service_remove.go b/unum/vendor/github.com/docker/docker/client/service_remove.go
new file mode 100644
index 0000000..a9331f9
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/service_remove.go
@@ -0,0 +1,10 @@
+package client
+
+import "golang.org/x/net/context"
+
// ServiceRemove kills and removes a service.
func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
	resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
	// Drain and close the body so the underlying connection can be reused.
	ensureReaderClosed(resp)
	return err
}
diff --git a/unum/vendor/github.com/docker/docker/client/service_update.go b/unum/vendor/github.com/docker/docker/client/service_update.go
new file mode 100644
index 0000000..8764f29
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/service_update.go
@@ -0,0 +1,92 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
+// ServiceUpdate updates a Service.
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) {
+ var (
+ query = url.Values{}
+ distErr error
+ )
+
+ headers := map[string][]string{
+ "version": {cli.version},
+ }
+
+ if options.EncodedRegistryAuth != "" {
+ headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
+ }
+
+ if options.RegistryAuthFrom != "" {
+ query.Set("registryAuthFrom", options.RegistryAuthFrom)
+ }
+
+ if options.Rollback != "" {
+ query.Set("rollback", options.Rollback)
+ }
+
+ query.Set("version", strconv.FormatUint(version.Index, 10))
+
+ if err := validateServiceSpec(service); err != nil {
+ return types.ServiceUpdateResponse{}, err
+ }
+
+ var imgPlatforms []swarm.Platform
+ // ensure that the image is tagged
+ if service.TaskTemplate.ContainerSpec != nil {
+ if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
+ service.TaskTemplate.ContainerSpec.Image = taggedImg
+ }
+ if options.QueryRegistry {
+ var img string
+ img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
+ if img != "" {
+ service.TaskTemplate.ContainerSpec.Image = img
+ }
+ }
+ }
+
+ // ensure that the image is tagged
+ if service.TaskTemplate.PluginSpec != nil {
+ if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
+ service.TaskTemplate.PluginSpec.Remote = taggedImg
+ }
+ if options.QueryRegistry {
+ var img string
+ img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
+ if img != "" {
+ service.TaskTemplate.PluginSpec.Remote = img
+ }
+ }
+ }
+
+ if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
+ service.TaskTemplate.Placement = &swarm.Placement{}
+ }
+ if len(imgPlatforms) > 0 {
+ service.TaskTemplate.Placement.Platforms = imgPlatforms
+ }
+
+ var response types.ServiceUpdateResponse
+ resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers)
+ if err != nil {
+ return response, err
+ }
+
+ err = json.NewDecoder(resp.body).Decode(&response)
+
+ if distErr != nil {
+ response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image))
+ }
+
+ ensureReaderClosed(resp)
+ return response, err
+}
diff --git a/unum/vendor/github.com/docker/docker/client/session.go b/unum/vendor/github.com/docker/docker/client/session.go
new file mode 100644
index 0000000..8ee9162
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/session.go
@@ -0,0 +1,19 @@
+package client
+
+import (
+ "net"
+ "net/http"
+
+ "golang.org/x/net/context"
+)
+
// DialSession returns a connection that can be used for communication with the daemon.
func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
	req, err := http.NewRequest("POST", "/session", nil)
	if err != nil {
		return nil, err
	}
	req = cli.addHeaders(req, meta)

	// Hijack the HTTP connection to obtain a raw stream for the session protocol.
	return cli.setupHijackConn(req, proto)
}
diff --git a/unum/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/unum/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
new file mode 100644
index 0000000..be28d32
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
// SwarmGetUnlockKey retrieves the swarm's unlock key.
func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) {
	serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil)
	if err != nil {
		return types.SwarmUnlockKeyResponse{}, err
	}

	var response types.SwarmUnlockKeyResponse
	err = json.NewDecoder(serverResp.body).Decode(&response)
	// Close the body after decoding so the connection can be reused.
	ensureReaderClosed(serverResp)
	return response, err
}
diff --git a/unum/vendor/github.com/docker/docker/client/swarm_init.go b/unum/vendor/github.com/docker/docker/client/swarm_init.go
new file mode 100644
index 0000000..9e65e1c
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/swarm_init.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
// SwarmInit initializes the swarm.
// The returned string is the JSON-decoded response body; presumably the new
// node's ID — confirm against the daemon's API documentation.
func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
	serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil)
	if err != nil {
		return "", err
	}

	var response string
	err = json.NewDecoder(serverResp.body).Decode(&response)
	ensureReaderClosed(serverResp)
	return response, err
}
diff --git a/unum/vendor/github.com/docker/docker/client/swarm_inspect.go b/unum/vendor/github.com/docker/docker/client/swarm_inspect.go
new file mode 100644
index 0000000..77e72f8
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/swarm_inspect.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
// SwarmInspect inspects the swarm.
func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) {
	serverResp, err := cli.get(ctx, "/swarm", nil, nil)
	if err != nil {
		return swarm.Swarm{}, err
	}

	var response swarm.Swarm
	err = json.NewDecoder(serverResp.body).Decode(&response)
	// Close the body after decoding so the connection can be reused.
	ensureReaderClosed(serverResp)
	return response, err
}
diff --git a/unum/vendor/github.com/docker/docker/client/swarm_join.go b/unum/vendor/github.com/docker/docker/client/swarm_join.go
new file mode 100644
index 0000000..19e5192
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/swarm_join.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
// SwarmJoin joins the swarm.
func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
	resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
	// No response payload is decoded; drain and close so the connection is reusable.
	ensureReaderClosed(resp)
	return err
}
diff --git a/unum/vendor/github.com/docker/docker/client/swarm_leave.go b/unum/vendor/github.com/docker/docker/client/swarm_leave.go
new file mode 100644
index 0000000..3a205cf
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/swarm_leave.go
@@ -0,0 +1,18 @@
+package client
+
+import (
+ "net/url"
+
+ "golang.org/x/net/context"
+)
+
// SwarmLeave leaves the swarm.
// force makes a manager leave even if that breaks quorum.
func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
	query := url.Values{}
	if force {
		query.Set("force", "1")
	}
	resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
	ensureReaderClosed(resp)
	return err
}
diff --git a/unum/vendor/github.com/docker/docker/client/swarm_unlock.go b/unum/vendor/github.com/docker/docker/client/swarm_unlock.go
new file mode 100644
index 0000000..9ee441f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/swarm_unlock.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
// SwarmUnlock unlocks locked swarm.
func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error {
	serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil)
	// No response payload is decoded; drain and close so the connection is reusable.
	ensureReaderClosed(serverResp)
	return err
}
diff --git a/unum/vendor/github.com/docker/docker/client/swarm_update.go b/unum/vendor/github.com/docker/docker/client/swarm_update.go
new file mode 100644
index 0000000..7245fd4
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/swarm_update.go
@@ -0,0 +1,22 @@
+package client
+
+import (
+ "fmt"
+ "net/url"
+ "strconv"
+
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
// SwarmUpdate updates the swarm.
// version is the current spec version (optimistic concurrency control); flags
// request rotation of the worker/manager join tokens and the unlock key.
func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
	// NOTE(review): the parameter named "swarm" shadows the swarm package for
	// the remainder of this body (legal, since all package references are in
	// the signature) — kept as-is to match upstream.
	query := url.Values{}
	query.Set("version", strconv.FormatUint(version.Index, 10))
	query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken))
	query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken))
	query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey))
	resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
	ensureReaderClosed(resp)
	return err
}
diff --git a/unum/vendor/github.com/docker/docker/client/task_inspect.go b/unum/vendor/github.com/docker/docker/client/task_inspect.go
new file mode 100644
index 0000000..bc8058f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/task_inspect.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types/swarm"
+
+ "golang.org/x/net/context"
+)
+
// TaskInspectWithRaw returns the task information and its raw representation.
func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
	serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
	if err != nil {
		// Map a 404 to a typed not-found error so callers can detect it.
		if serverResp.statusCode == http.StatusNotFound {
			return swarm.Task{}, nil, taskNotFoundError{taskID}
		}
		return swarm.Task{}, nil, err
	}
	defer ensureReaderClosed(serverResp)

	// Read the whole body once so it can be returned raw alongside the decoded value.
	body, err := ioutil.ReadAll(serverResp.body)
	if err != nil {
		return swarm.Task{}, nil, err
	}

	var response swarm.Task
	rdr := bytes.NewReader(body)
	err = json.NewDecoder(rdr).Decode(&response)
	return response, body, err
}
diff --git a/unum/vendor/github.com/docker/docker/client/task_list.go b/unum/vendor/github.com/docker/docker/client/task_list.go
new file mode 100644
index 0000000..66324da
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/task_list.go
@@ -0,0 +1,35 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "github.com/docker/docker/api/types/swarm"
+ "golang.org/x/net/context"
+)
+
// TaskList returns the list of tasks.
func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
	query := url.Values{}

	// Only send a "filters" query parameter when the caller supplied filters.
	if options.Filters.Len() > 0 {
		filterJSON, err := filters.ToParam(options.Filters)
		if err != nil {
			return nil, err
		}

		query.Set("filters", filterJSON)
	}

	resp, err := cli.get(ctx, "/tasks", query, nil)
	if err != nil {
		return nil, err
	}

	var tasks []swarm.Task
	err = json.NewDecoder(resp.body).Decode(&tasks)
	ensureReaderClosed(resp)
	return tasks, err
}
diff --git a/unum/vendor/github.com/docker/docker/client/task_logs.go b/unum/vendor/github.com/docker/docker/client/task_logs.go
new file mode 100644
index 0000000..2ed1954
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/task_logs.go
@@ -0,0 +1,52 @@
+package client
+
+import (
+ "io"
+ "net/url"
+ "time"
+
+ "golang.org/x/net/context"
+
+ "github.com/docker/docker/api/types"
+ timetypes "github.com/docker/docker/api/types/time"
+)
+
// TaskLogs returns the logs generated by a task in an io.ReadCloser.
// It's up to the caller to close the stream.
func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
	// Translate the options struct into the query parameters the daemon expects.
	query := url.Values{}
	if options.ShowStdout {
		query.Set("stdout", "1")
	}

	if options.ShowStderr {
		query.Set("stderr", "1")
	}

	if options.Since != "" {
		// GetTimestamp accepts absolute timestamps as well as relative durations.
		ts, err := timetypes.GetTimestamp(options.Since, time.Now())
		if err != nil {
			return nil, err
		}
		query.Set("since", ts)
	}

	if options.Timestamps {
		query.Set("timestamps", "1")
	}

	if options.Details {
		query.Set("details", "1")
	}

	if options.Follow {
		query.Set("follow", "1")
	}
	query.Set("tail", options.Tail)

	resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil)
	if err != nil {
		return nil, err
	}
	// The body is the log stream itself; ownership passes to the caller.
	return resp.body, nil
}
diff --git a/unum/vendor/github.com/docker/docker/client/transport.go b/unum/vendor/github.com/docker/docker/client/transport.go
new file mode 100644
index 0000000..73f6ef7
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/transport.go
@@ -0,0 +1,17 @@
+package client
+
+import (
+ "crypto/tls"
+ "net/http"
+)
+
+// resolveTLSConfig attempts to resolve the TLS configuration from the
+// RoundTripper.
+func resolveTLSConfig(transport http.RoundTripper) *tls.Config {
+ switch tr := transport.(type) {
+ case *http.Transport:
+ return tr.TLSClientConfig
+ default:
+ return nil
+ }
+}
diff --git a/unum/vendor/github.com/docker/docker/client/utils.go b/unum/vendor/github.com/docker/docker/client/utils.go
new file mode 100644
index 0000000..f3d8877
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/utils.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "net/url"
+ "regexp"
+
+ "github.com/docker/docker/api/types/filters"
+)
+
// headerRegexp matches a Server header of the form "Docker/<version> (<os>)"
// and captures the parenthesized operating system.
var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)

// getDockerOS returns the operating system based on the server header from the daemon.
// It returns the empty string when the header does not match the expected shape.
func getDockerOS(serverHeader string) string {
	if m := headerRegexp.FindStringSubmatch(serverHeader); m != nil {
		return m[1]
	}
	return ""
}
+
// getFiltersQuery returns a url query with "filters" query term, based on the
// filters provided.
func getFiltersQuery(f filters.Args) (url.Values, error) {
	query := url.Values{}
	// An empty filter set yields an empty query rather than "filters={}".
	if f.Len() > 0 {
		filterJSON, err := filters.ToParam(f)
		if err != nil {
			return query, err
		}
		query.Set("filters", filterJSON)
	}
	return query, nil
}
diff --git a/unum/vendor/github.com/docker/docker/client/version.go b/unum/vendor/github.com/docker/docker/client/version.go
new file mode 100644
index 0000000..933ceb4
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/version.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
// ServerVersion returns information of the docker client and server host.
func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) {
	resp, err := cli.get(ctx, "/version", nil, nil)
	if err != nil {
		return types.Version{}, err
	}

	var server types.Version
	err = json.NewDecoder(resp.body).Decode(&server)
	// Close the body after decoding so the connection can be reused.
	ensureReaderClosed(resp)
	return server, err
}
diff --git a/unum/vendor/github.com/docker/docker/client/volume_create.go b/unum/vendor/github.com/docker/docker/client/volume_create.go
new file mode 100644
index 0000000..9620c87
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/volume_create.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "encoding/json"
+
+ "github.com/docker/docker/api/types"
+ volumetypes "github.com/docker/docker/api/types/volume"
+ "golang.org/x/net/context"
+)
+
// VolumeCreate creates a volume in the docker host.
func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) {
	var volume types.Volume
	resp, err := cli.post(ctx, "/volumes/create", nil, options, nil)
	if err != nil {
		return volume, err
	}
	// Decode the created volume, then close the body so the connection is reusable.
	err = json.NewDecoder(resp.body).Decode(&volume)
	ensureReaderClosed(resp)
	return volume, err
}
diff --git a/unum/vendor/github.com/docker/docker/client/volume_inspect.go b/unum/vendor/github.com/docker/docker/client/volume_inspect.go
new file mode 100644
index 0000000..3860e9b
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/volume_inspect.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/docker/docker/api/types"
+ "golang.org/x/net/context"
+)
+
// VolumeInspect returns the information about a specific volume in the docker host.
// It is a convenience wrapper over VolumeInspectWithRaw that discards the raw body.
func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {
	volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID)
	return volume, err
}
+
// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
	var volume types.Volume
	resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
	if err != nil {
		// Map a 404 to a typed not-found error so callers can detect it.
		if resp.statusCode == http.StatusNotFound {
			return volume, nil, volumeNotFoundError{volumeID}
		}
		return volume, nil, err
	}
	defer ensureReaderClosed(resp)

	// Read the whole body once so it can be returned raw alongside the decoded value.
	body, err := ioutil.ReadAll(resp.body)
	if err != nil {
		return volume, nil, err
	}
	rdr := bytes.NewReader(body)
	err = json.NewDecoder(rdr).Decode(&volume)
	return volume, body, err
}
diff --git a/unum/vendor/github.com/docker/docker/client/volume_list.go b/unum/vendor/github.com/docker/docker/client/volume_list.go
new file mode 100644
index 0000000..32247ce
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/volume_list.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "encoding/json"
+ "net/url"
+
+ "github.com/docker/docker/api/types/filters"
+ volumetypes "github.com/docker/docker/api/types/volume"
+ "golang.org/x/net/context"
+)
+
// VolumeList returns the volumes configured in the docker host.
func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) {
	var volumes volumetypes.VolumesListOKBody
	query := url.Values{}

	if filter.Len() > 0 {
		// Encode filters in the format matching the negotiated API version.
		filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
		if err != nil {
			return volumes, err
		}
		query.Set("filters", filterJSON)
	}
	resp, err := cli.get(ctx, "/volumes", query, nil)
	if err != nil {
		return volumes, err
	}

	err = json.NewDecoder(resp.body).Decode(&volumes)
	ensureReaderClosed(resp)
	return volumes, err
}
diff --git a/unum/vendor/github.com/docker/docker/client/volume_prune.go b/unum/vendor/github.com/docker/docker/client/volume_prune.go
new file mode 100644
index 0000000..2e7fea7
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/volume_prune.go
@@ -0,0 +1,36 @@
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/filters"
+ "golang.org/x/net/context"
+)
+
// VolumesPrune requests the daemon to delete unused data
func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) {
	var report types.VolumesPruneReport

	// Volume pruning requires API version 1.25 or newer.
	if err := cli.NewVersionError("1.25", "volume prune"); err != nil {
		return report, err
	}

	query, err := getFiltersQuery(pruneFilters)
	if err != nil {
		return report, err
	}

	serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil)
	if err != nil {
		return report, err
	}
	defer ensureReaderClosed(serverResp)

	if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
		return report, fmt.Errorf("Error retrieving volume prune report: %v", err)
	}

	return report, nil
}
diff --git a/unum/vendor/github.com/docker/docker/client/volume_remove.go b/unum/vendor/github.com/docker/docker/client/volume_remove.go
new file mode 100644
index 0000000..6c26575
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/client/volume_remove.go
@@ -0,0 +1,21 @@
+package client
+
+import (
+ "net/url"
+
+ "github.com/docker/docker/api/types/versions"
+ "golang.org/x/net/context"
+)
+
// VolumeRemove removes a volume from the docker host.
// The force flag is only honored on API 1.25+; older daemons ignore it.
func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error {
	query := url.Values{}
	if versions.GreaterThanOrEqualTo(cli.version, "1.25") {
		if force {
			query.Set("force", "1")
		}
	}
	resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
	ensureReaderClosed(resp)
	return err
}
diff --git a/unum/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/unum/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
new file mode 100644
index 0000000..3d737b3
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
@@ -0,0 +1,51 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+)
+
// errBufferFull is reported by Write once the buffer's capacity is exhausted.
var errBufferFull = errors.New("buffer is full")

// fixedBuffer is a fixed-capacity byte buffer: Write appends into a
// pre-allocated backing array and Read consumes previously written bytes.
type fixedBuffer struct {
	buf      []byte
	pos      int // write offset; bytes [0, pos) have been written
	lastRead int // read offset; bytes [lastRead, pos) are still unread
}

// Write copies as much of p as fits into the remaining capacity. It returns
// errBufferFull when the copy stopped because the buffer filled up, and
// io.ErrShortWrite for any other partial write.
func (b *fixedBuffer) Write(p []byte) (int, error) {
	written := copy(b.buf[b.pos:cap(b.buf)], p)
	b.pos += written
	if written == len(p) {
		return written, nil
	}
	if b.pos == cap(b.buf) {
		return written, errBufferFull
	}
	return written, io.ErrShortWrite
}

// Read copies unread bytes into p. It never blocks and never fails.
func (b *fixedBuffer) Read(p []byte) (int, error) {
	consumed := copy(p, b.buf[b.lastRead:b.pos])
	b.lastRead += consumed
	return consumed, nil
}

// Len reports how many written bytes remain unread.
func (b *fixedBuffer) Len() int {
	return b.pos - b.lastRead
}

// Cap reports the total capacity of the backing array.
func (b *fixedBuffer) Cap() int {
	return cap(b.buf)
}

// Reset discards all data while keeping the backing array for reuse.
func (b *fixedBuffer) Reset() {
	b.buf = b.buf[:0]
	b.pos = 0
	b.lastRead = 0
}

// String returns the unread portion of the buffer as a string.
func (b *fixedBuffer) String() string {
	return string(b.buf[b.lastRead:b.pos])
}
diff --git a/unum/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/unum/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
new file mode 100644
index 0000000..72a04f3
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
@@ -0,0 +1,186 @@
package ioutils

import (
	"errors"
	"io"
	"sync"
)

// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6

// minCap is the lowest capacity to use in byte slices that buffer data
const minCap = 64

// blockThreshold is the minimum number of bytes in the buffer which will cause
// a write to BytesPipe to block when allocating a new slice.
const blockThreshold = 1e6

var (
	// ErrClosed is returned when Write is called on a closed BytesPipe.
	ErrClosed = errors.New("write to closed BytesPipe")

	// bufPools caches fixedBuffer instances keyed by capacity so commonly
	// sized buffers are reused rather than reallocated.
	bufPools     = make(map[int]*sync.Pool)
	bufPoolsLock sync.Mutex
)

// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
// All written data may be read at most once. Also, BytesPipe allocates
// and releases new byte slices to adjust to current needs, so the buffer
// won't be overgrown after peak loads.
type BytesPipe struct {
	mu       sync.Mutex
	wait     *sync.Cond // wakes goroutines blocked in Read/Write on state changes
	buf      []*fixedBuffer
	bufLen   int
	closeErr error // error to return from next Read. set to nil if not closed.
}

// NewBytesPipe creates new BytesPipe, initialized by specified slice.
// If buf is nil, then it will be initialized with slice which cap is 64.
// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf).
func NewBytesPipe() *BytesPipe {
	bp := &BytesPipe{}
	bp.buf = append(bp.buf, getBuffer(minCap))
	bp.wait = sync.NewCond(&bp.mu)
	return bp
}

// Write writes p to BytesPipe.
// It can allocate new []byte slices in a process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
	bp.mu.Lock()

	written := 0
loop0:
	for {
		if bp.closeErr != nil {
			bp.mu.Unlock()
			return written, ErrClosed
		}

		if len(bp.buf) == 0 {
			bp.buf = append(bp.buf, getBuffer(64))
		}
		// get the last buffer
		b := bp.buf[len(bp.buf)-1]

		n, err := b.Write(p)
		written += n
		bp.bufLen += n

		// errBufferFull is an error we expect to get if the buffer is full
		if err != nil && err != errBufferFull {
			bp.wait.Broadcast()
			bp.mu.Unlock()
			return written, err
		}

		// if there was enough room to write all then break
		if len(p) == n {
			break
		}

		// more data: write to the next slice
		p = p[n:]

		// make sure the buffer doesn't grow too big from this write
		for bp.bufLen >= blockThreshold {
			// Block until a reader drains data; re-check closeErr after waking
			// since the pipe may have been closed while we slept.
			bp.wait.Wait()
			if bp.closeErr != nil {
				continue loop0
			}
		}

		// add new byte slice to the buffers slice and continue writing
		nextCap := b.Cap() * 2
		if nextCap > maxCap {
			nextCap = maxCap
		}
		bp.buf = append(bp.buf, getBuffer(nextCap))
	}
	bp.wait.Broadcast()
	bp.mu.Unlock()
	return written, nil
}

// CloseWithError causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) CloseWithError(err error) error {
	bp.mu.Lock()
	if err != nil {
		bp.closeErr = err
	} else {
		bp.closeErr = io.EOF
	}
	// Wake every goroutine blocked in Read or Write so it observes closeErr.
	bp.wait.Broadcast()
	bp.mu.Unlock()
	return nil
}

// Close causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) Close() error {
	return bp.CloseWithError(nil)
}

// Read reads bytes from BytesPipe.
// Data could be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
	bp.mu.Lock()
	if bp.bufLen == 0 {
		if bp.closeErr != nil {
			bp.mu.Unlock()
			return 0, bp.closeErr
		}
		// Wait for a writer to produce data, or for the pipe to be closed.
		bp.wait.Wait()
		if bp.bufLen == 0 && bp.closeErr != nil {
			err := bp.closeErr
			bp.mu.Unlock()
			return 0, err
		}
	}

	for bp.bufLen > 0 {
		b := bp.buf[0]
		read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
		n += read
		bp.bufLen -= read

		if b.Len() == 0 {
			// it's empty so return it to the pool and move to the next one
			returnBuffer(b)
			bp.buf[0] = nil
			bp.buf = bp.buf[1:]
		}

		if len(p) == read {
			break
		}

		p = p[read:]
	}

	// Wake writers that may be blocked waiting for buffer space.
	bp.wait.Broadcast()
	bp.mu.Unlock()
	return
}

// returnBuffer resets b and returns it to the pool for its capacity, if one exists.
func returnBuffer(b *fixedBuffer) {
	b.Reset()
	bufPoolsLock.Lock()
	pool := bufPools[b.Cap()]
	bufPoolsLock.Unlock()
	if pool != nil {
		pool.Put(b)
	}
}

// getBuffer fetches (lazily creating) a pooled fixedBuffer of the given capacity.
func getBuffer(size int) *fixedBuffer {
	bufPoolsLock.Lock()
	pool, ok := bufPools[size]
	if !ok {
		pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
		bufPools[size] = pool
	}
	bufPoolsLock.Unlock()
	return pool.Get().(*fixedBuffer)
}
diff --git a/unum/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/unum/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
new file mode 100644
index 0000000..a56c462
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
@@ -0,0 +1,162 @@
package ioutils

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
	// Create the temp file in the destination's directory so the final
	// os.Rename stays on one filesystem (rename is only atomic then).
	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
	if err != nil {
		return nil, err
	}

	abspath, err := filepath.Abs(filename)
	if err != nil {
		// NOTE(review): the temp file is not removed on this path — confirm upstream.
		return nil, err
	}
	return &atomicFileWriter{
		f:    f,
		fn:   abspath,
		perm: perm,
	}, nil
}

// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	f, err := NewAtomicFileWriter(filename, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
		// Record the short write so Close discards the temp file instead of renaming it.
		f.(*atomicFileWriter).writeErr = err
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}

// atomicFileWriter accumulates writes in a temp file and promotes it to the
// final path only on a fully successful Close.
type atomicFileWriter struct {
	f        *os.File
	fn       string // absolute destination path
	writeErr error  // first write failure; suppresses the final rename
	perm     os.FileMode
}

// Write forwards to the temp file, remembering any write error.
func (w *atomicFileWriter) Write(dt []byte) (int, error) {
	n, err := w.f.Write(dt)
	if err != nil {
		w.writeErr = err
	}
	return n, err
}

// Close syncs, chmods, and renames the temp file over the destination.
// On any failure (or a prior write error) the temp file is removed instead.
func (w *atomicFileWriter) Close() (retErr error) {
	defer func() {
		if retErr != nil || w.writeErr != nil {
			os.Remove(w.f.Name())
		}
	}()
	if err := w.f.Sync(); err != nil {
		w.f.Close()
		return err
	}
	if err := w.f.Close(); err != nil {
		return err
	}
	// Apply the requested permissions before the rename so the file never
	// appears at the destination with the temp file's default mode.
	if err := os.Chmod(w.f.Name(), w.perm); err != nil {
		return err
	}
	if w.writeErr == nil {
		return os.Rename(w.f.Name(), w.fn)
	}
	// NOTE(review): an earlier write error yields a nil Close error here;
	// callers such as AtomicWriteFile are expected to track writeErr themselves.
	return nil
}

// AtomicWriteSet is used to atomically write a set
// of files and ensure they are visible at the same time.
// Must be committed to a new directory.
type AtomicWriteSet struct {
	root string
}

// NewAtomicWriteSet creates a new atomic write set to
// atomically create a set of files. The given directory
// is used as the base directory for storing files before
// commit. If no temporary directory is given the system
// default is used.
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
	td, err := ioutil.TempDir(tmpDir, "write-set-")
	if err != nil {
		return nil, err
	}

	return &AtomicWriteSet{
		root: td,
	}, nil
}

// WriteFile writes a file to the set, guaranteeing the file
// has been synced.
func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
	f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}
+
+type syncFileCloser struct {
+ *os.File
+}
+
+func (w syncFileCloser) Close() error {
+ err := w.File.Sync()
+ if err1 := w.File.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
// FileWriter opens a file writer inside the set. The file
// should be synced and closed before calling commit.
func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
	f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
	if err != nil {
		return nil, err
	}
	// syncFileCloser makes Close also fsync, honoring the set's durability guarantee.
	return syncFileCloser{f}, nil
}

// Cancel cancels the set and removes all temporary data
// created in the set.
func (ws *AtomicWriteSet) Cancel() error {
	return os.RemoveAll(ws.root)
}

// Commit moves all created files to the target directory. The
// target directory must not exist and the parent of the target
// directory must exist.
func (ws *AtomicWriteSet) Commit(target string) error {
	// A single directory rename makes every file in the set visible at once.
	return os.Rename(ws.root, target)
}

// String returns the location the set is writing to.
func (ws *AtomicWriteSet) String() string {
	return ws.root
}
diff --git a/unum/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/unum/vendor/github.com/docker/docker/pkg/ioutils/readers.go
new file mode 100644
index 0000000..63f3c07
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/ioutils/readers.go
@@ -0,0 +1,154 @@
+package ioutils
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "io"
+
+ "golang.org/x/net/context"
+)
+
+type readCloserWrapper struct {
+ io.Reader
+ closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewReadCloserWrapper returns a new io.ReadCloser.
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+ return &readCloserWrapper{
+ Reader: r,
+ closer: closer,
+ }
+}
+
+type readerErrWrapper struct {
+ reader io.Reader
+ closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+ n, err := r.reader.Read(p)
+ if err != nil {
+ r.closer()
+ }
+ return n, err
+}
+
+// NewReaderErrWrapper returns a new io.Reader.
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+ return &readerErrWrapper{
+ reader: r,
+ closer: closer,
+ }
+}
+
+// HashData returns the sha256 sum of src.
+func HashData(src io.Reader) (string, error) {
+ h := sha256.New()
+ if _, err := io.Copy(h, src); err != nil {
+ return "", err
+ }
+ return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+
+// OnEOFReader wraps an io.ReadCloser and a function
+// the function will run at end of file or when the reader is closed.
+type OnEOFReader struct {
+ Rc io.ReadCloser
+ Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.Rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+// Close closes the underlying reader and runs the function.
+func (r *OnEOFReader) Close() error {
+ err := r.Rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *OnEOFReader) runFunc() {
+ if fn := r.Fn; fn != nil {
+ fn()
+ r.Fn = nil
+ }
+}
+
+// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
+// operations.
+type cancelReadCloser struct {
+ cancel func()
+ pR *io.PipeReader // Stream to read from
+ pW *io.PipeWriter
+}
+
+// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
+// context is cancelled. The returned io.ReadCloser must be closed when it is
+// no longer needed.
+func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
+ pR, pW := io.Pipe()
+
+ // Create a context used to signal when the pipe is closed
+ doneCtx, cancel := context.WithCancel(context.Background())
+
+ p := &cancelReadCloser{
+ cancel: cancel,
+ pR: pR,
+ pW: pW,
+ }
+
+ go func() {
+ _, err := io.Copy(pW, in)
+ select {
+ case <-ctx.Done():
+ // If the context was closed, p.closeWithError
+ // was already called. Calling it again would
+ // change the error that Read returns.
+ default:
+ p.closeWithError(err)
+ }
+ in.Close()
+ }()
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ p.closeWithError(ctx.Err())
+ case <-doneCtx.Done():
+ return
+ }
+ }
+ }()
+
+ return p
+}
+
+// Read wraps the Read method of the pipe that provides data from the wrapped
+// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+ return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+ p.pW.CloseWithError(err)
+ p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+ p.closeWithError(io.EOF)
+ return nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/unum/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
new file mode 100644
index 0000000..1539ad2
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils
+
+import "io/ioutil"
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.
+func TempDir(dir, prefix string) (string, error) {
+ return ioutil.TempDir(dir, prefix)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/unum/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
new file mode 100644
index 0000000..c258e5f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package ioutils
+
+import (
+ "io/ioutil"
+
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+ tempDir, err := ioutil.TempDir(dir, prefix)
+ if err != nil {
+ return "", err
+ }
+ return longpath.AddPrefix(tempDir), nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/unum/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
new file mode 100644
index 0000000..52a4901
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+ "io"
+ "sync"
+)
+
+// WriteFlusher wraps the Write and Flush operation ensuring that every write
+// is a flush. In addition, the Close method can be called to intercept
+// Read/Write calls if the targets lifecycle has already ended.
+type WriteFlusher struct {
+ w io.Writer
+ flusher flusher
+ flushed chan struct{}
+ flushedOnce sync.Once
+ closed chan struct{}
+ closeLock sync.Mutex
+}
+
+type flusher interface {
+ Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+ select {
+ case <-wf.closed:
+ return 0, errWriteFlusherClosed
+ default:
+ }
+
+ n, err = wf.w.Write(b)
+ wf.Flush() // every write is a flush.
+ return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+ select {
+ case <-wf.closed:
+ return
+ default:
+ }
+
+ wf.flushedOnce.Do(func() {
+ close(wf.flushed)
+ })
+ wf.flusher.Flush()
+}
+
+// Flushed returns the state of flushed.
+// It returns true if the stream has been flushed, false otherwise.
+func (wf *WriteFlusher) Flushed() bool {
+ // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+// be used to detect whether or not a response code has been issued.
+ // Another hook should be used instead.
+ var flushed bool
+ select {
+ case <-wf.flushed:
+ flushed = true
+ default:
+ }
+ return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+ wf.closeLock.Lock()
+ defer wf.closeLock.Unlock()
+
+ select {
+ case <-wf.closed:
+ return errWriteFlusherClosed
+ default:
+ close(wf.closed)
+ }
+ return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+ var fl flusher
+ if f, ok := w.(flusher); ok {
+ fl = f
+ } else {
+ fl = &NopFlusher{}
+ }
+ return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/unum/vendor/github.com/docker/docker/pkg/ioutils/writers.go
new file mode 100644
index 0000000..ccc7f9c
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/ioutils/writers.go
@@ -0,0 +1,66 @@
+package ioutils
+
+import "io"
+
+// NopWriter represents a type which write operation is nop.
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+ return len(buf), nil
+}
+
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+// NopWriteCloser returns a nopWriteCloser.
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+ return &nopWriteCloser{w}
+}
+
+// NopFlusher represents a type which flush operation is nop.
+type NopFlusher struct{}
+
+// Flush is a nop operation.
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+ io.Writer
+ closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewWriteCloserWrapper returns a new io.WriteCloser.
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+ return &writeCloserWrapper{
+ Writer: r,
+ closer: closer,
+ }
+}
+
+// WriteCounter wraps a concrete io.Writer and hold a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when write return is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+ Count int64
+ Writer io.Writer
+}
+
+// NewWriteCounter returns a new WriteCounter.
+func NewWriteCounter(w io.Writer) *WriteCounter {
+ return &WriteCounter{
+ Writer: w,
+ }
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+ count, err = wc.Writer.Write(p)
+ wc.Count += int64(count)
+ return
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/unum/vendor/github.com/docker/docker/pkg/longpath/longpath.go
new file mode 100644
index 0000000..9b15bff
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/longpath/longpath.go
@@ -0,0 +1,26 @@
+// longpath introduces some constants and helper functions for handling long paths
+// in Windows, which are expected to be prepended with `\\?\` and followed by either
+// a drive letter, a UNC server\share, or a volume identifier.
+
+package longpath
+
+import (
+ "strings"
+)
+
+// Prefix is the longpath prefix for Windows file paths.
+const Prefix = `\\?\`
+
+// AddPrefix will add the Windows long path prefix to the path provided if
+// it does not already have it.
+func AddPrefix(path string) string {
+ if !strings.HasPrefix(path, Prefix) {
+ if strings.HasPrefix(path, `\\`) {
+ // This is a UNC path, so we need to add 'UNC' to the path as well.
+ path = Prefix + `UNC` + path[1:]
+ } else {
+ path = Prefix + path
+ }
+ }
+ return path
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/flags.go b/unum/vendor/github.com/docker/docker/pkg/mount/flags.go
new file mode 100644
index 0000000..607dbed
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/flags.go
@@ -0,0 +1,149 @@
+package mount
+
+import (
+ "fmt"
+ "strings"
+)
+
+var flags = map[string]struct {
+ clear bool
+ flag int
+}{
+ "defaults": {false, 0},
+ "ro": {false, RDONLY},
+ "rw": {true, RDONLY},
+ "suid": {true, NOSUID},
+ "nosuid": {false, NOSUID},
+ "dev": {true, NODEV},
+ "nodev": {false, NODEV},
+ "exec": {true, NOEXEC},
+ "noexec": {false, NOEXEC},
+ "sync": {false, SYNCHRONOUS},
+ "async": {true, SYNCHRONOUS},
+ "dirsync": {false, DIRSYNC},
+ "remount": {false, REMOUNT},
+ "mand": {false, MANDLOCK},
+ "nomand": {true, MANDLOCK},
+ "atime": {true, NOATIME},
+ "noatime": {false, NOATIME},
+ "diratime": {true, NODIRATIME},
+ "nodiratime": {false, NODIRATIME},
+ "bind": {false, BIND},
+ "rbind": {false, RBIND},
+ "unbindable": {false, UNBINDABLE},
+ "runbindable": {false, RUNBINDABLE},
+ "private": {false, PRIVATE},
+ "rprivate": {false, RPRIVATE},
+ "shared": {false, SHARED},
+ "rshared": {false, RSHARED},
+ "slave": {false, SLAVE},
+ "rslave": {false, RSLAVE},
+ "relatime": {false, RELATIME},
+ "norelatime": {true, RELATIME},
+ "strictatime": {false, STRICTATIME},
+ "nostrictatime": {true, STRICTATIME},
+}
+
+var validFlags = map[string]bool{
+ "": true,
+ "size": true,
+ "mode": true,
+ "uid": true,
+ "gid": true,
+ "nr_inodes": true,
+ "nr_blocks": true,
+ "mpol": true,
+}
+
+var propagationFlags = map[string]bool{
+ "bind": true,
+ "rbind": true,
+ "unbindable": true,
+ "runbindable": true,
+ "private": true,
+ "rprivate": true,
+ "shared": true,
+ "rshared": true,
+ "slave": true,
+ "rslave": true,
+}
+
+// MergeTmpfsOptions merge mount options to make sure there is no duplicate.
+func MergeTmpfsOptions(options []string) ([]string, error) {
+ // We use collisions maps to remove duplicates.
+ // For flag, the key is the flag value (the key for propagation flag is -1)
+ // For data=value, the key is the data
+ flagCollisions := map[int]bool{}
+ dataCollisions := map[string]bool{}
+
+ var newOptions []string
+ // We process in reverse order
+ for i := len(options) - 1; i >= 0; i-- {
+ option := options[i]
+ if option == "defaults" {
+ continue
+ }
+ if f, ok := flags[option]; ok && f.flag != 0 {
+ // There is only one propagation mode
+ key := f.flag
+ if propagationFlags[option] {
+ key = -1
+ }
+ // Check to see if there is collision for flag
+ if !flagCollisions[key] {
+ // We prepend the option and add to collision map
+ newOptions = append([]string{option}, newOptions...)
+ flagCollisions[key] = true
+ }
+ continue
+ }
+ opt := strings.SplitN(option, "=", 2)
+ if len(opt) != 2 || !validFlags[opt[0]] {
+ return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+ }
+ if !dataCollisions[opt[0]] {
+ // We prepend the option and add to collision map
+ newOptions = append([]string{option}, newOptions...)
+ dataCollisions[opt[0]] = true
+ }
+ }
+
+ return newOptions, nil
+}
+
+// Parse fstab type mount options into mount() flags
+// and device specific data
+func parseOptions(options string) (int, string) {
+ var (
+ flag int
+ data []string
+ )
+
+ for _, o := range strings.Split(options, ",") {
+ // If the option does not exist in the flags table or the flag
+ // is not supported on the platform,
+ // then it is a data value for a specific fs type
+ if f, exists := flags[o]; exists && f.flag != 0 {
+ if f.clear {
+ flag &= ^f.flag
+ } else {
+ flag |= f.flag
+ }
+ } else {
+ data = append(data, o)
+ }
+ }
+ return flag, strings.Join(data, ",")
+}
+
+// ParseTmpfsOptions parse fstab type mount options into flags and data
+func ParseTmpfsOptions(options string) (int, string, error) {
+ flags, data := parseOptions(options)
+ for _, o := range strings.Split(data, ",") {
+ opt := strings.SplitN(o, "=", 2)
+ if !validFlags[opt[0]] {
+ return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+ }
+ }
+ return flags, data, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/unum/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
new file mode 100644
index 0000000..5f76f33
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
@@ -0,0 +1,49 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+ // RDONLY will mount the filesystem as read-only.
+ RDONLY = C.MNT_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = C.MNT_NOSUID
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = C.MNT_NOEXEC
+
+ // SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+ SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NODEV = 0
+ NODIRATIME = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIVE = 0
+ RELATIME = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+ mntDetach = 0
+)
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/unum/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
new file mode 100644
index 0000000..0425d0d
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
@@ -0,0 +1,87 @@
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+ // RDONLY will mount the file system read-only.
+ RDONLY = unix.MS_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = unix.MS_NOSUID
+
+ // NODEV will not interpret character or block special devices on the file
+ // system.
+ NODEV = unix.MS_NODEV
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = unix.MS_NOEXEC
+
+ // SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+ SYNCHRONOUS = unix.MS_SYNCHRONOUS
+
+ // DIRSYNC will force all directory updates within the file system to be done
+ // synchronously. This affects the following system calls: create, link,
+ // unlink, symlink, mkdir, rmdir, mknod and rename.
+ DIRSYNC = unix.MS_DIRSYNC
+
+ // REMOUNT will attempt to remount an already-mounted file system. This is
+ // commonly used to change the mount flags for a file system, especially to
+ // make a readonly file system writeable. It does not change device or mount
+ // point.
+ REMOUNT = unix.MS_REMOUNT
+
+ // MANDLOCK will force mandatory locks on a filesystem.
+ MANDLOCK = unix.MS_MANDLOCK
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = unix.MS_NOATIME
+
+ // NODIRATIME will not update the directory access time.
+ NODIRATIME = unix.MS_NODIRATIME
+
+ // BIND remounts a subtree somewhere else.
+ BIND = unix.MS_BIND
+
+ // RBIND remounts a subtree and all possible submounts somewhere else.
+ RBIND = unix.MS_BIND | unix.MS_REC
+
+ // UNBINDABLE creates a mount which cannot be cloned through a bind operation.
+ UNBINDABLE = unix.MS_UNBINDABLE
+
+ // RUNBINDABLE marks the entire mount tree as UNBINDABLE.
+ RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC
+
+ // PRIVATE creates a mount which carries no propagation abilities.
+ PRIVATE = unix.MS_PRIVATE
+
+ // RPRIVATE marks the entire mount tree as PRIVATE.
+ RPRIVATE = unix.MS_PRIVATE | unix.MS_REC
+
+ // SLAVE creates a mount which receives propagation from its master, but not
+ // vice versa.
+ SLAVE = unix.MS_SLAVE
+
+ // RSLAVE marks the entire mount tree as SLAVE.
+ RSLAVE = unix.MS_SLAVE | unix.MS_REC
+
+ // SHARED creates a mount which provides the ability to create mirrors of
+ // that mount such that mounts and unmounts within any of the mirrors
+ // propagate to the other mirrors.
+ SHARED = unix.MS_SHARED
+
+ // RSHARED marks the entire mount tree as SHARED.
+ RSHARED = unix.MS_SHARED | unix.MS_REC
+
+ // RELATIME updates inode access times relative to modify or change time.
+ RELATIME = unix.MS_RELATIME
+
+ // STRICTATIME allows to explicitly request full atime updates. This makes
+ // it possible for the kernel to default to relatime or noatime but still
+ // allow userspace to override it.
+ STRICTATIME = unix.MS_STRICTATIME
+
+ mntDetach = unix.MNT_DETACH
+)
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/unum/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
new file mode 100644
index 0000000..9ed741e
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
@@ -0,0 +1,31 @@
+// +build !linux,!freebsd freebsd,!cgo solaris,!cgo
+
+package mount
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NOATIME = 0
+ NODEV = 0
+ NODIRATIME = 0
+ NOEXEC = 0
+ NOSUID = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIME = 0
+ RELATIVE = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+ SYNCHRONOUS = 0
+ RDONLY = 0
+ mntDetach = 0
+)
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/mount.go b/unum/vendor/github.com/docker/docker/pkg/mount/mount.go
new file mode 100644
index 0000000..c9fdfd6
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/mount.go
@@ -0,0 +1,86 @@
+package mount
+
+import (
+ "sort"
+ "strings"
+)
+
+// GetMounts retrieves a list of mounts for the current running process.
+func GetMounts() ([]*Info, error) {
+ return parseMountTable()
+}
+
+// Mounted determines if a specified mountpoint has been mounted.
+// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
+func Mounted(mountpoint string) (bool, error) {
+ entries, err := parseMountTable()
+ if err != nil {
+ return false, err
+ }
+
+ // Search the table for the mountpoint
+ for _, e := range entries {
+ if e.Mountpoint == mountpoint {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// Mount will mount filesystem according to the specified configuration, on the
+// condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func Mount(device, target, mType, options string) error {
+ flag, _ := parseOptions(options)
+ if flag&REMOUNT != REMOUNT {
+ if mounted, err := Mounted(target); err != nil || mounted {
+ return err
+ }
+ }
+ return ForceMount(device, target, mType, options)
+}
+
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* if the target path is not already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func ForceMount(device, target, mType, options string) error {
+ flag, data := parseOptions(options)
+ return mount(device, target, mType, uintptr(flag), data)
+}
+
+// Unmount lazily unmounts a filesystem on supported platforms, otherwise
+// does a normal unmount.
+func Unmount(target string) error {
+ if mounted, err := Mounted(target); err != nil || !mounted {
+ return err
+ }
+ return unmount(target, mntDetach)
+}
+
+// RecursiveUnmount unmounts the target and all mounts underneath, starting with
+// the deepest mount first.
+func RecursiveUnmount(target string) error {
+ mounts, err := GetMounts()
+ if err != nil {
+ return err
+ }
+
+ // Make the deepest mount be first
+ sort.Sort(sort.Reverse(byMountpoint(mounts)))
+
+ for i, m := range mounts {
+ if !strings.HasPrefix(m.Mountpoint, target) {
+ continue
+ }
+ if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
+ if mounted, err := Mounted(m.Mountpoint); err != nil || mounted {
+ return err
+ }
+ // Ignore errors for submounts and continue trying to unmount others
+			// The final unmount should fail if there are any submounts remaining
+ }
+ }
+ return nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/unum/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
new file mode 100644
index 0000000..814896c
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,60 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "strings"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+ out := make([]C.struct_iovec, len(options))
+ for i, option := range options {
+ out[i].iov_base = unsafe.Pointer(C.CString(option))
+ out[i].iov_len = C.size_t(len(option) + 1)
+ }
+ return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ isNullFS := false
+
+ xs := strings.Split(data, ",")
+ for _, x := range xs {
+ if x == "bind" {
+ isNullFS = true
+ }
+ }
+
+ options := []string{"fspath", target}
+ if isNullFS {
+ options = append(options, "fstype", "nullfs", "target", device)
+ } else {
+ options = append(options, "fstype", mType, "from", device)
+ }
+ rawOptions := allocateIOVecs(options)
+ for _, rawOption := range rawOptions {
+ defer C.free(rawOption.iov_base)
+ }
+
+ if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+ reason := C.GoString(C.strerror(*C.__error()))
+ return fmt.Errorf("Failed to call nmount: %s", reason)
+ }
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return unix.Unmount(target, flag)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/unum/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
new file mode 100644
index 0000000..39c36d4
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
@@ -0,0 +1,57 @@
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+const (
+	// ptypes is the set of propagation types.
+ ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
+
+	// pflags is the full set of valid flags for a change propagation call.
+ pflags = ptypes | unix.MS_REC | unix.MS_SILENT
+
+ // broflags is the combination of bind and read only
+ broflags = unix.MS_BIND | unix.MS_RDONLY
+)
+
+// isremount returns true if either device name or flags identify a remount request, false otherwise.
+func isremount(device string, flags uintptr) bool {
+ switch {
+ // We treat device "" and "none" as a remount request to provide compatibility with
+ // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
+ case flags&unix.MS_REMOUNT != 0, device == "", device == "none":
+ return true
+ default:
+ return false
+ }
+}
+
+func mount(device, target, mType string, flags uintptr, data string) error {
+ oflags := flags &^ ptypes
+ if !isremount(device, flags) || data != "" {
+ // Initial call applying all non-propagation flags for mount
+ // or remount with changed data
+ if err := unix.Mount(device, target, mType, oflags, data); err != nil {
+ return err
+ }
+ }
+
+ if flags&ptypes != 0 {
+ // Change the propagation type.
+ if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
+ return err
+ }
+ }
+
+ if oflags&broflags == broflags {
+ // Remount the bind to apply read only.
+ return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "")
+ }
+
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return unix.Unmount(target, flag)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/unum/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
new file mode 100644
index 0000000..48b8677
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
@@ -0,0 +1,34 @@
+// +build solaris,cgo
+
+package mount
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <sys/mount.h>
+// int Mount(const char *spec, const char *dir, int mflag,
+// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
+// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
+// }
+import "C"
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ spec := C.CString(device)
+ dir := C.CString(target)
+ fstype := C.CString(mType)
+ _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
+ C.free(unsafe.Pointer(spec))
+ C.free(unsafe.Pointer(dir))
+ C.free(unsafe.Pointer(fstype))
+ return err
+}
+
+func unmount(target string, flag int) error {
+ err := unix.Unmount(target, flag)
+ return err
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/unum/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
new file mode 100644
index 0000000..a2a3bb4
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ panic("Not implemented")
+}
+
+func unmount(target string, flag int) error {
+ panic("Not implemented")
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
new file mode 100644
index 0000000..ff4cc1d
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
@@ -0,0 +1,54 @@
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+ // ID is a unique identifier of the mount (may be reused after umount).
+ ID int
+
+ // Parent indicates the ID of the mount parent (or of self for the top of the
+ // mount tree).
+ Parent int
+
+ // Major indicates one half of the device ID which identifies the device class.
+ Major int
+
+ // Minor indicates one half of the device ID which identifies a specific
+ // instance of device.
+ Minor int
+
+ // Root of the mount within the filesystem.
+ Root string
+
+ // Mountpoint indicates the mount point relative to the process's root.
+ Mountpoint string
+
+ // Opts represents mount-specific options.
+ Opts string
+
+ // Optional represents optional fields.
+ Optional string
+
+ // Fstype indicates the type of filesystem, such as EXT3.
+ Fstype string
+
+ // Source indicates filesystem specific information or "none".
+ Source string
+
+ // VfsOpts represents per super block options.
+ VfsOpts string
+}
+
+type byMountpoint []*Info
+
+func (by byMountpoint) Len() int {
+ return len(by)
+}
+
+func (by byMountpoint) Less(i, j int) bool {
+ return by[i].Mountpoint < by[j].Mountpoint
+}
+
+func (by byMountpoint) Swap(i, j int) {
+ by[i], by[j] = by[j], by[i]
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 0000000..4f32edc
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,41 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts.
+func parseMountTable() ([]*Info, error) {
+ var rawEntries *C.struct_statfs
+
+ count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+ if count == 0 {
+ return nil, fmt.Errorf("Failed to call getmntinfo")
+ }
+
+ var entries []C.struct_statfs
+ header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+ header.Cap = count
+ header.Len = count
+ header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+ var out []*Info
+ for _, entry := range entries {
+ var mountinfo Info
+ mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+ mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+ mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+ out = append(out, &mountinfo)
+ }
+ return out, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
new file mode 100644
index 0000000..be69fee
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
@@ -0,0 +1,95 @@
+// +build linux
+
+package mount
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+const (
+ /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+ (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
+
+ (1) mount ID: unique identifier of the mount (may be reused after umount)
+ (2) parent ID: ID of parent (or of self for the top of the mount tree)
+ (3) major:minor: value of st_dev for files on filesystem
+ (4) root: root of the mount within the filesystem
+ (5) mount point: mount point relative to the process's root
+ (6) mount options: per mount options
+ (7) optional fields: zero or more fields of the form "tag[:value]"
+ (8) separator: marks the end of the optional fields
+ (9) filesystem type: name of filesystem of the form "type[.subtype]"
+ (10) mount source: filesystem specific information or "none"
+ (11) super options: per super block options*/
+ mountinfoFormat = "%d %d %d:%d %s %s %s %s"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
+func parseMountTable() ([]*Info, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]*Info, error) {
+ var (
+ s = bufio.NewScanner(r)
+ out = []*Info{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ var (
+ p = &Info{}
+ text = s.Text()
+ optionalFields string
+ )
+
+ if _, err := fmt.Sscanf(text, mountinfoFormat,
+ &p.ID, &p.Parent, &p.Major, &p.Minor,
+ &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
+ return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+ }
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ index := strings.Index(text, " - ")
+ postSeparatorFields := strings.Fields(text[index+3:])
+ if len(postSeparatorFields) < 3 {
+ return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+ }
+
+ if optionalFields != "-" {
+ p.Optional = optionalFields
+ }
+
+ p.Fstype = postSeparatorFields[0]
+ p.Source = postSeparatorFields[1]
+ p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
+ out = append(out, p)
+ }
+ return out, nil
+}
+
+// PidMountInfo collects the mounts for a specific process ID. If the process
+// ID is unknown, it is better to use `GetMounts` which will inspect
+// "/proc/self/mountinfo" instead.
+func PidMountInfo(pid int) ([]*Info, error) {
+ f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
new file mode 100644
index 0000000..ad9ab57
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
@@ -0,0 +1,37 @@
+// +build solaris,cgo
+
+package mount
+
+/*
+#include <stdio.h>
+#include <sys/mnttab.h>
+*/
+import "C"
+
+import (
+ "fmt"
+)
+
+func parseMountTable() ([]*Info, error) {
+ mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
+ if mnttab == nil {
+ return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
+ }
+
+ var out []*Info
+ var mp C.struct_mnttab
+
+ ret := C.getmntent(mnttab, &mp)
+ for ret == 0 {
+ var mountinfo Info
+ mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
+ mountinfo.Source = C.GoString(mp.mnt_special)
+ mountinfo.Fstype = C.GoString(mp.mnt_fstype)
+ mountinfo.Opts = C.GoString(mp.mnt_mntopts)
+ out = append(out, &mountinfo)
+ ret = C.getmntent(mnttab, &mp)
+ }
+
+ C.fclose(mnttab)
+ return out, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
new file mode 100644
index 0000000..7fbcf19
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
@@ -0,0 +1,12 @@
+// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func parseMountTable() ([]*Info, error) {
+ return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
new file mode 100644
index 0000000..dab8a37
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
@@ -0,0 +1,6 @@
+package mount
+
+func parseMountTable() ([]*Info, error) {
+ // Do NOT return an error!
+ return nil, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/unum/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
new file mode 100644
index 0000000..8ceec84
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
@@ -0,0 +1,69 @@
+// +build linux
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "runbindable")
+}
+
+func ensureMountedAs(mountPoint, options string) error {
+ mounted, err := Mounted(mountPoint)
+ if err != nil {
+ return err
+ }
+
+ if !mounted {
+ if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
+ return err
+ }
+ }
+ if _, err = Mounted(mountPoint); err != nil {
+ return err
+ }
+
+ return ForceMount("", mountPoint, "none", options)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go b/unum/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go
new file mode 100644
index 0000000..09f6b03
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go
@@ -0,0 +1,58 @@
+// +build solaris
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "runbindable")
+}
+
+func ensureMountedAs(mountPoint, options string) error {
+ // TODO: Solaris does not support bind mounts.
+ // Evaluate lofs and also look at the relevant
+ // mount flags to be supported.
+ return nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/chtimes.go b/unum/vendor/github.com/docker/docker/pkg/system/chtimes.go
new file mode 100644
index 0000000..056d199
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/chtimes.go
@@ -0,0 +1,35 @@
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// Chtimes changes the access time and modified time of a file at the given path
+func Chtimes(name string, atime time.Time, mtime time.Time) error {
+ unixMinTime := time.Unix(0, 0)
+ unixMaxTime := maxTime
+
+ // If the modified time is prior to the Unix Epoch, or after the
+ // end of Unix Time, os.Chtimes has undefined behavior
+ // default to Unix Epoch in this case, just in case
+
+ if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
+ atime = unixMinTime
+ }
+
+ if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
+ mtime = unixMinTime
+ }
+
+ if err := os.Chtimes(name, atime, mtime); err != nil {
+ return err
+ }
+
+ // Take platform specific action for setting create time.
+ if err := setCTime(name, mtime); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/unum/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
new file mode 100644
index 0000000..09d58bc
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+import (
+ "time"
+)
+
+//setCTime will set the create time on a file. On Unix, the create
+//time is updated as a side effect of setting the modified time, so
+//no action is required.
+func setCTime(path string, ctime time.Time) error {
+ return nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/unum/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
new file mode 100644
index 0000000..45428c1
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+
+package system
+
+import (
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+//setCTime will set the create time on a file. On Windows, this requires
+//calling SetFileTime and explicitly including the create time.
+func setCTime(path string, ctime time.Time) error {
+ ctimespec := windows.NsecToTimespec(ctime.UnixNano())
+ pathp, e := windows.UTF16PtrFromString(path)
+ if e != nil {
+ return e
+ }
+ h, e := windows.CreateFile(pathp,
+ windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
+ windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
+ if e != nil {
+ return e
+ }
+ defer windows.Close(h)
+ c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec))
+ return windows.SetFileTime(h, &c, nil, nil)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/errors.go b/unum/vendor/github.com/docker/docker/pkg/system/errors.go
new file mode 100644
index 0000000..2883189
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/errors.go
@@ -0,0 +1,10 @@
+package system
+
+import (
+ "errors"
+)
+
+var (
+ // ErrNotSupportedPlatform means the platform is not supported.
+ ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
+)
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/exitcode.go b/unum/vendor/github.com/docker/docker/pkg/system/exitcode.go
new file mode 100644
index 0000000..60f0514
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/exitcode.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+ "fmt"
+ "os/exec"
+ "syscall"
+)
+
+// GetExitCode returns the ExitStatus of the specified error if its type is
+// exec.ExitError, returns 0 and an error otherwise.
+func GetExitCode(err error) (int, error) {
+ exitCode := 0
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+ return procExit.ExitStatus(), nil
+ }
+ }
+ return exitCode, fmt.Errorf("failed to get exit code")
+}
+
+// ProcessExitCode process the specified error and returns the exit status code
+// if the error was of type exec.ExitError, returns nothing otherwise.
+func ProcessExitCode(err error) (exitCode int) {
+ if err != nil {
+ var exiterr error
+ if exitCode, exiterr = GetExitCode(err); exiterr != nil {
+ // TODO: Fix this so we check the error's text.
+ // we've failed to retrieve exit code, so we set it to 127
+ exitCode = 127
+ }
+ }
+ return
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/filesys.go b/unum/vendor/github.com/docker/docker/pkg/system/filesys.go
new file mode 100644
index 0000000..102565f
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/filesys.go
@@ -0,0 +1,67 @@
+// +build !windows
+
+package system
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll on unix systems.
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
+ return MkdirAll(path, perm, sddl)
+}
+
+// MkdirAll creates a directory named path along with any necessary parents,
+// with permission specified by attribute perm for all dir created.
+func MkdirAll(path string, perm os.FileMode, sddl string) error {
+ return os.MkdirAll(path, perm)
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs.
+func IsAbs(path string) bool {
+ return filepath.IsAbs(path)
+}
+
+// The functions below here are wrappers for the equivalents in the os and ioutils packages.
+// They are passthrough on Unix platforms, and only relevant on Windows.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+ return os.Create(name)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+ return os.Open(name)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead. It opens the named file with specified flag
+// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
+// methods on the returned File can be used for I/O.
+// If there is an error, it will be of type *PathError.
+func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
+ return os.OpenFile(name, flag, perm)
+}
+
+// TempFileSequential creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *os.File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func TempFileSequential(dir, prefix string) (f *os.File, err error) {
+ return ioutil.TempFile(dir, prefix)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/unum/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
new file mode 100644
index 0000000..a61b53d
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -0,0 +1,298 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+ "unsafe"
+
+ winio "github.com/Microsoft/go-winio"
+ "golang.org/x/sys/windows"
+)
+
+const (
+ // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
+ SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+ // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System
+ SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
+// with an appropriate SDDL defined ACL.
+func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
+ return mkdirall(path, true, sddl)
+}
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, _ os.FileMode, sddl string) error {
+ return mkdirall(path, false, sddl)
+}
+
+// mkdirall is a custom version of os.MkdirAll modified for use on Windows
+// so that it is both volume path aware, and can create a directory with
+// a DACL.
+func mkdirall(path string, applyACL bool, sddl string) error {
+ if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+ return nil
+ }
+
+ // The rest of this method is largely copied from os.MkdirAll and should be kept
+ // as-is to ensure compatibility.
+
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := os.Stat(path)
+ if err == nil {
+ if dir.IsDir() {
+ return nil
+ }
+ return &os.PathError{
+ Op: "mkdir",
+ Path: path,
+ Err: syscall.ENOTDIR,
+ }
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(path)
+ for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent
+ err = mkdirall(path[0:j-1], false, sddl)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+ if applyACL {
+ err = mkdirWithACL(path, sddl)
+ } else {
+ err = os.Mkdir(path, 0)
+ }
+
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := os.Lstat(path)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError. .
+//
+// This is a modified and combined version of os.Mkdir and windows.Mkdir
+// in golang to cater for creating a directory am ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
+func mkdirWithACL(name string, sddl string) error {
+ sa := windows.SecurityAttributes{Length: 0}
+ sd, err := winio.SddlToSecurityDescriptor(sddl)
+ if err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
+
+ namep, err := windows.UTF16PtrFromString(name)
+ if err != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: err}
+ }
+
+ e := windows.CreateDirectory(namep, &sa)
+ if e != nil {
+ return &os.PathError{Op: "mkdir", Path: name, Err: e}
+ }
+ return nil
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// as it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being processed
+// by the daemon. This SHOULD be treated as absolute from a docker processing
+// perspective.
+func IsAbs(path string) bool {
+ if !filepath.IsAbs(path) {
+ if !strings.HasPrefix(path, string(os.PathSeparator)) {
+ return false
+ }
+ }
+ return true
+}
+
+// The origin of the functions below here are the golang OS and windows packages,
+// slightly modified to only cope with files, not directories due to the
+// specific use case.
+//
+// The alteration is to allow a file on Windows to be opened with
+// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating
+// the standby list, particularly when accessing large files such as layer.tar.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+ return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+ return OpenFileSequential(name, os.O_RDONLY, 0)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead.
+// If there is an error, it will be of type *PathError.
+func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
+ if name == "" {
+ return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
+ }
+ r, errf := windowsOpenFileSequential(name, flag, 0)
+ if errf == nil {
+ return r, nil
+ }
+ return nil, &os.PathError{Op: "open", Path: name, Err: errf}
+}
+
+func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
+ r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0)
+ if e != nil {
+ return nil, e
+ }
+ return os.NewFile(uintptr(r), name), nil
+}
+
+func makeInheritSa() *windows.SecurityAttributes {
+ var sa windows.SecurityAttributes
+ sa.Length = uint32(unsafe.Sizeof(sa))
+ sa.InheritHandle = 1
+ return &sa
+}
+
+func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) {
+ if len(path) == 0 {
+ return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
+ }
+ pathp, err := windows.UTF16PtrFromString(path)
+ if err != nil {
+ return windows.InvalidHandle, err
+ }
+ var access uint32
+ switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
+ case windows.O_RDONLY:
+ access = windows.GENERIC_READ
+ case windows.O_WRONLY:
+ access = windows.GENERIC_WRITE
+ case windows.O_RDWR:
+ access = windows.GENERIC_READ | windows.GENERIC_WRITE
+ }
+ if mode&windows.O_CREAT != 0 {
+ access |= windows.GENERIC_WRITE
+ }
+ if mode&windows.O_APPEND != 0 {
+ access &^= windows.GENERIC_WRITE
+ access |= windows.FILE_APPEND_DATA
+ }
+ sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
+ var sa *windows.SecurityAttributes
+ if mode&windows.O_CLOEXEC == 0 {
+ sa = makeInheritSa()
+ }
+ var createmode uint32
+ switch {
+ case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
+ createmode = windows.CREATE_NEW
+ case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
+ createmode = windows.CREATE_ALWAYS
+ case mode&windows.O_CREAT == windows.O_CREAT:
+ createmode = windows.OPEN_ALWAYS
+ case mode&windows.O_TRUNC == windows.O_TRUNC:
+ createmode = windows.TRUNCATE_EXISTING
+ default:
+ createmode = windows.OPEN_EXISTING
+ }
+ // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
+ //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
+ const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+ h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
+ return h, e
+}
+
+// Helpers for TempFileSequential
+var rand uint32
+var randmu sync.Mutex
+
+func reseed() uint32 {
+ return uint32(time.Now().UnixNano() + int64(os.Getpid()))
+}
+func nextSuffix() string {
+ randmu.Lock()
+ r := rand
+ if r == 0 {
+ r = reseed()
+ }
+ r = r*1664525 + 1013904223 // constants from Numerical Recipes
+ rand = r
+ randmu.Unlock()
+ return strconv.Itoa(int(1e9 + r%1e9))[1:]
+}
+
+// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential
+// file access. Below is the original comment from golang:
+// TempFile creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *os.File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func TempFileSequential(dir, prefix string) (f *os.File, err error) {
+ if dir == "" {
+ dir = os.TempDir()
+ }
+
+ nconflict := 0
+ for i := 0; i < 10000; i++ {
+ name := filepath.Join(dir, prefix+nextSuffix())
+ f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+ if os.IsExist(err) {
+ if nconflict++; nconflict > 10 {
+ randmu.Lock()
+ rand = reseed()
+ randmu.Unlock()
+ }
+ continue
+ }
+ break
+ }
+ return
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/init.go b/unum/vendor/github.com/docker/docker/pkg/system/init.go
new file mode 100644
index 0000000..1793508
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/init.go
@@ -0,0 +1,22 @@
+package system
+
+import (
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// Used by chtimes
+var maxTime time.Time
+
+func init() {
+ // chtimes initialization
+ if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+ // This is a 64 bit timespec
+ // os.Chtimes limits time to the following
+ maxTime = time.Unix(0, 1<<63-1)
+ } else {
+ // This is a 32 bit timespec
+ maxTime = time.Unix(1<<31-1, 0)
+ }
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/init_windows.go b/unum/vendor/github.com/docker/docker/pkg/system/init_windows.go
new file mode 100644
index 0000000..019c664
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/init_windows.go
@@ -0,0 +1,17 @@
+package system
+
+import "os"
+
+// LCOWSupported determines if Linux Containers on Windows are supported.
+// Note: This feature is in development (06/17) and enabled through an
+// environment variable. At a future time, it will be enabled based
+// on build number. @jhowardmsft
+var lcowSupported = false
+
+func init() {
+ // LCOW initialization
+ if os.Getenv("LCOW_SUPPORTED") != "" {
+ lcowSupported = true
+ }
+
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/unum/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
new file mode 100644
index 0000000..cff33bb
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package system
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+ return false
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/unum/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
new file mode 100644
index 0000000..e54d01e
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
@@ -0,0 +1,6 @@
+package system
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+ return lcowSupported
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/unum/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
new file mode 100644
index 0000000..bd23c4d
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Lstat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Lstat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/unum/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
new file mode 100644
index 0000000..e51df0d
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -0,0 +1,14 @@
+package system
+
+import "os"
+
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+func Lstat(path string) (*StatT, error) {
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return fromStatT(&fi)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/meminfo.go b/unum/vendor/github.com/docker/docker/pkg/system/meminfo.go
new file mode 100644
index 0000000..3b6e947
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+ // Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+ // kernel binary code).
+ MemTotal int64
+
+ // Amount of free memory.
+ MemFree int64
+
+ // Total amount of swap space available.
+ SwapTotal int64
+
+ // Amount of swap space that is currently unused.
+ SwapFree int64
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/unum/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
new file mode 100644
index 0000000..385f1d5
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -0,0 +1,65 @@
+package system
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/docker/go-units"
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ file, err := os.Open("/proc/meminfo")
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given an io.Reader to the file.
+// Throws error if there are problems reading from the file
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+ meminfo := &MemInfo{}
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ // Expected format: ["MemTotal:", "1234", "kB"]
+ parts := strings.Fields(scanner.Text())
+
+ // Sanity checks: Skip malformed entries.
+ if len(parts) < 3 || parts[2] != "kB" {
+ continue
+ }
+
+ // Convert to bytes.
+ size, err := strconv.Atoi(parts[1])
+ if err != nil {
+ continue
+ }
+ bytes := int64(size) * units.KiB
+
+ switch parts[0] {
+ case "MemTotal:":
+ meminfo.MemTotal = bytes
+ case "MemFree:":
+ meminfo.MemFree = bytes
+ case "SwapTotal:":
+ meminfo.SwapTotal = bytes
+ case "SwapFree:":
+ meminfo.SwapFree = bytes
+ }
+
+ }
+
+ // Handle errors that may have occurred during the reading of the file.
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return meminfo, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/unum/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
new file mode 100644
index 0000000..925776e
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
@@ -0,0 +1,129 @@
+// +build solaris,cgo
+
+package system
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+// #cgo CFLAGS: -std=c99
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/param.h>
+// struct swaptable *allocSwaptable(int num) {
+// struct swaptable *st;
+// struct swapent *swapent;
+// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+// swapent = st->swt_ent;
+// for (int i = 0; i < num; i++,swapent++) {
+// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+// }
+// st->swt_n = num;
+// return st;
+//}
+// void freeSwaptable (struct swaptable *st) {
+// struct swapent *swapent = st->swt_ent;
+// for (int i = 0; i < st->swt_n; i++,swapent++) {
+// free(swapent->ste_path);
+// }
+// free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+// return ent[i];
+// }
+// int64_t getPpKernel() {
+// int64_t pp_kernel = 0;
+// kstat_ctl_t *ksc;
+// kstat_t *ks;
+// kstat_named_t *knp;
+// kid_t kid;
+//
+// if ((ksc = kstat_open()) == NULL) {
+// return -1;
+// }
+// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+// return -1;
+// }
+// if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+// return -1;
+// }
+// switch (knp->data_type) {
+// case KSTAT_DATA_UINT64:
+// pp_kernel = knp->value.ui64;
+// break;
+// case KSTAT_DATA_UINT32:
+// pp_kernel = knp->value.ui32;
+// break;
+// }
+// pp_kernel *= sysconf(_SC_PAGESIZE);
+// return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// Get the system memory info using sysconf same as prtconf
+func getTotalMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_PHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+func getFreeMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_AVPHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+
+ ppKernel := C.getPpKernel()
+ MemTotal := getTotalMem()
+ MemFree := getFreeMem()
+ SwapTotal, SwapFree, err := getSysSwap()
+
+ if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
+ SwapFree < 0 {
+ return nil, fmt.Errorf("error getting system memory info %v\n", err)
+ }
+
+ meminfo := &MemInfo{}
+ // Total memory is total physical memory less than memory locked by kernel
+ meminfo.MemTotal = MemTotal - int64(ppKernel)
+ meminfo.MemFree = MemFree
+ meminfo.SwapTotal = SwapTotal
+ meminfo.SwapFree = SwapFree
+
+ return meminfo, nil
+}
+
+func getSysSwap() (int64, int64, error) {
+ var tSwap int64
+ var fSwap int64
+ var diskblksPerPage int64
+ num, err := C.swapctl(C.SC_GETNSWP, nil)
+ if err != nil {
+ return -1, -1, err
+ }
+ st := C.allocSwaptable(num)
+ _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
+ if err != nil {
+ C.freeSwaptable(st)
+ return -1, -1, err
+ }
+
+ diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
+ for i := 0; i < int(num); i++ {
+ swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
+ tSwap += int64(swapent.ste_pages) * diskblksPerPage
+ fSwap += int64(swapent.ste_free) * diskblksPerPage
+ }
+ C.freeSwaptable(st)
+ return tSwap, fSwap, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/unum/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
new file mode 100644
index 0000000..3ce019d
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!windows,!solaris
+
+package system
+
+// ReadMemInfo is not supported on platforms other than linux, windows and solaris.
+func ReadMemInfo() (*MemInfo, error) {
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/unum/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
new file mode 100644
index 0000000..883944a
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
@@ -0,0 +1,45 @@
+package system
+
+import (
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var (
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+ procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
+type memorystatusex struct {
+ dwLength uint32
+ dwMemoryLoad uint32
+ ullTotalPhys uint64
+ ullAvailPhys uint64
+ ullTotalPageFile uint64
+ ullAvailPageFile uint64
+ ullTotalVirtual uint64
+ ullAvailVirtual uint64
+ ullAvailExtendedVirtual uint64
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ msi := &memorystatusex{
+ dwLength: 64,
+ }
+ r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
+ if r1 == 0 {
+ return &MemInfo{}, nil
+ }
+ return &MemInfo{
+ MemTotal: int64(msi.ullTotalPhys),
+ MemFree: int64(msi.ullAvailPhys),
+ SwapTotal: int64(msi.ullTotalPageFile),
+ SwapFree: int64(msi.ullAvailPageFile),
+ }, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/mknod.go b/unum/vendor/github.com/docker/docker/pkg/system/mknod.go
new file mode 100644
index 0000000..af79a65
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/mknod.go
@@ -0,0 +1,22 @@
+// +build !windows
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Mknod creates a filesystem node (file, device special file or named pipe) named path
+// with attributes specified by mode and dev.
+func Mknod(path string, mode uint32, dev int) error {
+ return unix.Mknod(path, mode, dev)
+}
+
+// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
+// and minor number of the newly created device special file.
+// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
+// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
+// then the top 12 bits of the minor.
+func Mkdev(major int64, minor int64) uint32 {
+ return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/unum/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
new file mode 100644
index 0000000..2e863c0
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package system
+
+// Mknod is not implemented on Windows.
+func Mknod(path string, mode uint32, dev int) error {
+ return ErrNotSupportedPlatform
+}
+
+// Mkdev is not implemented on Windows.
+func Mkdev(major int64, minor int64) uint32 {
+ panic("Mkdev not implemented on Windows.")
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/path.go b/unum/vendor/github.com/docker/docker/pkg/system/path.go
new file mode 100644
index 0000000..f634a6b
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/path.go
@@ -0,0 +1,21 @@
+package system
+
+import "runtime"
+
+const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// DefaultPathEnv is unix style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// ':' character.
+func DefaultPathEnv(platform string) string {
+ if runtime.GOOS == "windows" {
+ if platform != runtime.GOOS && LCOWSupported() {
+ return defaultUnixPathEnv
+ }
+ // Deliberately empty on Windows containers on Windows as the default path will be set by
+ // the container. Docker has no context of what the default path should be.
+ return ""
+ }
+ return defaultUnixPathEnv
+
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/path_unix.go b/unum/vendor/github.com/docker/docker/pkg/system/path_unix.go
new file mode 100644
index 0000000..f3762e6
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/path_unix.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package system
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive. This is a no-op on Linux.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ return path, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/path_windows.go b/unum/vendor/github.com/docker/docker/pkg/system/path_windows.go
new file mode 100644
index 0000000..aab8915
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/path_windows.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package system
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. Also, it translates it to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C: --> Fail
+// C:\ --> \
+// a --> a
+// /a --> \a
+// d:\ --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ if len(path) == 2 && string(path[1]) == ":" {
+ return "", fmt.Errorf("No relative path specified in %q", path)
+ }
+ if !filepath.IsAbs(path) || len(path) < 2 {
+ return filepath.FromSlash(path), nil
+ }
+ if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+ return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+ }
+ return filepath.FromSlash(path[2:]), nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/process_unix.go b/unum/vendor/github.com/docker/docker/pkg/system/process_unix.go
new file mode 100644
index 0000000..26c8b42
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/process_unix.go
@@ -0,0 +1,24 @@
+// +build linux freebsd solaris darwin
+
+package system
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsProcessAlive returns true if process with a given pid is running.
+func IsProcessAlive(pid int) bool {
+ err := unix.Kill(pid, syscall.Signal(0))
+ if err == nil || err == unix.EPERM {
+ return true
+ }
+
+ return false
+}
+
+// KillProcess force-stops a process.
+func KillProcess(pid int) {
+ unix.Kill(pid, unix.SIGKILL)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/rm.go b/unum/vendor/github.com/docker/docker/pkg/system/rm.go
new file mode 100644
index 0000000..101b569
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/rm.go
@@ -0,0 +1,80 @@
+package system
+
+import (
+ "os"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/mount"
+ "github.com/pkg/errors"
+)
+
+// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can
+// often be remedied.
+// Only use `EnsureRemoveAll` if you really want to make every effort to remove
+// a directory.
+//
+// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there
+// can be a race between reading directory entries and then actually attempting
+// to remove everything in the directory.
+// These types of errors do not need to be returned since it's ok for the dir to
+// be gone we can just retry the remove operation.
+//
+// This should not return a `os.ErrNotExist` kind of error under any circumstances
+func EnsureRemoveAll(dir string) error {
+ notExistErr := make(map[string]bool)
+
+ // track retries
+ exitOnErr := make(map[string]int)
+ maxRetry := 5
+
+ // Attempt to unmount anything beneath this dir first
+ mount.RecursiveUnmount(dir)
+
+ for {
+ err := os.RemoveAll(dir)
+ if err == nil {
+ return err
+ }
+
+ pe, ok := err.(*os.PathError)
+ if !ok {
+ return err
+ }
+
+ if os.IsNotExist(err) {
+ if notExistErr[pe.Path] {
+ return err
+ }
+ notExistErr[pe.Path] = true
+
+ // There is a race where some subdir can be removed but after the parent
+ // dir entries have been read.
+ // So the path could be from `os.Remove(subdir)`
+ // If the reported non-existent path is not the passed in `dir` we
+ // should just retry, but otherwise return with no error.
+ if pe.Path == dir {
+ return nil
+ }
+ continue
+ }
+
+ if pe.Err != syscall.EBUSY {
+ return err
+ }
+
+ if mounted, _ := mount.Mounted(pe.Path); mounted {
+ if e := mount.Unmount(pe.Path); e != nil {
+ if mounted, _ := mount.Mounted(pe.Path); mounted {
+ return errors.Wrapf(e, "error while removing %s", dir)
+ }
+ }
+ }
+
+ if exitOnErr[pe.Path] == maxRetry {
+ return err
+ }
+ exitOnErr[pe.Path]++
+ time.Sleep(100 * time.Millisecond)
+ }
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/unum/vendor/github.com/docker/docker/pkg/system/stat_darwin.go
new file mode 100644
index 0000000..715f05b
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/stat_darwin.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/unum/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
new file mode 100644
index 0000000..715f05b
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/unum/vendor/github.com/docker/docker/pkg/system/stat_linux.go
new file mode 100644
index 0000000..1939f95
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -0,0 +1,19 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: s.Mode,
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: s.Rdev,
+ mtim: s.Mtim}, nil
+}
+
+// FromStatT converts a syscall.Stat_t type to a system.Stat_t type
+// This is exposed on Linux as pkg/archive/changes uses it.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return fromStatT(s)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/unum/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
new file mode 100644
index 0000000..b607dea
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/unum/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
new file mode 100644
index 0000000..b607dea
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/unum/vendor/github.com/docker/docker/pkg/system/stat_unix.go
new file mode 100644
index 0000000..91c7d12
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/stat_unix.go
@@ -0,0 +1,60 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc. about a file.
+type StatT struct {
+ mode uint32
+ uid uint32
+ gid uint32
+ rdev uint64
+ size int64
+ mtim syscall.Timespec
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() uint32 {
+ return s.mode
+}
+
+// UID returns file's user id of owner.
+func (s StatT) UID() uint32 {
+ return s.uid
+}
+
+// GID returns file's group id of owner.
+func (s StatT) GID() uint32 {
+ return s.gid
+}
+
+// Rdev returns file's device ID (if it's special file).
+func (s StatT) Rdev() uint64 {
+ return s.rdev
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() syscall.Timespec {
+ return s.mtim
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/unum/vendor/github.com/docker/docker/pkg/system/stat_windows.go
new file mode 100644
index 0000000..6c63972
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/stat_windows.go
@@ -0,0 +1,49 @@
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, size, etc. about a file.
+type StatT struct {
+ mode os.FileMode
+ size int64
+ mtim time.Time
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+ return os.FileMode(s.mode)
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() time.Time {
+ return time.Time(s.mtim)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Stat(path string) (*StatT, error) {
+ fi, err := os.Stat(path)
+ if err != nil {
+ return nil, err
+ }
+ return fromStatT(&fi)
+}
+
+// fromStatT converts a os.FileInfo type to a system.StatT type
+func fromStatT(fi *os.FileInfo) (*StatT, error) {
+ return &StatT{
+ size: (*fi).Size(),
+ mode: (*fi).Mode(),
+ mtim: (*fi).ModTime()}, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/unum/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
new file mode 100644
index 0000000..49dbdd3
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
@@ -0,0 +1,17 @@
+// +build linux freebsd
+
+package system
+
+import "golang.org/x/sys/unix"
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall.
+func Unmount(dest string) error {
+ return unix.Unmount(dest, 0)
+}
+
+// CommandLineToArgv should not be used on Unix.
+// It simply returns commandLine as the only element in the returned array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ return []string{commandLine}, nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/unum/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
new file mode 100644
index 0000000..23e9b20
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
@@ -0,0 +1,122 @@
+package system
+
+import (
+ "unsafe"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sys/windows"
+)
+
+var (
+ ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
+ procGetVersionExW = modkernel32.NewProc("GetVersionExW")
+ procGetProductInfo = modkernel32.NewProc("GetProductInfo")
+)
+
+// OSVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type OSVersion struct {
+ Version uint32
+ MajorVersion uint8
+ MinorVersion uint8
+ Build uint16
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
+type osVersionInfoEx struct {
+ OSVersionInfoSize uint32
+ MajorVersion uint32
+ MinorVersion uint32
+ BuildNumber uint32
+ PlatformID uint32
+ CSDVersion [128]uint16
+ ServicePackMajor uint16
+ ServicePackMinor uint16
+ SuiteMask uint16
+ ProductType byte
+ Reserve byte
+}
+
+// GetOSVersion gets the operating system version on Windows. Note that
+// docker.exe must be manifested to get the correct version information.
+func GetOSVersion() OSVersion {
+ var err error
+ osv := OSVersion{}
+ osv.Version, err = windows.GetVersion()
+ if err != nil {
+ // GetVersion never fails.
+ panic(err)
+ }
+ osv.MajorVersion = uint8(osv.Version & 0xFF)
+ osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
+ osv.Build = uint16(osv.Version >> 16)
+ return osv
+}
+
+// IsWindowsClient returns true if the SKU is client
+// @engine maintainers - this function should not be removed or modified as it
+// is used to enforce licensing restrictions on Windows.
+func IsWindowsClient() bool {
+ osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
+ r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
+ if r1 == 0 {
+ logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err)
+ return false
+ }
+ const verNTWorkstation = 0x00000001
+ return osviex.ProductType == verNTWorkstation
+}
+
+// IsIoTCore returns true if the currently running image is based off of
+// Windows 10 IoT Core.
+// @engine maintainers - this function should not be removed or modified as it
+// is used to enforce licensing restrictions on Windows.
+func IsIoTCore() bool {
+ var returnedProductType uint32
+ r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType)))
+ if r1 == 0 {
+ logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err)
+ return false
+ }
+ const productIoTUAP = 0x0000007B
+ const productIoTUAPCommercial = 0x00000083
+ return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial
+}
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall. Not supported on Windows
+func Unmount(dest string) error {
+ return nil
+}
+
+// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ var argc int32
+
+ argsPtr, err := windows.UTF16PtrFromString(commandLine)
+ if err != nil {
+ return nil, err
+ }
+
+ argv, err := windows.CommandLineToArgv(argsPtr, &argc)
+ if err != nil {
+ return nil, err
+ }
+ defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv))))
+
+ newArgs := make([]string, argc)
+ for i, v := range (*argv)[:argc] {
+ newArgs[i] = string(windows.UTF16ToString((*v)[:]))
+ }
+
+ return newArgs, nil
+}
+
+// HasWin32KSupport determines whether containers that depend on win32k can
+// run on this machine. Win32k is the driver used to implement windowing.
+func HasWin32KSupport() bool {
+ // For now, check for ntuser API support on the host. In the future, a host
+ // may support win32k in containers even if the host does not support ntuser
+ // APIs.
+ return ntuserApiset.Load() == nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/umask.go b/unum/vendor/github.com/docker/docker/pkg/system/umask.go
new file mode 100644
index 0000000..5a10eda
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/umask.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package system
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// Umask sets current process's file mode creation mask to newmask
+// and returns oldmask.
+func Umask(newmask int) (oldmask int, err error) {
+ return unix.Umask(newmask), nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/unum/vendor/github.com/docker/docker/pkg/system/umask_windows.go
new file mode 100644
index 0000000..13f1de1
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/umask_windows.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package system
+
+// Umask is not supported on the windows platform.
+func Umask(newmask int) (oldmask int, err error) {
+ // should not be called on cli code path
+ return 0, ErrNotSupportedPlatform
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/unum/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
new file mode 100644
index 0000000..6a77524
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -0,0 +1,24 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic link files because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ var _path *byte
+ _path, err := unix.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/unum/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 0000000..edc588a
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,25 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic link files because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ atFdCwd := unix.AT_FDCWD
+
+ var _path *byte
+ _path, err := unix.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/unum/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 0000000..1397145
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd
+
+package system
+
+import "syscall"
+
+// LUtimesNano is only supported on linux and freebsd.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/unum/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 0000000..98b111b
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,29 @@
+package system
+
+import "golang.org/x/sys/unix"
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and a nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ dest := make([]byte, 128)
+ sz, errno := unix.Lgetxattr(path, attr, dest)
+ if errno == unix.ENODATA {
+ return nil, nil
+ }
+ if errno == unix.ERANGE {
+ dest = make([]byte, sz)
+ sz, errno = unix.Lgetxattr(path, attr, dest)
+ }
+ if errno != nil {
+ return nil, errno
+ }
+
+ return dest[:sz], nil
+}
+
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return unix.Lsetxattr(path, attr, data, flags)
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/unum/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
new file mode 100644
index 0000000..0114f22
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux
+
+package system
+
+// Lgetxattr is not supported on platforms other than linux.
+func Lgetxattr(path string, attr string) ([]byte, error) {
+ return nil, ErrNotSupportedPlatform
+}
+
+// Lsetxattr is not supported on platforms other than linux.
+func Lsetxattr(path string, attr string, data []byte, flags int) error {
+ return ErrNotSupportedPlatform
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go b/unum/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
new file mode 100644
index 0000000..e4dec3a
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
@@ -0,0 +1,11 @@
+// +build go1.8
+
+package tlsconfig
+
+import "crypto/tls"
+
+// Clone returns a clone of tls.Config. This function is provided for
+// compatibility for go1.7 that doesn't include this method in stdlib.
+func Clone(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
diff --git a/unum/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go b/unum/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go
new file mode 100644
index 0000000..0d5b448
--- /dev/null
+++ b/unum/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go
@@ -0,0 +1,33 @@
+// +build go1.7,!go1.8
+
+package tlsconfig
+
+import "crypto/tls"
+
+// Clone returns a clone of tls.Config. This function is provided for
+// compatibility for go1.7 that doesn't include this method in stdlib.
+func Clone(c *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ }
+}
diff --git a/unum/vendor/github.com/docker/go-connections/LICENSE b/unum/vendor/github.com/docker/go-connections/LICENSE
new file mode 100644
index 0000000..b55b37b
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/unum/vendor/github.com/docker/go-connections/nat/nat.go b/unum/vendor/github.com/docker/go-connections/nat/nat.go
new file mode 100644
index 0000000..4d5f5ae
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/nat/nat.go
@@ -0,0 +1,242 @@
+// Package nat is a convenience package for manipulation of strings describing network ports.
+package nat
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+)
+
+const (
+ // portSpecTemplate is the expected format for port specifications
+ portSpecTemplate = "ip:hostPort:containerPort"
+)
+
+// PortBinding represents a binding between a Host IP address and a Host Port
+type PortBinding struct {
+ // HostIP is the host IP Address
+ HostIP string `json:"HostIp"`
+ // HostPort is the host port number
+ HostPort string
+}
+
+// PortMap is a collection of PortBinding indexed by Port
+type PortMap map[Port][]PortBinding
+
+// PortSet is a collection of structs indexed by Port
+type PortSet map[Port]struct{}
+
+// Port is a string containing port number and protocol in the format "80/tcp"
+type Port string
+
+// NewPort creates a new instance of a Port given a protocol and port number or port range
+func NewPort(proto, port string) (Port, error) {
+ // Check for parsing issues on "port" now so we can avoid having
+ // to check it later on.
+
+ portStartInt, portEndInt, err := ParsePortRangeToInt(port)
+ if err != nil {
+ return "", err
+ }
+
+ if portStartInt == portEndInt {
+ return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil
+ }
+ return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil
+}
+
+// ParsePort parses the port number string and returns an int
+func ParsePort(rawPort string) (int, error) {
+ if len(rawPort) == 0 {
+ return 0, nil
+ }
+ port, err := strconv.ParseUint(rawPort, 10, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int(port), nil
+}
+
+// ParsePortRangeToInt parses the port range string and returns start/end ints
+func ParsePortRangeToInt(rawPort string) (int, int, error) {
+ if len(rawPort) == 0 {
+ return 0, 0, nil
+ }
+ start, end, err := ParsePortRange(rawPort)
+ if err != nil {
+ return 0, 0, err
+ }
+ return int(start), int(end), nil
+}
+
+// Proto returns the protocol of a Port
+func (p Port) Proto() string {
+ proto, _ := SplitProtoPort(string(p))
+ return proto
+}
+
+// Port returns the port number of a Port
+func (p Port) Port() string {
+ _, port := SplitProtoPort(string(p))
+ return port
+}
+
+// Int returns the port number of a Port as an int
+func (p Port) Int() int {
+ portStr := p.Port()
+ // We don't need to check for an error because we're going to
+ // assume that any error would have been found, and reported, in NewPort()
+ port, _ := ParsePort(portStr)
+ return port
+}
+
+// Range returns the start/end port numbers of a Port range as ints
+func (p Port) Range() (int, int, error) {
+ return ParsePortRangeToInt(p.Port())
+}
+
+// SplitProtoPort splits a port in the format of proto/port
+func SplitProtoPort(rawPort string) (string, string) {
+ parts := strings.Split(rawPort, "/")
+ l := len(parts)
+ if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 {
+ return "", ""
+ }
+ if l == 1 {
+ return "tcp", rawPort
+ }
+ if len(parts[1]) == 0 {
+ return "tcp", parts[0]
+ }
+ return parts[1], parts[0]
+}
+
+func validateProto(proto string) bool {
+ for _, availableProto := range []string{"tcp", "udp"} {
+ if availableProto == proto {
+ return true
+ }
+ }
+ return false
+}
+
+// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses
+// these in to the internal types
+func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) {
+ var (
+ exposedPorts = make(map[Port]struct{}, len(ports))
+ bindings = make(map[Port][]PortBinding)
+ )
+ for _, rawPort := range ports {
+ portMappings, err := ParsePortSpec(rawPort)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ for _, portMapping := range portMappings {
+ port := portMapping.Port
+ if _, exists := exposedPorts[port]; !exists {
+ exposedPorts[port] = struct{}{}
+ }
+ bslice, exists := bindings[port]
+ if !exists {
+ bslice = []PortBinding{}
+ }
+ bindings[port] = append(bslice, portMapping.Binding)
+ }
+ }
+ return exposedPorts, bindings, nil
+}
+
+// PortMapping is a data object mapping a Port to a PortBinding
+type PortMapping struct {
+ Port Port
+ Binding PortBinding
+}
+
+func splitParts(rawport string) (string, string, string) {
+ parts := strings.Split(rawport, ":")
+ n := len(parts)
+ containerport := parts[n-1]
+
+ switch n {
+ case 1:
+ return "", "", containerport
+ case 2:
+ return "", parts[0], containerport
+ case 3:
+ return parts[0], parts[1], containerport
+ default:
+ return strings.Join(parts[:n-2], ":"), parts[n-2], containerport
+ }
+}
+
+// ParsePortSpec parses a port specification string into a slice of PortMappings
+func ParsePortSpec(rawPort string) ([]PortMapping, error) {
+ var proto string
+ rawIP, hostPort, containerPort := splitParts(rawPort)
+ proto, containerPort = SplitProtoPort(containerPort)
+
+ // Strip [] from IPV6 addresses
+ ip, _, err := net.SplitHostPort(rawIP + ":")
+ if err != nil {
+ return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err)
+ }
+ if ip != "" && net.ParseIP(ip) == nil {
+ return nil, fmt.Errorf("Invalid ip address: %s", ip)
+ }
+ if containerPort == "" {
+ return nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
+ }
+
+ startPort, endPort, err := ParsePortRange(containerPort)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
+ }
+
+ var startHostPort, endHostPort uint64 = 0, 0
+ if len(hostPort) > 0 {
+ startHostPort, endHostPort, err = ParsePortRange(hostPort)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
+ }
+ }
+
+ if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) {
+ // Allow host port range iff containerPort is not a range.
+ // In this case, use the host port range as the dynamic
+ // host port range to allocate into.
+ if endPort != startPort {
+ return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
+ }
+ }
+
+ if !validateProto(strings.ToLower(proto)) {
+ return nil, fmt.Errorf("Invalid proto: %s", proto)
+ }
+
+ ports := []PortMapping{}
+ for i := uint64(0); i <= (endPort - startPort); i++ {
+ containerPort = strconv.FormatUint(startPort+i, 10)
+ if len(hostPort) > 0 {
+ hostPort = strconv.FormatUint(startHostPort+i, 10)
+ }
+ // Set hostPort to a range only if there is a single container port
+ // and a dynamic host port.
+ if startPort == endPort && startHostPort != endHostPort {
+ hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10))
+ }
+ port, err := NewPort(strings.ToLower(proto), containerPort)
+ if err != nil {
+ return nil, err
+ }
+
+ binding := PortBinding{
+ HostIP: ip,
+ HostPort: hostPort,
+ }
+ ports = append(ports, PortMapping{Port: port, Binding: binding})
+ }
+ return ports, nil
+}
diff --git a/unum/vendor/github.com/docker/go-connections/nat/parse.go b/unum/vendor/github.com/docker/go-connections/nat/parse.go
new file mode 100644
index 0000000..892adf8
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/nat/parse.go
@@ -0,0 +1,57 @@
+package nat
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// PartParser parses and validates the specified string (data) using the specified template
+// e.g. ip:public:private -> 192.168.0.1:80:8000
+// DEPRECATED: do not use, this function may be removed in a future version
+func PartParser(template, data string) (map[string]string, error) {
+ // ip:public:private
+ var (
+ templateParts = strings.Split(template, ":")
+ parts = strings.Split(data, ":")
+ out = make(map[string]string, len(templateParts))
+ )
+ if len(parts) != len(templateParts) {
+ return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
+ }
+
+ for i, t := range templateParts {
+ value := ""
+ if len(parts) > i {
+ value = parts[i]
+ }
+ out[t] = value
+ }
+ return out, nil
+}
+
+// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
+func ParsePortRange(ports string) (uint64, uint64, error) {
+ if ports == "" {
+ return 0, 0, fmt.Errorf("Empty string specified for ports.")
+ }
+ if !strings.Contains(ports, "-") {
+ start, err := strconv.ParseUint(ports, 10, 16)
+ end := start
+ return start, end, err
+ }
+
+ parts := strings.Split(ports, "-")
+ start, err := strconv.ParseUint(parts[0], 10, 16)
+ if err != nil {
+ return 0, 0, err
+ }
+ end, err := strconv.ParseUint(parts[1], 10, 16)
+ if err != nil {
+ return 0, 0, err
+ }
+ if end < start {
+ return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
+ }
+ return start, end, nil
+}
diff --git a/unum/vendor/github.com/docker/go-connections/nat/sort.go b/unum/vendor/github.com/docker/go-connections/nat/sort.go
new file mode 100644
index 0000000..ce95017
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/nat/sort.go
@@ -0,0 +1,96 @@
+package nat
+
+import (
+ "sort"
+ "strings"
+)
+
+type portSorter struct {
+ ports []Port
+ by func(i, j Port) bool
+}
+
+func (s *portSorter) Len() int {
+ return len(s.ports)
+}
+
+func (s *portSorter) Swap(i, j int) {
+ s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
+}
+
+func (s *portSorter) Less(i, j int) bool {
+ ip := s.ports[i]
+ jp := s.ports[j]
+
+ return s.by(ip, jp)
+}
+
+// Sort sorts a list of ports using the provided predicate
+// This function should compare `i` and `j`, returning true if `i` is
+// considered to be less than `j`
+func Sort(ports []Port, predicate func(i, j Port) bool) {
+ s := &portSorter{ports, predicate}
+ sort.Sort(s)
+}
+
+type portMapEntry struct {
+ port Port
+ binding PortBinding
+}
+
+type portMapSorter []portMapEntry
+
+func (s portMapSorter) Len() int { return len(s) }
+func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// sort the ports so that the order is:
+// 1. port with larger specified bindings
+// 2. larger port
+// 3. port with tcp protocol
+func (s portMapSorter) Less(i, j int) bool {
+ pi, pj := s[i].port, s[j].port
+ hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
+ return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
+}
+
+// SortPortMap sorts the list of ports and their respective mappings. Ports
+// with an explicit HostPort are placed first.
+func SortPortMap(ports []Port, bindings PortMap) {
+ s := portMapSorter{}
+ for _, p := range ports {
+ if binding, ok := bindings[p]; ok {
+ for _, b := range binding {
+ s = append(s, portMapEntry{port: p, binding: b})
+ }
+ bindings[p] = []PortBinding{}
+ } else {
+ s = append(s, portMapEntry{port: p})
+ }
+ }
+
+ sort.Sort(s)
+ var (
+ i int
+ pm = make(map[Port]struct{})
+ )
+ // reorder ports
+ for _, entry := range s {
+ if _, ok := pm[entry.port]; !ok {
+ ports[i] = entry.port
+ pm[entry.port] = struct{}{}
+ i++
+ }
+ // reorder bindings for this port
+ if _, ok := bindings[entry.port]; ok {
+ bindings[entry.port] = append(bindings[entry.port], entry.binding)
+ }
+ }
+}
+
+func toInt(s string) uint64 {
+ i, _, err := ParsePortRange(s)
+ if err != nil {
+ i = 0
+ }
+ return i
+}
diff --git a/unum/vendor/github.com/docker/go-connections/sockets/README.md b/unum/vendor/github.com/docker/go-connections/sockets/README.md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/sockets/README.md
diff --git a/unum/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/unum/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
new file mode 100644
index 0000000..99846ff
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
@@ -0,0 +1,81 @@
+package sockets
+
+import (
+ "errors"
+ "net"
+ "sync"
+)
+
+var errClosed = errors.New("use of closed network connection")
+
+// InmemSocket implements net.Listener using in-memory only connections.
+type InmemSocket struct {
+ chConn chan net.Conn
+ chClose chan struct{}
+ addr string
+ mu sync.Mutex
+}
+
+// dummyAddr is used to satisfy net.Addr for the in-mem socket
+// it is just stored as a string and returns the string for all calls
+type dummyAddr string
+
+// NewInmemSocket creates an in-memory only net.Listener
+// The addr argument can be any string, but is used to satisfy the `Addr()` part
+// of the net.Listener interface
+func NewInmemSocket(addr string, bufSize int) *InmemSocket {
+ return &InmemSocket{
+ chConn: make(chan net.Conn, bufSize),
+ chClose: make(chan struct{}),
+ addr: addr,
+ }
+}
+
+// Addr returns the socket's addr string to satisfy net.Listener
+func (s *InmemSocket) Addr() net.Addr {
+ return dummyAddr(s.addr)
+}
+
+// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
+func (s *InmemSocket) Accept() (net.Conn, error) {
+ select {
+ case conn := <-s.chConn:
+ return conn, nil
+ case <-s.chClose:
+ return nil, errClosed
+ }
+}
+
+// Close closes the listener. It will be unavailable for use once closed.
+func (s *InmemSocket) Close() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ select {
+ case <-s.chClose:
+ default:
+ close(s.chClose)
+ }
+ return nil
+}
+
+// Dial is used to establish a connection with the in-mem server
+func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) {
+ srvConn, clientConn := net.Pipe()
+ select {
+ case s.chConn <- srvConn:
+ case <-s.chClose:
+ return nil, errClosed
+ }
+
+ return clientConn, nil
+}
+
+// Network returns the addr string, satisfies net.Addr
+func (a dummyAddr) Network() string {
+ return string(a)
+}
+
+// String returns the string form
+func (a dummyAddr) String() string {
+ return string(a)
+}
diff --git a/unum/vendor/github.com/docker/go-connections/sockets/proxy.go b/unum/vendor/github.com/docker/go-connections/sockets/proxy.go
new file mode 100644
index 0000000..98e9a1d
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/sockets/proxy.go
@@ -0,0 +1,51 @@
+package sockets
+
+import (
+ "net"
+ "net/url"
+ "os"
+ "strings"
+
+ "golang.org/x/net/proxy"
+)
+
+// GetProxyEnv allows access to the uppercase and the lowercase forms of
+// proxy-related variables. See the Go specification for details on these
+// variables. https://golang.org/pkg/net/http/
+func GetProxyEnv(key string) string {
+ proxyValue := os.Getenv(strings.ToUpper(key))
+ if proxyValue == "" {
+ return os.Getenv(strings.ToLower(key))
+ }
+ return proxyValue
+}
+
+// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a
+// proxy.Dialer which will route the connections through the proxy using the
+// given dialer.
+func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) {
+ allProxy := GetProxyEnv("all_proxy")
+ if len(allProxy) == 0 {
+ return direct, nil
+ }
+
+ proxyURL, err := url.Parse(allProxy)
+ if err != nil {
+ return direct, err
+ }
+
+ proxyFromURL, err := proxy.FromURL(proxyURL, direct)
+ if err != nil {
+ return direct, err
+ }
+
+ noProxy := GetProxyEnv("no_proxy")
+ if len(noProxy) == 0 {
+ return proxyFromURL, nil
+ }
+
+ perHost := proxy.NewPerHost(proxyFromURL, direct)
+ perHost.AddFromString(noProxy)
+
+ return perHost, nil
+}
diff --git a/unum/vendor/github.com/docker/go-connections/sockets/sockets.go b/unum/vendor/github.com/docker/go-connections/sockets/sockets.go
new file mode 100644
index 0000000..a1d7beb
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/sockets/sockets.go
@@ -0,0 +1,38 @@
+// Package sockets provides helper functions to create and configure Unix or TCP sockets.
+package sockets
+
+import (
+ "errors"
+ "net"
+ "net/http"
+ "time"
+)
+
+// Why 32? See https://github.com/docker/docker/pull/8035.
+const defaultTimeout = 32 * time.Second
+
+// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system.
+var ErrProtocolNotAvailable = errors.New("protocol not available")
+
+// ConfigureTransport configures the specified Transport according to the
+// specified proto and addr.
+// If the proto is unix (using a unix socket to communicate) or npipe the
+// compression is disabled.
+func ConfigureTransport(tr *http.Transport, proto, addr string) error {
+ switch proto {
+ case "unix":
+ return configureUnixTransport(tr, proto, addr)
+ case "npipe":
+ return configureNpipeTransport(tr, proto, addr)
+ default:
+ tr.Proxy = http.ProxyFromEnvironment
+ dialer, err := DialerFromEnvironment(&net.Dialer{
+ Timeout: defaultTimeout,
+ })
+ if err != nil {
+ return err
+ }
+ tr.Dial = dialer.Dial
+ }
+ return nil
+}
diff --git a/unum/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/unum/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
new file mode 100644
index 0000000..386cf0d
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
@@ -0,0 +1,35 @@
+// +build !windows
+
+package sockets
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "syscall"
+ "time"
+)
+
+const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+ if len(addr) > maxUnixSocketPathSize {
+ return fmt.Errorf("Unix socket path %q is too long", addr)
+ }
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.Dial = func(_, _ string) (net.Conn, error) {
+ return net.DialTimeout(proto, addr, defaultTimeout)
+ }
+ return nil
+}
+
+func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
+ return ErrProtocolNotAvailable
+}
+
+// DialPipe connects to a Windows named pipe.
+// This is not supported on other OSes.
+func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
+ return nil, syscall.EAFNOSUPPORT
+}
diff --git a/unum/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/unum/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
new file mode 100644
index 0000000..5c21644
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
@@ -0,0 +1,27 @@
+package sockets
+
+import (
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/Microsoft/go-winio"
+)
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+ return ErrProtocolNotAvailable
+}
+
+func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.Dial = func(_, _ string) (net.Conn, error) {
+ return DialPipe(addr, defaultTimeout)
+ }
+ return nil
+}
+
+// DialPipe connects to a Windows named pipe.
+func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
+ return winio.DialPipe(addr, &timeout)
+}
diff --git a/unum/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/unum/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
new file mode 100644
index 0000000..53cbb6c
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
@@ -0,0 +1,22 @@
+// Package sockets provides helper functions to create and configure Unix or TCP sockets.
+package sockets
+
+import (
+ "crypto/tls"
+ "net"
+)
+
+// NewTCPSocket creates a TCP socket listener with the specified address and
+// the specified tls configuration. If TLSConfig is set, will encapsulate the
+// TCP listener inside a TLS one.
+func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
+ l, err := net.Listen("tcp", addr)
+ if err != nil {
+ return nil, err
+ }
+ if tlsConfig != nil {
+ tlsConfig.NextProtos = []string{"http/1.1"}
+ l = tls.NewListener(l, tlsConfig)
+ }
+ return l, nil
+}
diff --git a/unum/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/unum/vendor/github.com/docker/go-connections/sockets/unix_socket.go
new file mode 100644
index 0000000..a8b5dbb
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/sockets/unix_socket.go
@@ -0,0 +1,32 @@
+// +build !windows
+
+package sockets
+
+import (
+ "net"
+ "os"
+ "syscall"
+)
+
+// NewUnixSocket creates a unix socket with the specified path and group.
+func NewUnixSocket(path string, gid int) (net.Listener, error) {
+ if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
+ return nil, err
+ }
+ mask := syscall.Umask(0777)
+ defer syscall.Umask(mask)
+
+ l, err := net.Listen("unix", path)
+ if err != nil {
+ return nil, err
+ }
+ if err := os.Chown(path, 0, gid); err != nil {
+ l.Close()
+ return nil, err
+ }
+ if err := os.Chmod(path, 0660); err != nil {
+ l.Close()
+ return nil, err
+ }
+ return l, nil
+}
diff --git a/unum/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/unum/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
new file mode 100644
index 0000000..1ca0965
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
@@ -0,0 +1,18 @@
+// +build go1.7
+
+package tlsconfig
+
+import (
+ "crypto/x509"
+ "runtime"
+)
+
+// SystemCertPool returns a copy of the system cert pool. On Windows it
+// returns an empty pool instead of an error when loading fails.
+func SystemCertPool() (*x509.CertPool, error) {
+ certpool, err := x509.SystemCertPool()
+ if err != nil && runtime.GOOS == "windows" {
+ return x509.NewCertPool(), nil
+ }
+ return certpool, err
+}
diff --git a/unum/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/unum/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
new file mode 100644
index 0000000..9ca9745
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package tlsconfig
+
+import (
+ "crypto/x509"
+
+)
+
+// SystemCertPool returns a new empty cert pool;
+// accessing the system cert pool is only supported from go 1.7 onwards.
+func SystemCertPool() (*x509.CertPool, error) {
+ return x509.NewCertPool(), nil
+}
diff --git a/unum/vendor/github.com/docker/go-connections/tlsconfig/config.go b/unum/vendor/github.com/docker/go-connections/tlsconfig/config.go
new file mode 100644
index 0000000..1b31bbb
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -0,0 +1,244 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
+// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
+// A Config may be reused; the tls package will also not modify it.
+package tlsconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/pkg/errors"
+)
+
// Options represents the information needed to create client and server TLS configurations.
type Options struct {
	// CAFile is the path to a PEM CA bundle; it feeds RootCAs in Client()
	// and ClientCAs in Server().
	CAFile string

	// If either CertFile or KeyFile is empty, Client() will not load them
	// preventing the client from authenticating to the server.
	// However, Server() requires them and will error out if they are empty.
	CertFile string
	KeyFile  string

	// client-only option
	InsecureSkipVerify bool
	// server-only option
	ClientAuth tls.ClientAuthType
	// If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
	// creds will include exclusively the roots in that CA file. If no CA file is provided,
	// the system pool will be used.
	ExclusiveRootPools bool
	// MinVersion, when non-zero, raises the minimum TLS version enforced by
	// Client()/Server() (validated by adjustMinVersion).
	MinVersion uint16
	// If Passphrase is set, it will be used to decrypt a TLS private key
	// if the key is encrypted
	Passphrase string
}
+
// Extra (server-side) accepted CBC cipher suites - will phase out in the future
var acceptedCBCCiphers = []uint16{
	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
	tls.TLS_RSA_WITH_AES_256_CBC_SHA,
	tls.TLS_RSA_WITH_AES_128_CBC_SHA,
}

// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
// options struct but wants to use a commonly accepted set of TLS cipher suites, with
// known weak algorithms removed.
var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)

// allTLSVersions lists all the TLS versions and is used by the code that validates
// a uint16 value as a TLS version. Note it deliberately includes SSL30 so a
// caller-supplied MinVersion of SSL 3.0 parses as "valid but too low" rather
// than "unknown".
var allTLSVersions = map[uint16]struct{}{
	tls.VersionSSL30: {},
	tls.VersionTLS10: {},
	tls.VersionTLS11: {},
	tls.VersionTLS12: {},
}
+
+// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
+func ServerDefault() *tls.Config {
+ return &tls.Config{
+ // Avoid fallback to SSL protocols < TLS1.0
+ MinVersion: tls.VersionTLS10,
+ PreferServerCipherSuites: true,
+ CipherSuites: DefaultServerAcceptedCiphers,
+ }
+}
+
+// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
+func ClientDefault() *tls.Config {
+ return &tls.Config{
+ // Prefer TLS1.2 as the client minimum
+ MinVersion: tls.VersionTLS12,
+ CipherSuites: clientCipherSuites,
+ }
+}
+
+// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
+func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
+ // If we should verify the server, we need to load a trusted ca
+ var (
+ certPool *x509.CertPool
+ err error
+ )
+ if exclusivePool {
+ certPool = x509.NewCertPool()
+ } else {
+ certPool, err = SystemCertPool()
+ if err != nil {
+ return nil, fmt.Errorf("failed to read system certificates: %v", err)
+ }
+ }
+ pem, err := ioutil.ReadFile(caFile)
+ if err != nil {
+ return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
+ }
+ if !certPool.AppendCertsFromPEM(pem) {
+ return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
+ }
+ return certPool, nil
+}
+
+// isValidMinVersion checks that the input value is a valid tls minimum version
+func isValidMinVersion(version uint16) bool {
+ _, ok := allTLSVersions[version]
+ return ok
+}
+
+// adjustMinVersion sets the MinVersion on `config`, the input configuration.
+// It assumes the current MinVersion on the `config` is the lowest allowed.
+func adjustMinVersion(options Options, config *tls.Config) error {
+ if options.MinVersion > 0 {
+ if !isValidMinVersion(options.MinVersion) {
+ return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
+ }
+ if options.MinVersion < config.MinVersion {
+ return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
+ }
+ config.MinVersion = options.MinVersion
+ }
+
+ return nil
+}
+
// IsErrEncryptedKey returns true if the 'err' is an error of incorrect
// password when trying to decrypt a TLS private key
func IsErrEncryptedKey(err error) bool {
	// errors.Cause unwraps the github.com/pkg/errors chain, so a wrapped
	// x509.IncorrectPasswordError is still recognized.
	return errors.Cause(err) == x509.IncorrectPasswordError
}
+
+// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
+// If the private key is encrypted, 'passphrase' is used to decrypted the
+// private key.
+func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
+ // this section makes some small changes to code from notary/tuf/utils/x509.go
+ pemBlock, _ := pem.Decode(keyBytes)
+ if pemBlock == nil {
+ return nil, fmt.Errorf("no valid private key found")
+ }
+
+ var err error
+ if x509.IsEncryptedPEMBlock(pemBlock) {
+ keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
+ if err != nil {
+ return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
+ }
+ keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
+ }
+
+ return keyBytes, nil
+}
+
+// getCert returns a Certificate from the CertFile and KeyFile in 'options',
+// if the key is encrypted, the Passphrase in 'options' will be used to
+// decrypt it.
+func getCert(options Options) ([]tls.Certificate, error) {
+ if options.CertFile == "" && options.KeyFile == "" {
+ return nil, nil
+ }
+
+ errMessage := "Could not load X509 key pair"
+
+ cert, err := ioutil.ReadFile(options.CertFile)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ prKeyBytes, err := ioutil.ReadFile(options.KeyFile)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
+ if err != nil {
+ return nil, errors.Wrap(err, errMessage)
+ }
+
+ return []tls.Certificate{tlsCert}, nil
+}
+
+// Client returns a TLS configuration meant to be used by a client.
+func Client(options Options) (*tls.Config, error) {
+ tlsConfig := ClientDefault()
+ tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
+ if !options.InsecureSkipVerify && options.CAFile != "" {
+ CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.RootCAs = CAs
+ }
+
+ tlsCerts, err := getCert(options)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.Certificates = tlsCerts
+
+ if err := adjustMinVersion(options, tlsConfig); err != nil {
+ return nil, err
+ }
+
+ return tlsConfig, nil
+}
+
+// Server returns a TLS configuration meant to be used by a server.
+func Server(options Options) (*tls.Config, error) {
+ tlsConfig := ServerDefault()
+ tlsConfig.ClientAuth = options.ClientAuth
+ tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
+ }
+ return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{tlsCert}
+ if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" {
+ CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.ClientCAs = CAs
+ }
+
+ if err := adjustMinVersion(options, tlsConfig); err != nil {
+ return nil, err
+ }
+
+ return tlsConfig, nil
+}
diff --git a/unum/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/unum/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
new file mode 100644
index 0000000..6b4c6a7
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
@@ -0,0 +1,17 @@
+// +build go1.5
+
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+package tlsconfig
+
+import (
+ "crypto/tls"
+)
+
// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
// Go 1.5+ variant (see build tag): includes the AES-256-GCM suite IDs that
// the pre-1.5 fallback file omits.
var clientCipherSuites = []uint16{
	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}
diff --git a/unum/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/unum/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
new file mode 100644
index 0000000..ee22df4
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
@@ -0,0 +1,15 @@
+// +build !go1.5
+
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+package tlsconfig
+
+import (
+ "crypto/tls"
+)
+
// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
// Pre-Go-1.5 fallback (see build tag): only the AES-128-GCM suites, the
// subset available on older Go.
var clientCipherSuites = []uint16{
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}
diff --git a/unum/vendor/github.com/docker/go-units/LICENSE b/unum/vendor/github.com/docker/go-units/LICENSE
new file mode 100644
index 0000000..b55b37b
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-units/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/unum/vendor/github.com/docker/go-units/README.md b/unum/vendor/github.com/docker/go-units/README.md
new file mode 100644
index 0000000..4f70a4e
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-units/README.md
@@ -0,0 +1,16 @@
+[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
+
+# Introduction
+
+go-units is a library to transform human friendly measurements into machine friendly values.
+
+## Usage
+
+See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
+
+## Copyright and license
+
+Copyright © 2015 Docker, Inc.
+
+go-units is licensed under the Apache License, Version 2.0.
+See [LICENSE](LICENSE) for the full text of the license.
diff --git a/unum/vendor/github.com/docker/go-units/duration.go b/unum/vendor/github.com/docker/go-units/duration.go
new file mode 100644
index 0000000..ba02af2
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-units/duration.go
@@ -0,0 +1,35 @@
+// Package units provides helper function to parse and print size and time units
+// in human-readable format.
+package units
+
+import (
+ "fmt"
+ "time"
+)
+
+// HumanDuration returns a human-readable approximation of a duration
+// (eg. "About a minute", "4 hours ago", etc.).
+func HumanDuration(d time.Duration) string {
+ if seconds := int(d.Seconds()); seconds < 1 {
+ return "Less than a second"
+ } else if seconds == 1 {
+ return "1 second"
+ } else if seconds < 60 {
+ return fmt.Sprintf("%d seconds", seconds)
+ } else if minutes := int(d.Minutes()); minutes == 1 {
+ return "About a minute"
+ } else if minutes < 46 {
+ return fmt.Sprintf("%d minutes", minutes)
+ } else if hours := int(d.Hours() + 0.5); hours == 1 {
+ return "About an hour"
+ } else if hours < 48 {
+ return fmt.Sprintf("%d hours", hours)
+ } else if hours < 24*7*2 {
+ return fmt.Sprintf("%d days", hours/24)
+ } else if hours < 24*30*2 {
+ return fmt.Sprintf("%d weeks", hours/24/7)
+ } else if hours < 24*365*2 {
+ return fmt.Sprintf("%d months", hours/24/30)
+ }
+ return fmt.Sprintf("%d years", int(d.Hours())/24/365)
+}
diff --git a/unum/vendor/github.com/docker/go-units/size.go b/unum/vendor/github.com/docker/go-units/size.go
new file mode 100644
index 0000000..44616c2
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-units/size.go
@@ -0,0 +1,108 @@
+package units
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
// See: http://en.wikipedia.org/wiki/Binary_prefix
const (
	// Decimal

	KB = 1000
	MB = 1000 * KB
	GB = 1000 * MB
	TB = 1000 * GB
	PB = 1000 * TB

	// Binary

	KiB = 1024
	MiB = 1024 * KiB
	GiB = 1024 * MiB
	TiB = 1024 * GiB
	PiB = 1024 * TiB
)

// unitMap maps a lowercase unit prefix ("k", "m", ...) to its byte multiplier.
type unitMap map[string]int64

var (
	decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
	binaryMap  = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
	// NOTE(review): `(\.\d+)*` (star, not `?`) lets inputs like "1.2.3"
	// through the regex; strconv.ParseFloat in parseSize then rejects them,
	// so they still error — just with ParseFloat's message instead of
	// "invalid size". Confirm before tightening.
	sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`)
)

// Unit ladders for formatting: SI (1000-based) and IEC (1024-based).
var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+
// getSizeAndUnit repeatedly divides size by base until it drops below base
// (or the last entry of _map is reached) and returns the scaled value with
// the matching unit label.
func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) {
	idx := 0
	for size >= base && idx < len(_map)-1 {
		size /= base
		idx++
	}
	return size, _map[idx]
}
+
// CustomSize returns a human-readable approximation of a size
// using custom format. The scaling loop is the same ladder walk as
// getSizeAndUnit, inlined here.
func CustomSize(format string, size float64, base float64, _map []string) string {
	idx := 0
	for size >= base && idx < len(_map)-1 {
		size /= base
		idx++
	}
	return fmt.Sprintf(format, size, _map[idx])
}
+
+// HumanSizeWithPrecision allows the size to be in any precision,
+// instead of 4 digit precision used in units.HumanSize.
+func HumanSizeWithPrecision(size float64, precision int) string {
+ size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs)
+ return fmt.Sprintf("%.*g%s", precision, size, unit)
+}
+
+// HumanSize returns a human-readable approximation of a size
+// capped at 4 valid numbers (eg. "2.746 MB", "796 KB").
+func HumanSize(size float64) string {
+ return HumanSizeWithPrecision(size, 4)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB").
+func BytesSize(size float64) string {
+ return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs)
+}
+
// FromHumanSize returns an integer from a human-readable specification of a
// size using SI standard (eg. "44kB", "17MB").
func FromHumanSize(size string) (int64, error) {
	// Decimal units: "k" = 1000, "m" = 1000^2, ... (contrast RAMInBytes).
	return parseSize(size, decimalMap)
}
+
// RAMInBytes parses a human-readable string representing an amount of RAM
// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
// returns the number of bytes, or -1 if the string is unparseable.
// Units are case-insensitive, and the 'b' suffix is optional.
func RAMInBytes(size string) (int64, error) {
	// Binary units: "k" = 1024, "m" = 1024^2, ... (contrast FromHumanSize).
	return parseSize(size, binaryMap)
}
+
+// Parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+ matches := sizeRegex.FindStringSubmatch(sizeStr)
+ if len(matches) != 4 {
+ return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+ }
+
+ size, err := strconv.ParseFloat(matches[1], 64)
+ if err != nil {
+ return -1, err
+ }
+
+ unitPrefix := strings.ToLower(matches[3])
+ if mul, ok := uMap[unitPrefix]; ok {
+ size *= float64(mul)
+ }
+
+ return int64(size), nil
+}
diff --git a/unum/vendor/github.com/docker/go-units/ulimit.go b/unum/vendor/github.com/docker/go-units/ulimit.go
new file mode 100644
index 0000000..5ac7fd8
--- /dev/null
+++ b/unum/vendor/github.com/docker/go-units/ulimit.go
@@ -0,0 +1,118 @@
+package units
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
// Ulimit is a human friendly version of Rlimit.
type Ulimit struct {
	Name string // resource name, e.g. "nofile" (a key of ulimitNameMapping)
	Hard int64  // hard (ceiling) limit
	Soft int64  // soft (effective) limit
}
+
// Rlimit specifies the resource limits, such as max open files.
type Rlimit struct {
	Type int    `json:"type,omitempty"` // resource constant (see rlimit* below)
	Hard uint64 `json:"hard,omitempty"` // hard (ceiling) limit
	Soft uint64 `json:"soft,omitempty"` // soft (effective) limit
}
+
const (
	// magic numbers for making the syscall
	// some of these are defined in the syscall package, but not all.
	// Also since Windows client doesn't get access to the syscall package, need to
	// define these here
	// NOTE(review): the values appear to mirror the Linux RLIMIT_* numbers
	// (CPU=0, FSIZE=1, ... NOFILE=7) — confirm before relying on them on
	// other unixes, where the numbering can differ.
	rlimitAs         = 9
	rlimitCore       = 4
	rlimitCPU        = 0
	rlimitData       = 2
	rlimitFsize      = 1
	rlimitLocks      = 10
	rlimitMemlock    = 8
	rlimitMsgqueue   = 12
	rlimitNice       = 13
	rlimitNofile     = 7
	rlimitNproc      = 6
	rlimitRss        = 5
	rlimitRtprio     = 14
	rlimitRttime     = 15
	rlimitSigpending = 11
	rlimitStack      = 3
)
+
// ulimitNameMapping maps the accepted ulimit names to their rlimit type
// constants; it doubles as the validity check used by ParseUlimit and
// GetRlimit.
var ulimitNameMapping = map[string]int{
	//"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
	"core":       rlimitCore,
	"cpu":        rlimitCPU,
	"data":       rlimitData,
	"fsize":      rlimitFsize,
	"locks":      rlimitLocks,
	"memlock":    rlimitMemlock,
	"msgqueue":   rlimitMsgqueue,
	"nice":       rlimitNice,
	"nofile":     rlimitNofile,
	"nproc":      rlimitNproc,
	"rss":        rlimitRss,
	"rtprio":     rlimitRtprio,
	"rttime":     rlimitRttime,
	"sigpending": rlimitSigpending,
	"stack":      rlimitStack,
}
+
+// ParseUlimit parses and returns a Ulimit from the specified string.
+func ParseUlimit(val string) (*Ulimit, error) {
+ parts := strings.SplitN(val, "=", 2)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid ulimit argument: %s", val)
+ }
+
+ if _, exists := ulimitNameMapping[parts[0]]; !exists {
+ return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
+ }
+
+ var (
+ soft int64
+ hard = &soft // default to soft in case no hard was set
+ temp int64
+ err error
+ )
+ switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
+ case 2:
+ temp, err = strconv.ParseInt(limitVals[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ hard = &temp
+ fallthrough
+ case 1:
+ soft, err = strconv.ParseInt(limitVals[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
+ }
+
+ if soft > *hard {
+ return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
+ }
+
+ return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
+}
+
+// GetRlimit returns the RLimit corresponding to Ulimit.
+func (u *Ulimit) GetRlimit() (*Rlimit, error) {
+ t, exists := ulimitNameMapping[u.Name]
+ if !exists {
+ return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
+ }
+
+ return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
+}
+
+func (u *Ulimit) String() string {
+ return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
+}
diff --git a/unum/vendor/github.com/docker/libtrust/LICENSE b/unum/vendor/github.com/docker/libtrust/LICENSE
new file mode 100644
index 0000000..2744858
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/unum/vendor/github.com/docker/libtrust/README.md b/unum/vendor/github.com/docker/libtrust/README.md
new file mode 100644
index 0000000..8e7db38
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/README.md
@@ -0,0 +1,18 @@
+# libtrust
+
+Libtrust is a library for managing authentication and authorization using public key cryptography.
+
+Authentication is handled using the identity attached to the public key.
+Libtrust provides multiple methods to prove possession of the private key associated with an identity.
+ - TLS x509 certificates
+ - Signature verification
+ - Key Challenge
+
+Authorization and access control is managed through a distributed trust graph.
+Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access.
+
+## Copyright and license
+
+Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license.
+Docs released under Creative commons.
+
diff --git a/unum/vendor/github.com/docker/libtrust/certificates.go b/unum/vendor/github.com/docker/libtrust/certificates.go
new file mode 100644
index 0000000..3dcca33
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/certificates.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "net"
+ "time"
+)
+
+// certTemplateInfo collects the subject/issuer fields used to build an
+// x509 certificate template; see generateCertTemplate.
+type certTemplateInfo struct {
+ commonName string
+ domains []string
+ ipAddresses []net.IP
+ isCA bool
+ clientAuth bool
+ serverAuth bool
+}
+
+// generateCertTemplate builds an x509 certificate template from the
+// given info. Key usage and extended key usage follow the isCA,
+// clientAuth and serverAuth flags.
+func generateCertTemplate(info *certTemplateInfo) *x509.Certificate {
+ // Generate a certificate template which is valid from the past week to
+ // 10 years from now. The usage of the certificate depends on the
+ // specified fields in the given certTempInfo object.
+ var (
+ keyUsage x509.KeyUsage
+ extKeyUsage []x509.ExtKeyUsage
+ )
+
+ if info.isCA {
+ keyUsage = x509.KeyUsageCertSign
+ }
+
+ if info.clientAuth {
+ extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth)
+ }
+
+ if info.serverAuth {
+ extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth)
+ }
+
+ return &x509.Certificate{
+ SerialNumber: big.NewInt(0),
+ Subject: pkix.Name{
+ CommonName: info.commonName,
+ },
+ NotBefore: time.Now().Add(-time.Hour * 24 * 7),
+ NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10),
+ DNSNames: info.domains,
+ IPAddresses: info.ipAddresses,
+ IsCA: info.isCA,
+ KeyUsage: keyUsage,
+ ExtKeyUsage: extKeyUsage,
+ BasicConstraintsValid: info.isCA,
+ }
+}
+
+// generateCert creates a certificate for the subject public key, signed
+// with the issuer private key, using templates built from the given
+// subject and issuer template infos.
+func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) {
+ pubCertTemplate := generateCertTemplate(subInfo)
+ privCertTemplate := generateCertTemplate(issInfo)
+
+ certDER, err := x509.CreateCertificate(
+ rand.Reader, pubCertTemplate, privCertTemplate,
+ pub.CryptoPublicKey(), priv.CryptoPrivateKey(),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to create certificate: %s", err)
+ }
+
+ // Round-trip through the parser so the returned certificate has all
+ // computed fields (Raw, Signature, ...) populated.
+ cert, err = x509.ParseCertificate(certDER)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate: %s", err)
+ }
+
+ return
+}
+
+// GenerateSelfSignedServerCert creates a self-signed certificate for the
+// given key which is to be used for TLS servers with the given domains and
+// IP addresses.
+func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) {
+ info := &certTemplateInfo{
+ commonName: key.KeyID(),
+ domains: domains,
+ ipAddresses: ipAddresses,
+ serverAuth: true,
+ }
+
+ // Self-signed: the same info is used as both subject and issuer.
+ return generateCert(key.PublicKey(), key, info, info)
+}
+
+// GenerateSelfSignedClientCert creates a self-signed certificate for the
+// given key which is to be used for TLS clients.
+func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) {
+ info := &certTemplateInfo{
+ commonName: key.KeyID(),
+ clientAuth: true,
+ }
+
+ return generateCert(key.PublicKey(), key, info, info)
+}
+
+// GenerateCACert creates a certificate which can be used as a trusted
+// certificate authority.
+func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) {
+ subjectInfo := &certTemplateInfo{
+ commonName: trustedKey.KeyID(),
+ isCA: true,
+ }
+ issuerInfo := &certTemplateInfo{
+ commonName: signer.KeyID(),
+ }
+
+ return generateCert(trustedKey, signer, subjectInfo, issuerInfo)
+}
+
+// GenerateCACertPool creates a certificate authority pool to be used for a
+// TLS configuration. Any self-signed certificates issued by the specified
+// trusted keys will be verified during a TLS handshake.
+func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) {
+ certPool := x509.NewCertPool()
+
+ for _, trustedKey := range trustedKeys {
+ cert, err := GenerateCACert(signer, trustedKey)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate CA certificate: %s", err)
+ }
+
+ certPool.AddCert(cert)
+ }
+
+ return certPool, nil
+}
+
+// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded
+// containing one or more certificates. The expected pem type is "CERTIFICATE".
+// Any PEM block of a different type causes an error.
+func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) {
+ b, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ certificates := []*x509.Certificate{}
+ var block *pem.Block
+ // pem.Decode returns the first block and the remaining input; loop
+ // until no block can be decoded from what is left.
+ block, b = pem.Decode(b)
+ for ; block != nil; block, b = pem.Decode(b) {
+ if block.Type == "CERTIFICATE" {
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ certificates = append(certificates, cert)
+ } else {
+ return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
+ }
+ }
+
+ return certificates, nil
+}
+
+// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded
+// containing one or more certificates. The expected pem type is "CERTIFICATE".
+func LoadCertificatePool(filename string) (*x509.CertPool, error) {
+ certs, err := LoadCertificateBundle(filename)
+ if err != nil {
+ return nil, err
+ }
+ pool := x509.NewCertPool()
+ for _, cert := range certs {
+ pool.AddCert(cert)
+ }
+ return pool, nil
+}
diff --git a/unum/vendor/github.com/docker/libtrust/doc.go b/unum/vendor/github.com/docker/libtrust/doc.go
new file mode 100644
index 0000000..ec5d215
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/doc.go
@@ -0,0 +1,9 @@
+/*
+Package libtrust provides an interface for managing authentication and
+authorization using public key cryptography. Authentication is handled
+using the identity attached to the public key and verified through TLS
+x509 certificates, a key challenge, or signature. Authorization and
+access control is managed through a trust graph distributed between
+both remote trust servers and locally cached and managed data.
+*/
+package libtrust
diff --git a/unum/vendor/github.com/docker/libtrust/ec_key.go b/unum/vendor/github.com/docker/libtrust/ec_key.go
new file mode 100644
index 0000000..00bbe4b
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/ec_key.go
@@ -0,0 +1,428 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+/*
+ * EC DSA PUBLIC KEY
+ */
+
+// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital
+// signature algorithms.
+type ecPublicKey struct {
+ *ecdsa.PublicKey
+ curveName string
+ signatureAlgorithm *signatureAlgorithm
+ extended map[string]interface{}
+}
+
+// fromECPublicKey wraps a standard-library ECDSA public key, selecting
+// the JWS signature algorithm implied by its curve. Only P-256, P-384
+// and P-521 are supported.
+func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) {
+ curve := cryptoPublicKey.Curve
+
+ switch {
+ case curve == elliptic.P256():
+ return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil
+ case curve == elliptic.P384():
+ return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil
+ case curve == elliptic.P521():
+ return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil
+ default:
+ return nil, errors.New("unsupported elliptic curve")
+ }
+}
+
+// KeyType returns the key type for elliptic curve keys, i.e., "EC".
+func (k *ecPublicKey) KeyType() string {
+ return "EC"
+}
+
+// CurveName returns the elliptic curve identifier.
+// Possible values are "P-256", "P-384", and "P-521".
+func (k *ecPublicKey) CurveName() string {
+ return k.curveName
+}
+
+// KeyID returns a distinct identifier which is unique to this Public Key.
+func (k *ecPublicKey) KeyID() string {
+ return keyIDFromCryptoKey(k)
+}
+
+// String returns a human-readable description including the key ID.
+func (k *ecPublicKey) String() string {
+ return fmt.Sprintf("EC Public Key <%s>", k.KeyID())
+}
+
+// Verify verifies the signature of the data in the io.Reader using this
+// PublicKey. The alg parameter should identify the digital signature
+// algorithm which was used to produce the signature and should be supported
+// by this public key. Returns a nil error if the signature is valid.
+func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+ // For EC keys there is only one supported signature algorithm depending
+ // on the curve parameters.
+ if k.signatureAlgorithm.HeaderParam() != alg {
+ return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg)
+ }
+
+ // signature is the concatenation of (r, s), base64Url encoded.
+ sigLength := len(signature)
+ expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3)
+ if sigLength != expectedOctetLength {
+ return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength)
+ }
+
+ rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:]
+ r := new(big.Int).SetBytes(rBytes)
+ s := new(big.Int).SetBytes(sBytes)
+
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err := io.Copy(hasher, data)
+ if err != nil {
+ return fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ if !ecdsa.Verify(k.PublicKey, hash, r, s) {
+ return errors.New("invalid signature")
+ }
+
+ return nil
+}
+
+// CryptoPublicKey returns the internal object which can be used as a
+// crypto.PublicKey for use with other standard library operations. The type
+// is either *rsa.PublicKey or *ecdsa.PublicKey
+func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey {
+ return k.PublicKey
+}
+
+// toMap serializes this key to a JWK-style map, including any extended
+// fields plus the standard "kty", "kid", "crv", "x" and "y" members.
+func (k *ecPublicKey) toMap() map[string]interface{} {
+ jwk := make(map[string]interface{})
+ for k, v := range k.extended {
+ jwk[k] = v
+ }
+ jwk["kty"] = k.KeyType()
+ jwk["kid"] = k.KeyID()
+ jwk["crv"] = k.CurveName()
+
+ xBytes := k.X.Bytes()
+ yBytes := k.Y.Bytes()
+ octetLength := (k.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output so that x, y are each
+ // *octetLength* bytes long.
+ xBuf := make([]byte, octetLength-len(xBytes), octetLength)
+ yBuf := make([]byte, octetLength-len(yBytes), octetLength)
+ xBuf = append(xBuf, xBytes...)
+ yBuf = append(yBuf, yBytes...)
+
+ jwk["x"] = joseBase64UrlEncode(xBuf)
+ jwk["y"] = joseBase64UrlEncode(yBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPublicKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Public Key to DER-encoded PKIX format.
+func (k *ecPublicKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err)
+ }
+ k.extended["kid"] = k.KeyID() // For display purposes.
+ return createPemBlock("PUBLIC KEY", derBytes, k.extended)
+}
+
+// AddExtendedField stores an arbitrary extra field to be serialized with
+// the key, e.g. "hosts".
+func (k *ecPublicKey) AddExtendedField(field string, value interface{}) {
+ k.extended[field] = value
+}
+
+// GetExtendedField returns the value of an extended field, or nil if the
+// field is not set.
+func (k *ecPublicKey) GetExtendedField(field string) interface{} {
+ v, ok := k.extended[field]
+ if !ok {
+ return nil
+ }
+ return v
+}
+
+// ecPublicKeyFromMap builds an EC public key from a decoded JWK map,
+// validating the curve identifier, coordinates and (optional) key ID.
+func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) {
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract 'crv', 'x', 'y', and 'kid' and check for
+ // consistency.
+
+ // Get the curve identifier value.
+ crv, err := stringFromMap(jwk, "crv")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err)
+ }
+
+ var (
+ curve elliptic.Curve
+ sigAlg *signatureAlgorithm
+ )
+
+ switch {
+ case crv == "P-256":
+ curve = elliptic.P256()
+ sigAlg = es256
+ case crv == "P-384":
+ curve = elliptic.P384()
+ sigAlg = es384
+ case crv == "P-521":
+ curve = elliptic.P521()
+ sigAlg = es512
+ default:
+ return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q", crv)
+ }
+
+ // Get the X and Y coordinates for the public key point.
+ xB64Url, err := stringFromMap(jwk, "x")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
+ }
+ x, err := parseECCoordinate(xB64Url, curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err)
+ }
+
+ yB64Url, err := stringFromMap(jwk, "y")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
+ }
+ y, err := parseECCoordinate(yB64Url, curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err)
+ }
+
+ key := &ecPublicKey{
+ PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y},
+ curveName: crv, signatureAlgorithm: sigAlg,
+ }
+
+ // Key ID is optional too, but if it exists, it should match the key.
+ _, ok := jwk["kid"]
+ if ok {
+ kid, err := stringFromMap(jwk, "kid")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Public Key ID: %s", err)
+ }
+ if kid != key.KeyID() {
+ return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid)
+ }
+ }
+
+ // Keep the remaining JWK members (including "kid") as extended fields.
+ key.extended = jwk
+
+ return key, nil
+}
+
+/*
+ * EC DSA PRIVATE KEY
+ */
+
+// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature
+// algorithms.
+type ecPrivateKey struct {
+ ecPublicKey
+ *ecdsa.PrivateKey
+}
+
+// fromECPrivateKey wraps a standard-library ECDSA private key together
+// with its derived public key.
+func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
+ publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *ecPrivateKey) PublicKey() PublicKey {
+ return &k.ecPublicKey
+}
+
+// String returns a human-readable description including the key ID.
+func (k *ecPrivateKey) String() string {
+ return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature
+// otherwise the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg.
+ // The given hashId is only a suggestion, and since EC keys only support
+ // on signature/hash algorithm given the curve name, we disregard it for
+ // the elliptic curve JWK signature implementation.
+ hasher := k.signatureAlgorithm.HashID().New()
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+ rBytes, sBytes := r.Bytes(), s.Bytes()
+ octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+ // MUST include leading zeros in the output
+ rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+ sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+ rBuf = append(rBuf, rBytes...)
+ sBuf = append(sBuf, sBytes...)
+
+ signature = append(rBuf, sBuf...)
+ alg = k.signatureAlgorithm.HeaderParam()
+
+ return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+ return k.PrivateKey
+}
+
+// toMap serializes this key to a JWK-style map: the public key members
+// plus the private scalar "d".
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+ jwk := k.ecPublicKey.toMap()
+
+ dBytes := k.D.Bytes()
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
+ n := k.ecPublicKey.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ // Create a buffer with the necessary zero-padding.
+ dBuf := make([]byte, octetLength-len(dBytes), octetLength)
+ dBuf = append(dBuf, dBytes...)
+
+ jwk["d"] = joseBase64UrlEncode(dBuf)
+
+ return jwk
+}
+
+// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
+// elliptic curve keys.
+func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) {
+ return json.Marshal(k.toMap())
+}
+
+// PEMBlock serializes this Private Key to DER-encoded SEC 1 format (the
+// encoding produced by x509.MarshalECPrivateKey).
+func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) {
+ derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey)
+ if err != nil {
+ return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err)
+ }
+ // NOTE(review): the public key PEMBlock uses the "kid" header name
+ // here; confirm whether "keyID" is intentional for private keys.
+ k.extended["keyID"] = k.KeyID() // For display purposes.
+ return createPemBlock("EC PRIVATE KEY", derBytes, k.extended)
+}
+
+// ecPrivateKeyFromMap builds an EC private key from a decoded JWK map by
+// first parsing the public key members, then the private scalar "d".
+func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) {
+ dB64Url, err := stringFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key: %s", err)
+ }
+
+ // JWK key type (kty) has already been determined to be "EC".
+ // Need to extract the public key information, then extract the private
+ // key value 'd'.
+ publicKey, err := ecPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ d, err := parseECPrivateParam(dB64Url, publicKey.Curve)
+ if err != nil {
+ return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err)
+ }
+
+ key := &ecPrivateKey{
+ ecPublicKey: *publicKey,
+ PrivateKey: &ecdsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: d,
+ },
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+// generateECPrivateKey creates a fresh random key pair on the given
+// curve; the caller fills in curveName and signatureAlgorithm.
+func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) {
+ k = new(ecPrivateKey)
+ k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+ k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
+// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256.
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P256())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+ }
+
+ k.curveName = "P-256"
+ k.signatureAlgorithm = es256
+
+ return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P384())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+ }
+
+ k.curveName = "P-384"
+ k.signatureAlgorithm = es384
+
+ return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+ k, err := generateECPrivateKey(elliptic.P521())
+ if err != nil {
+ return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+ }
+
+ k.curveName = "P-521"
+ k.signatureAlgorithm = es512
+
+ return k, nil
+}
diff --git a/unum/vendor/github.com/docker/libtrust/filter.go b/unum/vendor/github.com/docker/libtrust/filter.go
new file mode 100644
index 0000000..5b2b4fc
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/filter.go
@@ -0,0 +1,50 @@
+package libtrust
+
+import (
+ "path/filepath"
+)
+
+// FilterByHosts filters the list of PublicKeys to only those which contain a
+// 'hosts' pattern which matches the given host. If *includeEmpty* is true,
+// then keys which do not specify any hosts are also returned.
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) {
+ filtered := make([]PublicKey, 0, len(keys))
+
+ for _, pubKey := range keys {
+ // The "hosts" extended field may have been decoded from JSON as
+ // []interface{}; accept both that and a plain []string, skipping
+ // any non-string entries.
+ var hosts []string
+ switch v := pubKey.GetExtendedField("hosts").(type) {
+ case []string:
+ hosts = v
+ case []interface{}:
+ for _, value := range v {
+ h, ok := value.(string)
+ if !ok {
+ continue
+ }
+ hosts = append(hosts, h)
+ }
+ }
+
+ if len(hosts) == 0 {
+ if includeEmpty {
+ filtered = append(filtered, pubKey)
+ }
+ continue
+ }
+
+ // Check if any hosts match pattern
+ for _, hostPattern := range hosts {
+ match, err := filepath.Match(hostPattern, host)
+ if err != nil {
+ return nil, err
+ }
+
+ if match {
+ filtered = append(filtered, pubKey)
+ // Stop after the first match so that a key whose hosts
+ // list matches under several patterns is appended only
+ // once. (Previously this was `continue`, which only
+ // skipped to the next pattern and could append the same
+ // key multiple times.)
+ break
+ }
+ }
+ }
+
+ return filtered, nil
+}
diff --git a/unum/vendor/github.com/docker/libtrust/hash.go b/unum/vendor/github.com/docker/libtrust/hash.go
new file mode 100644
index 0000000..a2df787
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+ "crypto"
+	_ "crypto/sha256" // Register SHA224 and SHA256
+	_ "crypto/sha512" // Register SHA384 and SHA512
+ "fmt"
+)
+
+type signatureAlgorithm struct {
+ algHeaderParam string
+ hashID crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+ return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+ return h.hashID
+}
+
+var (
+ rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+ rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+ rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+ es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+ es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+ es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+ switch {
+ case alg == "RS256":
+ return rs256, nil
+ case alg == "RS384":
+ return rs384, nil
+ case alg == "RS512":
+ return rs512, nil
+ default:
+ return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+ }
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+ switch {
+ case hashID == crypto.SHA512:
+ return rs512
+ case hashID == crypto.SHA384:
+ return rs384
+ case hashID == crypto.SHA256:
+ fallthrough
+ default:
+ return rs256
+ }
+}
diff --git a/unum/vendor/github.com/docker/libtrust/jsonsign.go b/unum/vendor/github.com/docker/libtrust/jsonsign.go
new file mode 100644
index 0000000..cb2ca9a
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/jsonsign.go
@@ -0,0 +1,657 @@
+package libtrust
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sort"
+ "time"
+ "unicode"
+)
+
+var (
+ // ErrInvalidSignContent is used when the content to be signed is invalid.
+ ErrInvalidSignContent = errors.New("invalid sign content")
+
+ // ErrInvalidJSONContent is used when invalid json is encountered.
+ ErrInvalidJSONContent = errors.New("invalid json content")
+
+ // ErrMissingSignatureKey is used when the specified signature key
+ // does not exist in the JSON content.
+ ErrMissingSignatureKey = errors.New("missing signature key")
+)
+
+type jsHeader struct {
+ JWK PublicKey `json:"jwk,omitempty"`
+ Algorithm string `json:"alg"`
+ Chain []string `json:"x5c,omitempty"`
+}
+
+type jsSignature struct {
+ Header jsHeader `json:"header"`
+ Signature string `json:"signature"`
+ Protected string `json:"protected,omitempty"`
+}
+
+type jsSignaturesSorted []jsSignature
+
+func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] }
+func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) }
+
+func (jsbkid jsSignaturesSorted) Less(i, j int) bool {
+ ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID()
+ si, sj := jsbkid[i].Signature, jsbkid[j].Signature
+
+ if ki == kj {
+ return si < sj
+ }
+
+ return ki < kj
+}
+
+type signKey struct {
+ PrivateKey
+ Chain []*x509.Certificate
+}
+
+// JSONSignature represents a signature of a json object.
+type JSONSignature struct {
+ payload string
+ signatures []jsSignature
+ indent string
+ formatLength int
+ formatTail []byte
+}
+
+func newJSONSignature() *JSONSignature {
+ return &JSONSignature{
+ signatures: make([]jsSignature, 0, 1),
+ }
+}
+
+// Payload returns the encoded payload of the signature. This
+// payload should not be signed directly
+func (js *JSONSignature) Payload() ([]byte, error) {
+ return joseBase64UrlDecode(js.payload)
+}
+
+func (js *JSONSignature) protectedHeader() (string, error) {
+ protected := map[string]interface{}{
+ "formatLength": js.formatLength,
+ "formatTail": joseBase64UrlEncode(js.formatTail),
+ "time": time.Now().UTC().Format(time.RFC3339),
+ }
+ protectedBytes, err := json.Marshal(protected)
+ if err != nil {
+ return "", err
+ }
+
+ return joseBase64UrlEncode(protectedBytes), nil
+}
+
+func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) {
+ buf := make([]byte, len(js.payload)+len(protectedHeader)+1)
+ copy(buf, protectedHeader)
+ buf[len(protectedHeader)] = '.'
+ copy(buf[len(protectedHeader)+1:], js.payload)
+ return buf, nil
+}
+
+// Sign adds a signature using the given private key.
+func (js *JSONSignature) Sign(key PrivateKey) error {
+ protected, err := js.protectedHeader()
+ if err != nil {
+ return err
+ }
+ signBytes, err := js.signBytes(protected)
+ if err != nil {
+ return err
+ }
+ sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
+ if err != nil {
+ return err
+ }
+
+ js.signatures = append(js.signatures, jsSignature{
+ Header: jsHeader{
+ JWK: key.PublicKey(),
+ Algorithm: algorithm,
+ },
+ Signature: joseBase64UrlEncode(sigBytes),
+ Protected: protected,
+ })
+
+ return nil
+}
+
+// SignWithChain adds a signature using the given private key
+// and setting the x509 chain. The public key of the first element
+// in the chain must be the public key corresponding with the sign key.
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error {
+ // Ensure key.Chain[0] is public key for key
+ //key.Chain.PublicKey
+ //key.PublicKey().CryptoPublicKey()
+
+ // Verify chain
+ protected, err := js.protectedHeader()
+ if err != nil {
+ return err
+ }
+ signBytes, err := js.signBytes(protected)
+ if err != nil {
+ return err
+ }
+ sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256)
+ if err != nil {
+ return err
+ }
+
+ header := jsHeader{
+ Chain: make([]string, len(chain)),
+ Algorithm: algorithm,
+ }
+
+ for i, cert := range chain {
+ header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw)
+ }
+
+ js.signatures = append(js.signatures, jsSignature{
+ Header: header,
+ Signature: joseBase64UrlEncode(sigBytes),
+ Protected: protected,
+ })
+
+ return nil
+}
+
+// Verify verifies all the signatures and returns the list of
+// public keys used to sign. Any x509 chains are not checked.
+func (js *JSONSignature) Verify() ([]PublicKey, error) {
+ keys := make([]PublicKey, len(js.signatures))
+ for i, signature := range js.signatures {
+ signBytes, err := js.signBytes(signature.Protected)
+ if err != nil {
+ return nil, err
+ }
+ var publicKey PublicKey
+ if len(signature.Header.Chain) > 0 {
+ certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
+ if err != nil {
+ return nil, err
+ }
+ cert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ publicKey, err = FromCryptoPublicKey(cert.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ } else if signature.Header.JWK != nil {
+ publicKey = signature.Header.JWK
+ } else {
+ return nil, errors.New("missing public key")
+ }
+
+ sigBytes, err := joseBase64UrlDecode(signature.Signature)
+ if err != nil {
+ return nil, err
+ }
+
+ err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ keys[i] = publicKey
+ }
+ return keys, nil
+}
+
+// VerifyChains verifies all the signatures and the chains associated
+// with each signature and returns the list of verified chains.
+// Signatures without an x509 chain are not checked.
+func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) {
+ chains := make([][]*x509.Certificate, 0, len(js.signatures))
+ for _, signature := range js.signatures {
+ signBytes, err := js.signBytes(signature.Protected)
+ if err != nil {
+ return nil, err
+ }
+ var publicKey PublicKey
+ if len(signature.Header.Chain) > 0 {
+ certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0])
+ if err != nil {
+ return nil, err
+ }
+ cert, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ publicKey, err = FromCryptoPublicKey(cert.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ intermediates := x509.NewCertPool()
+ if len(signature.Header.Chain) > 1 {
+ intermediateChain := signature.Header.Chain[1:]
+ for i := range intermediateChain {
+ certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i])
+ if err != nil {
+ return nil, err
+ }
+ intermediate, err := x509.ParseCertificate(certBytes)
+ if err != nil {
+ return nil, err
+ }
+ intermediates.AddCert(intermediate)
+ }
+ }
+
+ verifyOptions := x509.VerifyOptions{
+ Intermediates: intermediates,
+ Roots: ca,
+ }
+
+ verifiedChains, err := cert.Verify(verifyOptions)
+ if err != nil {
+ return nil, err
+ }
+ chains = append(chains, verifiedChains...)
+
+ sigBytes, err := joseBase64UrlDecode(signature.Signature)
+ if err != nil {
+ return nil, err
+ }
+
+ err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ }
+ return chains, nil
+}
+
+// JWS returns JSON serialized JWS according to
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2
+func (js *JSONSignature) JWS() ([]byte, error) {
+ if len(js.signatures) == 0 {
+ return nil, errors.New("missing signature")
+ }
+
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ jsonMap := map[string]interface{}{
+ "payload": js.payload,
+ "signatures": js.signatures,
+ }
+
+ return json.MarshalIndent(jsonMap, "", " ")
+}
+
+func notSpace(r rune) bool {
+ return !unicode.IsSpace(r)
+}
+
+func detectJSONIndent(jsonContent []byte) (indent string) {
+ if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' {
+ quoteIndex := bytes.IndexRune(jsonContent[1:], '"')
+ if quoteIndex > 0 {
+ indent = string(jsonContent[2 : quoteIndex+1])
+ }
+ }
+ return
+}
+
+type jsParsedHeader struct {
+ JWK json.RawMessage `json:"jwk"`
+ Algorithm string `json:"alg"`
+ Chain []string `json:"x5c"`
+}
+
+type jsParsedSignature struct {
+ Header jsParsedHeader `json:"header"`
+ Signature string `json:"signature"`
+ Protected string `json:"protected"`
+}
+
+// ParseJWS parses a JWS serialized JSON object into a JSONSignature.
+func ParseJWS(content []byte) (*JSONSignature, error) {
+ type jsParsed struct {
+ Payload string `json:"payload"`
+ Signatures []jsParsedSignature `json:"signatures"`
+ }
+ parsed := &jsParsed{}
+ err := json.Unmarshal(content, parsed)
+ if err != nil {
+ return nil, err
+ }
+ if len(parsed.Signatures) == 0 {
+ return nil, errors.New("missing signatures")
+ }
+ payload, err := joseBase64UrlDecode(parsed.Payload)
+ if err != nil {
+ return nil, err
+ }
+
+ js, err := NewJSONSignature(payload)
+ if err != nil {
+ return nil, err
+ }
+ js.signatures = make([]jsSignature, len(parsed.Signatures))
+ for i, signature := range parsed.Signatures {
+ header := jsHeader{
+ Algorithm: signature.Header.Algorithm,
+ }
+ if signature.Header.Chain != nil {
+ header.Chain = signature.Header.Chain
+ }
+ if signature.Header.JWK != nil {
+ publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK))
+ if err != nil {
+ return nil, err
+ }
+ header.JWK = publicKey
+ }
+ js.signatures[i] = jsSignature{
+ Header: header,
+ Signature: signature.Signature,
+ Protected: signature.Protected,
+ }
+ }
+
+ return js, nil
+}
+
+// NewJSONSignature returns a new unsigned JWS from a json byte array.
+// JSONSignature will need to be signed before serializing or storing.
+// Optionally, one or more signatures can be provided as byte buffers,
+// containing serialized JWS signatures, to assemble a fully signed JWS
+// package. It is the callers responsibility to ensure uniqueness of the
+// provided signatures.
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) {
+ var dataMap map[string]interface{}
+ err := json.Unmarshal(content, &dataMap)
+ if err != nil {
+ return nil, err
+ }
+
+ js := newJSONSignature()
+ js.indent = detectJSONIndent(content)
+
+ js.payload = joseBase64UrlEncode(content)
+
+ // Find trailing } and whitespace, put in protected header
+ closeIndex := bytes.LastIndexFunc(content, notSpace)
+ if content[closeIndex] != '}' {
+ return nil, ErrInvalidJSONContent
+ }
+ lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace)
+ if content[lastRuneIndex] == ',' {
+ return nil, ErrInvalidJSONContent
+ }
+ js.formatLength = lastRuneIndex + 1
+ js.formatTail = content[js.formatLength:]
+
+ if len(signatures) > 0 {
+ for _, signature := range signatures {
+ var parsedJSig jsParsedSignature
+
+ if err := json.Unmarshal(signature, &parsedJSig); err != nil {
+ return nil, err
+ }
+
+ // TODO(stevvooe): A lot of the code below is repeated in
+ // ParseJWS. It will require more refactoring to fix that.
+ jsig := jsSignature{
+ Header: jsHeader{
+ Algorithm: parsedJSig.Header.Algorithm,
+ },
+ Signature: parsedJSig.Signature,
+ Protected: parsedJSig.Protected,
+ }
+
+ if parsedJSig.Header.Chain != nil {
+ jsig.Header.Chain = parsedJSig.Header.Chain
+ }
+
+ if parsedJSig.Header.JWK != nil {
+ publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK))
+ if err != nil {
+ return nil, err
+ }
+ jsig.Header.JWK = publicKey
+ }
+
+ js.signatures = append(js.signatures, jsig)
+ }
+ }
+
+ return js, nil
+}
+
+// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or
+// struct. JWS will need to be signed before serializing or storing.
+func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) {
+ switch content.(type) {
+ case map[string]interface{}:
+ case struct{}:
+ default:
+ return nil, errors.New("invalid data type")
+ }
+
+ js := newJSONSignature()
+ js.indent = " "
+
+ payload, err := json.MarshalIndent(content, "", js.indent)
+ if err != nil {
+ return nil, err
+ }
+ js.payload = joseBase64UrlEncode(payload)
+
+ // Remove '\n}' from formatted section, put in protected header
+ js.formatLength = len(payload) - 2
+ js.formatTail = payload[js.formatLength:]
+
+ return js, nil
+}
+
+func readIntFromMap(key string, m map[string]interface{}) (int, bool) {
+ value, ok := m[key]
+ if !ok {
+ return 0, false
+ }
+ switch v := value.(type) {
+ case int:
+ return v, true
+ case float64:
+ return int(v), true
+ default:
+ return 0, false
+ }
+}
+
+func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) {
+ value, ok := m[key]
+ if !ok {
+ return "", false
+ }
+ v, ok = value.(string)
+ return
+}
+
+// ParsePrettySignature parses a formatted signature into a
+// JSON signature. If the signatures are missing the format information
+// an error is thrown. The formatted signature must be created by
+// the same method as format signature.
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) {
+ var contentMap map[string]json.RawMessage
+ err := json.Unmarshal(content, &contentMap)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling content: %s", err)
+ }
+ sigMessage, ok := contentMap[signatureKey]
+ if !ok {
+ return nil, ErrMissingSignatureKey
+ }
+
+ var signatureBlocks []jsParsedSignature
+ err = json.Unmarshal([]byte(sigMessage), &signatureBlocks)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling signatures: %s", err)
+ }
+
+ js := newJSONSignature()
+ js.signatures = make([]jsSignature, len(signatureBlocks))
+
+ for i, signatureBlock := range signatureBlocks {
+ protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected)
+ if err != nil {
+ return nil, fmt.Errorf("base64 decode error: %s", err)
+ }
+ var protectedHeader map[string]interface{}
+ err = json.Unmarshal(protectedBytes, &protectedHeader)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling protected header: %s", err)
+ }
+
+ formatLength, ok := readIntFromMap("formatLength", protectedHeader)
+ if !ok {
+ return nil, errors.New("missing formatted length")
+ }
+ encodedTail, ok := readStringFromMap("formatTail", protectedHeader)
+ if !ok {
+ return nil, errors.New("missing formatted tail")
+ }
+ formatTail, err := joseBase64UrlDecode(encodedTail)
+ if err != nil {
+ return nil, fmt.Errorf("base64 decode error on tail: %s", err)
+ }
+ if js.formatLength == 0 {
+ js.formatLength = formatLength
+ } else if js.formatLength != formatLength {
+ return nil, errors.New("conflicting format length")
+ }
+ if len(js.formatTail) == 0 {
+ js.formatTail = formatTail
+ } else if bytes.Compare(js.formatTail, formatTail) != 0 {
+ return nil, errors.New("conflicting format tail")
+ }
+
+ header := jsHeader{
+ Algorithm: signatureBlock.Header.Algorithm,
+ Chain: signatureBlock.Header.Chain,
+ }
+ if signatureBlock.Header.JWK != nil {
+ publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK))
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling public key: %s", err)
+ }
+ header.JWK = publicKey
+ }
+ js.signatures[i] = jsSignature{
+ Header: header,
+ Signature: signatureBlock.Signature,
+ Protected: signatureBlock.Protected,
+ }
+ }
+ if js.formatLength > len(content) {
+ return nil, errors.New("invalid format length")
+ }
+ formatted := make([]byte, js.formatLength+len(js.formatTail))
+ copy(formatted, content[:js.formatLength])
+ copy(formatted[js.formatLength:], js.formatTail)
+ js.indent = detectJSONIndent(formatted)
+ js.payload = joseBase64UrlEncode(formatted)
+
+ return js, nil
+}
+
+// PrettySignature formats a json signature into an easy to read
+// single json serialized object.
+func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) {
+ if len(js.signatures) == 0 {
+ return nil, errors.New("no signatures")
+ }
+ payload, err := joseBase64UrlDecode(js.payload)
+ if err != nil {
+ return nil, err
+ }
+ payload = payload[:js.formatLength]
+
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ var marshalled []byte
+ var marshallErr error
+ if js.indent != "" {
+ marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent)
+ } else {
+ marshalled, marshallErr = json.Marshal(js.signatures)
+ }
+ if marshallErr != nil {
+ return nil, marshallErr
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34))
+ buf.Write(payload)
+ buf.WriteByte(',')
+ if js.indent != "" {
+ buf.WriteByte('\n')
+ buf.WriteString(js.indent)
+ buf.WriteByte('"')
+ buf.WriteString(signatureKey)
+ buf.WriteString("\": ")
+ buf.Write(marshalled)
+ buf.WriteByte('\n')
+ } else {
+ buf.WriteByte('"')
+ buf.WriteString(signatureKey)
+ buf.WriteString("\":")
+ buf.Write(marshalled)
+ }
+ buf.WriteByte('}')
+
+ return buf.Bytes(), nil
+}
+
+// Signatures provides the signatures on this JWS as opaque blobs, sorted by
+// keyID. These blobs can be stored and reassembled with payloads. Internally,
+// they are simply marshaled json web signatures but implementations should
+// not rely on this.
+func (js *JSONSignature) Signatures() ([][]byte, error) {
+ sort.Sort(jsSignaturesSorted(js.signatures))
+
+ var sb [][]byte
+ for _, jsig := range js.signatures {
+ p, err := json.Marshal(jsig)
+ if err != nil {
+ return nil, err
+ }
+
+ sb = append(sb, p)
+ }
+
+ return sb, nil
+}
+
+// Merge combines the signatures from one or more other signatures into the
+// method receiver. If the payloads differ for any argument, an error will be
+// returned and the receiver will not be modified.
+func (js *JSONSignature) Merge(others ...*JSONSignature) error {
+ merged := js.signatures
+ for _, other := range others {
+ if js.payload != other.payload {
+ return fmt.Errorf("payloads differ from merge target")
+ }
+ merged = append(merged, other.signatures...)
+ }
+
+ js.signatures = merged
+ return nil
+}
diff --git a/unum/vendor/github.com/docker/libtrust/key.go b/unum/vendor/github.com/docker/libtrust/key.go
new file mode 100644
index 0000000..73642db
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/key.go
@@ -0,0 +1,253 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// PublicKey is a generic interface for a Public Key.
+type PublicKey interface {
+ // KeyType returns the key type for this key. For elliptic curve keys,
+ // this value should be "EC". For RSA keys, this value should be "RSA".
+ KeyType() string
+ // KeyID returns a distinct identifier which is unique to this Public Key.
+ // The format generated by this library is a base32 encoding of a 240 bit
+ // hash of the public key data divided into 12 groups like so:
+ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+ KeyID() string
+	// Verify verifies the signature of the data in the io.Reader using this
+ // Public Key. The alg parameter should identify the digital signature
+ // algorithm which was used to produce the signature and should be
+ // supported by this public key. Returns a nil error if the signature
+ // is valid.
+ Verify(data io.Reader, alg string, signature []byte) error
+ // CryptoPublicKey returns the internal object which can be used as a
+ // crypto.PublicKey for use with other standard library operations. The type
+ // is either *rsa.PublicKey or *ecdsa.PublicKey
+ CryptoPublicKey() crypto.PublicKey
+ // These public keys can be serialized to the standard JSON encoding for
+ // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web
+ // Algorithms.
+ MarshalJSON() ([]byte, error)
+ // These keys can also be serialized to the standard PEM encoding.
+ PEMBlock() (*pem.Block, error)
+ // The string representation of a key is its key type and ID.
+ String() string
+ AddExtendedField(string, interface{})
+ GetExtendedField(string) interface{}
+}
+
+// PrivateKey is a generic interface for a Private Key.
+type PrivateKey interface {
+ // A PrivateKey contains all fields and methods of a PublicKey of the
+ // same type. The MarshalJSON method also outputs the private key as a
+ // JSON Web Key, and the PEMBlock method outputs the private key as a
+ // PEM block.
+ PublicKey
+ // PublicKey returns the PublicKey associated with this PrivateKey.
+ PublicKey() PublicKey
+ // Sign signs the data read from the io.Reader using a signature algorithm
+ // supported by the private key. If the specified hashing algorithm is
+ // supported by this key, that hash function is used to generate the
+	// signature otherwise the default hashing algorithm for this key is
+ // used. Returns the signature and identifier of the algorithm used.
+ Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error)
+	// CryptoPrivateKey returns the internal object which can be used as a
+	// crypto.PrivateKey for use with other standard library operations. The
+	// type is either *rsa.PrivateKey or *ecdsa.PrivateKey
+ CryptoPrivateKey() crypto.PrivateKey
+}
+
+// FromCryptoPublicKey returns a libtrust PublicKey representation of the given
+// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
+ switch cryptoPublicKey := cryptoPublicKey.(type) {
+ case *ecdsa.PublicKey:
+ return fromECPublicKey(cryptoPublicKey)
+ case *rsa.PublicKey:
+ return fromRSAPublicKey(cryptoPublicKey), nil
+ default:
+ return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
+ }
+}
+
+// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
+// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
+// key is of an unsupported type.
+func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
+ switch cryptoPrivateKey := cryptoPrivateKey.(type) {
+ case *ecdsa.PrivateKey:
+ return fromECPrivateKey(cryptoPrivateKey)
+ case *rsa.PrivateKey:
+ return fromRSAPrivateKey(cryptoPrivateKey), nil
+ default:
+ return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
+ }
+}
+
+// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
+// PublicKey or an error if there is a problem with the encoding.
+func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
+ pemBlock, _ := pem.Decode(data)
+ if pemBlock == nil {
+ return nil, errors.New("unable to find PEM encoded data")
+ } else if pemBlock.Type != "PUBLIC KEY" {
+ return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
+ }
+
+ return pubKeyFromPEMBlock(pemBlock)
+}
+
+// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
+// PEM blocks appended one after the other and returns a slice of PublicKey
+// objects that it finds.
+func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
+ pubKeys := []PublicKey{}
+
+ for {
+ var pemBlock *pem.Block
+ pemBlock, data = pem.Decode(data)
+ if pemBlock == nil {
+ break
+ } else if pemBlock.Type != "PUBLIC KEY" {
+ return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
+ }
+
+ pubKey, err := pubKeyFromPEMBlock(pemBlock)
+ if err != nil {
+ return nil, err
+ }
+
+ pubKeys = append(pubKeys, pubKey)
+ }
+
+ return pubKeys, nil
+}
+
+// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
+// PrivateKey or an error if there is a problem with the encoding.
+func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
+ pemBlock, _ := pem.Decode(data)
+ if pemBlock == nil {
+ return nil, errors.New("unable to find PEM encoded data")
+ }
+
+ var key PrivateKey
+
+ switch {
+ case pemBlock.Type == "RSA PRIVATE KEY":
+ rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
+ }
+ key = fromRSAPrivateKey(rsaPrivateKey)
+ case pemBlock.Type == "EC PRIVATE KEY":
+ ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
+ }
+ key, err = fromECPrivateKey(ecPrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
+ }
+
+ addPEMHeadersToKey(pemBlock, key.PublicKey())
+
+ return key, nil
+}
+
+// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
+// Public Key to be used with libtrust.
+func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
+ jwk := make(map[string]interface{})
+
+ err := json.Unmarshal(data, &jwk)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "decoding JWK Public Key JSON data: %s\n", err,
+ )
+ }
+
+ // Get the Key Type value.
+ kty, err := stringFromMap(jwk, "kty")
+ if err != nil {
+ return nil, fmt.Errorf("JWK Public Key type: %s", err)
+ }
+
+ switch {
+ case kty == "EC":
+ // Call out to unmarshal EC public key.
+ return ecPublicKeyFromMap(jwk)
+ case kty == "RSA":
+ // Call out to unmarshal RSA public key.
+ return rsaPublicKeyFromMap(jwk)
+ default:
+ return nil, fmt.Errorf(
+ "JWK Public Key type not supported: %q\n", kty,
+ )
+ }
+}
+
+// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
+// and returns a slice of Public Key objects.
+func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
+ rawKeys, err := loadJSONKeySetRaw(data)
+ if err != nil {
+ return nil, err
+ }
+
+ pubKeys := make([]PublicKey, 0, len(rawKeys))
+
+ for _, rawKey := range rawKeys {
+ pubKey, err := UnmarshalPublicKeyJWK(rawKey)
+ if err != nil {
+ return nil, err
+ }
+ pubKeys = append(pubKeys, pubKey)
+ }
+
+ return pubKeys, nil
+}
+
+// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
+// Private Key to be used with libtrust.
+func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
+ jwk := make(map[string]interface{})
+
+ err := json.Unmarshal(data, &jwk)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "decoding JWK Private Key JSON data: %s\n", err,
+ )
+ }
+
+ // Get the Key Type value.
+ kty, err := stringFromMap(jwk, "kty")
+ if err != nil {
+ return nil, fmt.Errorf("JWK Private Key type: %s", err)
+ }
+
+ switch {
+ case kty == "EC":
+ // Call out to unmarshal EC private key.
+ return ecPrivateKeyFromMap(jwk)
+ case kty == "RSA":
+ // Call out to unmarshal RSA private key.
+ return rsaPrivateKeyFromMap(jwk)
+ default:
+ return nil, fmt.Errorf(
+ "JWK Private Key type not supported: %q\n", kty,
+ )
+ }
+}
diff --git a/unum/vendor/github.com/docker/libtrust/key_files.go b/unum/vendor/github.com/docker/libtrust/key_files.go
new file mode 100644
index 0000000..c526de5
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/key_files.go
@@ -0,0 +1,255 @@
+package libtrust
+
+import (
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+var (
+ // ErrKeyFileDoesNotExist indicates that the private key file does not exist.
+ ErrKeyFileDoesNotExist = errors.New("key file does not exist")
+)
+
+func readKeyFileBytes(filename string) ([]byte, error) {
+ data, err := ioutil.ReadFile(filename)
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = ErrKeyFileDoesNotExist
+ } else {
+ err = fmt.Errorf("unable to read key file %s: %s", filename, err)
+ }
+
+ return nil, err
+ }
+
+ return data, nil
+}
+
+/*
+ Loading and Saving of Public and Private Keys in either PEM or JWK format.
+*/
+
+// LoadKeyFile opens the given filename and attempts to read a Private Key
+// encoded in either PEM or JWK format (if .json or .jwk file extension).
+func LoadKeyFile(filename string) (PrivateKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var key PrivateKey
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ key, err = UnmarshalPrivateKeyJWK(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
+ }
+ } else {
+ key, err = UnmarshalPrivateKeyPEM(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
+ }
+ }
+
+ return key, nil
+}
+
+// LoadPublicKeyFile opens the given filename and attempts to read a Public Key
+// encoded in either PEM or JWK format (if .json or .jwk file extension).
+func LoadPublicKeyFile(filename string) (PublicKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var key PublicKey
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ key, err = UnmarshalPublicKeyJWK(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode public key JWK: %s", err)
+ }
+ } else {
+ key, err = UnmarshalPublicKeyPEM(contents)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode public key PEM: %s", err)
+ }
+ }
+
+ return key, nil
+}
+
+// SaveKey saves the given key to a file using the provided filename.
+// This process will overwrite any existing file at the provided location.
+func SaveKey(filename string, key PrivateKey) error {
+ var encodedKey []byte
+ var err error
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ // Encode in JSON Web Key format.
+ encodedKey, err = json.MarshalIndent(key, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode private key JWK: %s", err)
+ }
+ } else {
+ // Encode in PEM format.
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encode private key PEM: %s", err)
+ }
+ encodedKey = pem.EncodeToMemory(pemBlock)
+ }
+
+ err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600))
+ if err != nil {
+ return fmt.Errorf("unable to write private key file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+// SavePublicKey saves the given public key to the file.
+func SavePublicKey(filename string, key PublicKey) error {
+ var encodedKey []byte
+ var err error
+
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ // Encode in JSON Web Key format.
+ encodedKey, err = json.MarshalIndent(key, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode public key JWK: %s", err)
+ }
+ } else {
+ // Encode in PEM format.
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encode public key PEM: %s", err)
+ }
+ encodedKey = pem.EncodeToMemory(pemBlock)
+ }
+
+ err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to write public key file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+// Public Key Set files
+
+type jwkSet struct {
+ Keys []json.RawMessage `json:"keys"`
+}
+
+// LoadKeySetFile loads a key set
+func LoadKeySetFile(filename string) ([]PublicKey, error) {
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ return loadJSONKeySetFile(filename)
+ }
+
+ // Must be a PEM format file
+ return loadPEMKeySetFile(filename)
+}
+
+func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) {
+ if len(data) == 0 {
+ // This is okay, just return an empty slice.
+ return []json.RawMessage{}, nil
+ }
+
+ keySet := jwkSet{}
+
+ err := json.Unmarshal(data, &keySet)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err)
+ }
+
+ return keySet.Keys, nil
+}
+
+func loadJSONKeySetFile(filename string) ([]PublicKey, error) {
+ contents, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return nil, err
+ }
+
+ return UnmarshalPublicKeyJWKSet(contents)
+}
+
+func loadPEMKeySetFile(filename string) ([]PublicKey, error) {
+ data, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return nil, err
+ }
+
+ return UnmarshalPublicKeyPEMBundle(data)
+}
+
+// AddKeySetFile adds a key to a key set
+func AddKeySetFile(filename string, key PublicKey) error {
+ if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
+ return addKeySetJSONFile(filename, key)
+ }
+
+ // Must be a PEM format file
+ return addKeySetPEMFile(filename, key)
+}
+
+func addKeySetJSONFile(filename string, key PublicKey) error {
+ encodedKey, err := json.Marshal(key)
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted client key: %s", err)
+ }
+
+ contents, err := readKeyFileBytes(filename)
+ if err != nil && err != ErrKeyFileDoesNotExist {
+ return err
+ }
+
+ rawEntries, err := loadJSONKeySetRaw(contents)
+ if err != nil {
+ return err
+ }
+
+ rawEntries = append(rawEntries, json.RawMessage(encodedKey))
+ entriesWrapper := jwkSet{Keys: rawEntries}
+
+ encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ")
+ if err != nil {
+ return fmt.Errorf("unable to encode trusted client keys: %s", err)
+ }
+
+ err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err)
+ }
+
+ return nil
+}
+
+func addKeySetPEMFile(filename string, key PublicKey) error {
+ // Encode to PEM, open file for appending, write PEM.
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644))
+ if err != nil {
+ return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err)
+ }
+ defer file.Close()
+
+ pemBlock, err := key.PEMBlock()
+ if err != nil {
+ return fmt.Errorf("unable to encoded trusted key: %s", err)
+ }
+
+ _, err = file.Write(pem.EncodeToMemory(pemBlock))
+ if err != nil {
+ return fmt.Errorf("unable to write trusted keys file: %s", err)
+ }
+
+ return nil
+}
diff --git a/unum/vendor/github.com/docker/libtrust/key_manager.go b/unum/vendor/github.com/docker/libtrust/key_manager.go
new file mode 100644
index 0000000..9a98ae3
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/key_manager.go
@@ -0,0 +1,175 @@
+package libtrust
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "os"
+ "path"
+ "sync"
+)
+
// ClientKeyManager manages client keys on the filesystem
type ClientKeyManager struct {
	key        PrivateKey // private key of the server owning this manager
	clientFile string     // path to a key set file of authorized client keys (may be empty)
	clientDir  string     // directory of individual authorized client key files (may be empty)

	clientLock sync.RWMutex // guards clients
	clients    []PublicKey  // currently loaded set of authorized client keys

	configLock sync.Mutex    // guards configs
	configs    []*tls.Config // tls configs registered via RegisterTLSConfig
}
+
+// NewClientKeyManager loads a new manager from a set of key files
+// and managed by the given private key.
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) {
+ m := &ClientKeyManager{
+ key: trustKey,
+ clientFile: clientFile,
+ clientDir: clientDir,
+ }
+ if err := m.loadKeys(); err != nil {
+ return nil, err
+ }
+ // TODO Start watching file and directory
+
+ return m, nil
+}
+
+func (c *ClientKeyManager) loadKeys() (err error) {
+ // Load authorized keys file
+ var clients []PublicKey
+ if c.clientFile != "" {
+ clients, err = LoadKeySetFile(c.clientFile)
+ if err != nil {
+ return fmt.Errorf("unable to load authorized keys: %s", err)
+ }
+ }
+
+ // Add clients from authorized keys directory
+ files, err := ioutil.ReadDir(c.clientDir)
+ if err != nil && !os.IsNotExist(err) {
+ return fmt.Errorf("unable to open authorized keys directory: %s", err)
+ }
+ for _, f := range files {
+ if !f.IsDir() {
+ publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name()))
+ if err != nil {
+ return fmt.Errorf("unable to load authorized key file: %s", err)
+ }
+ clients = append(clients, publicKey)
+ }
+ }
+
+ c.clientLock.Lock()
+ c.clients = clients
+ c.clientLock.Unlock()
+
+ return nil
+}
+
+// RegisterTLSConfig registers a tls configuration to manager
+// such that any changes to the keys may be reflected in
+// the tls client CA pool
+func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error {
+ c.clientLock.RLock()
+ certPool, err := GenerateCACertPool(c.key, c.clients)
+ if err != nil {
+ return fmt.Errorf("CA pool generation error: %s", err)
+ }
+ c.clientLock.RUnlock()
+
+ tlsConfig.ClientCAs = certPool
+
+ c.configLock.Lock()
+ c.configs = append(c.configs, tlsConfig)
+ c.configLock.Unlock()
+
+ return nil
+}
+
+// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for
+// libtrust identity authentication for the domain specified
+func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) {
+ tlsConfig := newTLSConfig()
+
+ tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+ if err := clients.RegisterTLSConfig(tlsConfig); err != nil {
+ return nil, err
+ }
+
+ // Generate cert
+ ips, domains, err := parseAddr(addr)
+ if err != nil {
+ return nil, err
+ }
+ // add domain that it expects clients to use
+ domains = append(domains, domain)
+ x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips)
+ if err != nil {
+ return nil, fmt.Errorf("certificate generation error: %s", err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{{
+ Certificate: [][]byte{x509Cert.Raw},
+ PrivateKey: trustKey.CryptoPrivateKey(),
+ Leaf: x509Cert,
+ }}
+
+ return tlsConfig, nil
+}
+
+// NewCertAuthTLSConfig creates a tls.Config for the server to use for
+// certificate authentication
+func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {
+ tlsConfig := newTLSConfig()
+
+ cert, err := tls.LoadX509KeyPair(certPath, keyPath)
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+
+ // Verify client certificates against a CA?
+ if caPath != "" {
+ certPool := x509.NewCertPool()
+ file, err := ioutil.ReadFile(caPath)
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't read CA certificate: %s", err)
+ }
+ certPool.AppendCertsFromPEM(file)
+
+ tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
+ tlsConfig.ClientCAs = certPool
+ }
+
+ return tlsConfig, nil
+}
+
+func newTLSConfig() *tls.Config {
+ return &tls.Config{
+ NextProtos: []string{"http/1.1"},
+ // Avoid fallback on insecure SSL protocols
+ MinVersion: tls.VersionTLS10,
+ }
+}
+
+// parseAddr parses an address into an array of IPs and domains
+func parseAddr(addr string) ([]net.IP, []string, error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ var domains []string
+ var ips []net.IP
+ ip := net.ParseIP(host)
+ if ip != nil {
+ ips = []net.IP{ip}
+ } else {
+ domains = []string{host}
+ }
+ return ips, domains, nil
+}
diff --git a/unum/vendor/github.com/docker/libtrust/rsa_key.go b/unum/vendor/github.com/docker/libtrust/rsa_key.go
new file mode 100644
index 0000000..dac4cac
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/rsa_key.go
@@ -0,0 +1,427 @@
+package libtrust
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+/*
 * RSA PUBLIC KEY
+ */
+
// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms.
type rsaPublicKey struct {
	*rsa.PublicKey
	// extended holds additional JWK fields (e.g. "kid", "hosts") kept
	// alongside the raw key material for serialization.
	extended map[string]interface{}
}
+
+func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey {
+ return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}}
+}
+
// KeyType returns the JWK key type for RSA keys, i.e., "RSA".
// This is the value of the "kty" parameter in the JWK serialization.
func (k *rsaPublicKey) KeyType() string {
	return "RSA"
}
+
// KeyID returns a distinct identifier which is unique to this Public Key.
// It is a fingerprint derived from the key material; see keyIDFromCryptoKey.
func (k *rsaPublicKey) KeyID() string {
	return keyIDFromCryptoKey(k)
}
+
+func (k *rsaPublicKey) String() string {
+ return fmt.Sprintf("RSA Public Key <%s>", k.KeyID())
+}
+
+// Verify verifyies the signature of the data in the io.Reader using this Public Key.
+// The alg parameter should be the name of the JWA digital signature algorithm
+// which was used to produce the signature and should be supported by this
+// public key. Returns a nil error if the signature is valid.
+func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error {
+ // Verify the signature of the given date, return non-nil error if valid.
+ sigAlg, err := rsaSignatureAlgorithmByName(alg)
+ if err != nil {
+ return fmt.Errorf("unable to verify Signature: %s", err)
+ }
+
+ hasher := sigAlg.HashID().New()
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature)
+ if err != nil {
+ return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err)
+ }
+
+ return nil
+}
+
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. The type
// is either *rsa.PublicKey or *ecdsa.PublicKey
// For this implementation it is always *rsa.PublicKey.
func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
	return k.PublicKey
}
+
+func (k *rsaPublicKey) toMap() map[string]interface{} {
+ jwk := make(map[string]interface{})
+ for k, v := range k.extended {
+ jwk[k] = v
+ }
+ jwk["kty"] = k.KeyType()
+ jwk["kid"] = k.KeyID()
+ jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
+ jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))
+
+ return jwk
+}
+
// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
// RSA keys. It simply encodes the map produced by toMap.
func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
	return json.Marshal(k.toMap())
}
+
// PEMBlock serializes this Public Key to DER-encoded PKIX format.
// NOTE(review): as a side effect this stores the key ID in k.extended
// under "kid" so it is emitted as a PEM header.
func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
	derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
	if err != nil {
		return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
	}
	k.extended["kid"] = k.KeyID() // For display purposes.
	return createPemBlock("PUBLIC KEY", derBytes, k.extended)
}
+
// AddExtendedField stores value under field in the key's extended JWK
// fields, overwriting any previous value.
func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
	k.extended[field] = value
}
+
+func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
+ v, ok := k.extended[field]
+ if !ok {
+ return nil
+ }
+ return v
+}
+
// rsaPublicKeyFromMap parses a JWK map into an rsaPublicKey.
// The JWK key type (kty) has already been determined to be "RSA" by the
// caller. This extracts 'n', 'e', and the optional 'kid' and checks for
// consistency. Note that stringFromMap / parseRSA*Param remove consumed
// fields from jwk, so whatever remains afterwards becomes the key's
// extended fields.
func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
	// JWK key type (kty) has already been determined to be "RSA".
	// Need to extract 'n', 'e', and 'kid' and check for
	// consistency.

	// Get the modulus parameter N.
	nB64Url, err := stringFromMap(jwk, "n")
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
	}

	n, err := parseRSAModulusParam(nB64Url)
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
	}

	// Get the public exponent E.
	eB64Url, err := stringFromMap(jwk, "e")
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
	}

	e, err := parseRSAPublicExponentParam(eB64Url)
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
	}

	key := &rsaPublicKey{
		PublicKey: &rsa.PublicKey{N: n, E: e},
	}

	// Key ID is optional, but if it exists, it should match the key.
	_, ok := jwk["kid"]
	if ok {
		kid, err := stringFromMap(jwk, "kid")
		if err != nil {
			return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
		}
		// The declared ID must match the fingerprint computed from
		// the key material itself.
		if kid != key.KeyID() {
			return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
		}
	}

	// Reject maps that smuggle in a private exponent.
	if _, ok := jwk["d"]; ok {
		return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
	}

	// Remaining (unconsumed) fields are preserved as extended fields.
	key.extended = jwk

	return key, nil
}
+
+/*
 * RSA PRIVATE KEY
+ */
+
// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
type rsaPrivateKey struct {
	rsaPublicKey    // embedded public half (provides KeyID, KeyType, extended fields)
	*rsa.PrivateKey // the crypto/rsa private key material
}
+
+func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
+ return &rsaPrivateKey{
+ *fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
+ cryptoPrivateKey,
+ }
+}
+
// PublicKey returns the Public Key data associated with this Private Key.
// The returned value points at the embedded public half, so it shares
// state (e.g. extended fields) with this key.
func (k *rsaPrivateKey) PublicKey() PublicKey {
	return &k.rsaPublicKey
}
+
+func (k *rsaPrivateKey) String() string {
+ return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the RSA private key. If the specified hashing algorithm is supported by
+// this key, that hash function is used to generate the signature otherwise the
+// the default hashing algorithm for this key is used. Returns the signature
+// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
+// "RS512".
+func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+ // Generate a signature of the data using the internal alg.
+ sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
+ hasher := sigAlg.HashID().New()
+
+ _, err = io.Copy(hasher, data)
+ if err != nil {
+ return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+ }
+ hash := hasher.Sum(nil)
+
+ signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
+ if err != nil {
+ return nil, "", fmt.Errorf("error producing signature: %s", err)
+ }
+
+ alg = sigAlg.HeaderParam()
+
+ return
+}
+
// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. The
// type is *rsa.PrivateKey. (The original comment said crypto.PublicKey /
// *rsa.PublicKey — a copy/paste from the public-key variant.)
func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
	return k.PrivateKey
}
+
// toMap returns the JWK map representation of the private key, extending
// the public-key map with the private parameters 'd', 'p', 'q', 'dp',
// 'dq', 'qi' and, for multi-prime keys, 'oth'.
func (k *rsaPrivateKey) toMap() map[string]interface{} {
	k.Precompute() // Make sure the precomputed values are stored.
	jwk := k.rsaPublicKey.toMap()

	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())

	// Primes beyond the first two go into the optional "oth" parameter
	// (multi-prime RSA).
	otherPrimes := k.Primes[2:]

	if len(otherPrimes) > 0 {
		otherPrimesInfo := make([]interface{}, len(otherPrimes))
		for i, r := range otherPrimes {
			otherPrimeInfo := make(map[string]string, 3)
			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
			// Precomputed.CRTValues[i] corresponds to Primes[i+2].
			crtVal := k.Precomputed.CRTValues[i]
			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
			otherPrimesInfo[i] = otherPrimeInfo
		}
		jwk["oth"] = otherPrimesInfo
	}

	return jwk
}
+
// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
// RSA keys. It simply encodes the map produced by toMap.
func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
	return json.Marshal(k.toMap())
}
+
// PEMBlock serializes this Private Key to DER-encoded PKIX format.
// NOTE(review): the PEM header used here is "keyID" while the public-key
// variant writes "kid" — an upstream inconsistency, preserved as-is.
func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
	derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
	k.extended["keyID"] = k.KeyID() // For display purposes.
	return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
}
+
+func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) {
+ // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that
+ // only the private key exponent 'd' is REQUIRED, the others are just for
+ // signature/decryption optimizations and SHOULD be included when the JWK
+ // is produced. We MAY choose to accept a JWK which only includes 'd', but
+ // we're going to go ahead and not choose to accept it without the extra
+ // fields. Only the 'oth' field will be optional (for multi-prime keys).
+ privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err)
+ }
+ firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+ }
+
+ var oth interface{}
+ if _, ok := jwk["oth"]; ok {
+ oth = jwk["oth"]
+ delete(jwk, "oth")
+ }
+
+ // JWK key type (kty) has already been determined to be "RSA".
+ // Need to extract the public key information, then extract the private
+ // key values.
+ publicKey, err := rsaPublicKeyFromMap(jwk)
+ if err != nil {
+ return nil, err
+ }
+
+ privateKey := &rsa.PrivateKey{
+ PublicKey: *publicKey.PublicKey,
+ D: privateExponent,
+ Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor},
+ Precomputed: rsa.PrecomputedValues{
+ Dp: firstFactorCRT,
+ Dq: secondFactorCRT,
+ Qinv: crtCoeff,
+ },
+ }
+
+ if oth != nil {
+ // Should be an array of more JSON objects.
+ otherPrimesInfo, ok := oth.([]interface{})
+ if !ok {
+ return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array")
+ }
+ numOtherPrimeFactors := len(otherPrimesInfo)
+ if numOtherPrimeFactors == 0 {
+ return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty")
+ }
+ otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors)
+ productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor)
+ crtValues := make([]rsa.CRTValue, numOtherPrimeFactors)
+
+ for i, val := range otherPrimesInfo {
+ otherPrimeinfo, ok := val.(map[string]interface{})
+ if !ok {
+ return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object")
+ }
+
+ otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err)
+ }
+ otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err)
+ }
+ otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t")
+ if err != nil {
+ return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err)
+ }
+
+ crtValue := crtValues[i]
+ crtValue.Exp = otherFactorCRT
+ crtValue.Coeff = otherCrtCoeff
+ crtValue.R = productOfPrimes
+ otherPrimeFactors[i] = otherPrimeFactor
+ productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor)
+ }
+
+ privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...)
+ privateKey.Precomputed.CRTValues = crtValues
+ }
+
+ key := &rsaPrivateKey{
+ rsaPublicKey: *publicKey,
+ PrivateKey: privateKey,
+ }
+
+ return key, nil
+}
+
+/*
+ * Key Generation Functions.
+ */
+
+func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) {
+ k = new(rsaPrivateKey)
+ k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits)
+ if err != nil {
+ return nil, err
+ }
+
+ k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey
+ k.extended = make(map[string]interface{})
+
+ return
+}
+
// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA.
// The result is returned as the PrivateKey interface.
func GenerateRSA2048PrivateKey() (PrivateKey, error) {
	k, err := generateRSAPrivateKey(2048)
	if err != nil {
		return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err)
	}

	return k, nil
}
+
// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA.
// The result is returned as the PrivateKey interface.
func GenerateRSA3072PrivateKey() (PrivateKey, error) {
	k, err := generateRSAPrivateKey(3072)
	if err != nil {
		return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err)
	}

	return k, nil
}
+
// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA.
// The result is returned as the PrivateKey interface.
func GenerateRSA4096PrivateKey() (PrivateKey, error) {
	k, err := generateRSAPrivateKey(4096)
	if err != nil {
		return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err)
	}

	return k, nil
}
diff --git a/unum/vendor/github.com/docker/libtrust/util.go b/unum/vendor/github.com/docker/libtrust/util.go
new file mode 100644
index 0000000..d88176c
--- /dev/null
+++ b/unum/vendor/github.com/docker/libtrust/util.go
@@ -0,0 +1,363 @@
+package libtrust
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/elliptic"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "math/big"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// LoadOrCreateTrustKey will load a PrivateKey from the specified path
+func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) {
+ if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil {
+ return nil, err
+ }
+
+ trustKey, err := LoadKeyFile(trustKeyPath)
+ if err == ErrKeyFileDoesNotExist {
+ trustKey, err = GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, fmt.Errorf("error generating key: %s", err)
+ }
+
+ if err := SaveKey(trustKeyPath, trustKey); err != nil {
+ return nil, fmt.Errorf("error saving key file: %s", err)
+ }
+
+ dir, file := filepath.Split(trustKeyPath)
+ if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil {
+ return nil, fmt.Errorf("error saving public key file: %s", err)
+ }
+ } else if err != nil {
+ return nil, fmt.Errorf("error loading key file: %s", err)
+ }
+ return trustKey, nil
+}
+
+// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity
+// based authentication from the specified dockerUrl, the rootConfigPath and
+// the server name to which it is connecting.
+// If trustUnknownHosts is true it will automatically add the host to the
+// known-hosts.json in rootConfigPath.
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) {
+ tlsConfig := newTLSConfig()
+
+ trustKeyPath := filepath.Join(rootConfigPath, "key.json")
+ knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json")
+
+ u, err := url.Parse(dockerUrl)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse machine url")
+ }
+
+ if u.Scheme == "unix" {
+ return nil, nil
+ }
+
+ addr := u.Host
+ proto := "tcp"
+
+ trustKey, err := LoadOrCreateTrustKey(trustKeyPath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load trust key: %s", err)
+ }
+
+ knownHosts, err := LoadKeySetFile(knownHostsPath)
+ if err != nil {
+ return nil, fmt.Errorf("could not load trusted hosts file: %s", err)
+ }
+
+ allowedHosts, err := FilterByHosts(knownHosts, addr, false)
+ if err != nil {
+ return nil, fmt.Errorf("error filtering hosts: %s", err)
+ }
+
+ certPool, err := GenerateCACertPool(trustKey, allowedHosts)
+ if err != nil {
+ return nil, fmt.Errorf("Could not create CA pool: %s", err)
+ }
+
+ tlsConfig.ServerName = serverName
+ tlsConfig.RootCAs = certPool
+
+ x509Cert, err := GenerateSelfSignedClientCert(trustKey)
+ if err != nil {
+ return nil, fmt.Errorf("certificate generation error: %s", err)
+ }
+
+ tlsConfig.Certificates = []tls.Certificate{{
+ Certificate: [][]byte{x509Cert.Raw},
+ PrivateKey: trustKey.CryptoPrivateKey(),
+ Leaf: x509Cert,
+ }}
+
+ tlsConfig.InsecureSkipVerify = true
+
+ testConn, err := tls.Dial(proto, addr, tlsConfig)
+ if err != nil {
+ return nil, fmt.Errorf("tls Handshake error: %s", err)
+ }
+
+ opts := x509.VerifyOptions{
+ Roots: tlsConfig.RootCAs,
+ CurrentTime: time.Now(),
+ DNSName: tlsConfig.ServerName,
+ Intermediates: x509.NewCertPool(),
+ }
+
+ certs := testConn.ConnectionState().PeerCertificates
+ for i, cert := range certs {
+ if i == 0 {
+ continue
+ }
+ opts.Intermediates.AddCert(cert)
+ }
+
+ if _, err := certs[0].Verify(opts); err != nil {
+ if _, ok := err.(x509.UnknownAuthorityError); ok {
+ if trustUnknownHosts {
+ pubKey, err := FromCryptoPublicKey(certs[0].PublicKey)
+ if err != nil {
+ return nil, fmt.Errorf("error extracting public key from cert: %s", err)
+ }
+
+ pubKey.AddExtendedField("hosts", []string{addr})
+
+ if err := AddKeySetFile(knownHostsPath, pubKey); err != nil {
+ return nil, fmt.Errorf("error adding machine to known hosts: %s", err)
+ }
+ } else {
+ return nil, fmt.Errorf("unable to connect. unknown host: %s", addr)
+ }
+ }
+ }
+
+ testConn.Close()
+ tlsConfig.InsecureSkipVerify = false
+
+ return tlsConfig, nil
+}
+
// joseBase64UrlEncode encodes the given data using the standard base64 url
// encoding format but with all trailing '=' characters omitted in accordance
// with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlEncode(b []byte) string {
	encoded := base64.URLEncoding.EncodeToString(b)
	return strings.TrimRight(encoded, "=")
}
+
// joseBase64UrlDecode decodes the given string using the standard base64 url
// decoder but first adds the appropriate number of trailing '=' characters in
// accordance with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlDecode(s string) ([]byte, error) {
	// Strip whitespace possibly introduced by line wrapping.
	cleaned := strings.NewReplacer("\n", "", " ", "").Replace(s)
	rem := len(cleaned) % 4
	if rem == 1 {
		// No valid base64 string has length 1 mod 4.
		return nil, errors.New("illegal base64url string")
	}
	if rem > 0 {
		cleaned += strings.Repeat("=", 4-rem)
	}
	return base64.URLEncoding.DecodeString(cleaned)
}
+
// keyIDEncode base32-encodes b without padding and groups the result
// into colon-separated chunks of four characters; the final group keeps
// any remainder (4-7 characters), matching the original formatting.
func keyIDEncode(b []byte) string {
	s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=")
	numGroups := len(s) / 4
	if numGroups < 1 {
		return s
	}
	groups := make([]string, 0, numGroups)
	for i := 0; i < numGroups-1; i++ {
		groups = append(groups, s[i*4:i*4+4])
	}
	groups = append(groups, s[(numGroups-1)*4:])
	return strings.Join(groups, ":")
}
+
+func keyIDFromCryptoKey(pubKey PublicKey) string {
+ // Generate and return a 'libtrust' fingerprint of the public key.
+ // For an RSA key this should be:
+ // SHA256(DER encoded ASN1)
+ // Then truncated to 240 bits and encoded into 12 base32 groups like so:
+ // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP
+ derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey())
+ if err != nil {
+ return ""
+ }
+ hasher := crypto.SHA256.New()
+ hasher.Write(derBytes)
+ return keyIDEncode(hasher.Sum(nil)[:30])
+}
+
// stringFromMap removes key from m and returns its value, which must be
// a string; an error is returned (and m left untouched) when the key is
// missing or holds a non-string value.
func stringFromMap(m map[string]interface{}, key string) (string, error) {
	raw, present := m[key]
	if !present {
		return "", fmt.Errorf("%q value not specified", key)
	}
	s, isString := raw.(string)
	if !isString {
		return "", fmt.Errorf("%q value must be a string", key)
	}
	delete(m, key)
	return s, nil
}
+
+func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) {
+ curveByteLen := (curve.Params().BitSize + 7) >> 3
+
+ cBytes, err := joseBase64UrlDecode(cB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+ cByteLength := len(cBytes)
+ if cByteLength != curveByteLen {
+ return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen)
+ }
+ return new(big.Int).SetBytes(cBytes), nil
+}
+
+func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) {
+ dBytes, err := joseBase64UrlDecode(dB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+
+ // The length of this octet string MUST be ceiling(log-base-2(n)/8)
+ // octets (where n is the order of the curve). This is because the private
+ // key d must be in the interval [1, n-1] so the bitlength of d should be
+ // no larger than the bitlength of n-1. The easiest way to find the octet
+ // length is to take bitlength(n-1), add 7 to force a carry, and shift this
+ // bit sequence right by 3, which is essentially dividing by 8 and adding
+ // 1 if there is any remainder. Thus, the private key value d should be
+ // output to (bitlength(n-1)+7)>>3 octets.
+ n := curve.Params().N
+ octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+ dByteLength := len(dBytes)
+
+ if dByteLength != octetLength {
+ return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength)
+ }
+
+ return new(big.Int).SetBytes(dBytes), nil
+}
+
+func parseRSAModulusParam(nB64Url string) (*big.Int, error) {
+ nBytes, err := joseBase64UrlDecode(nB64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+
+ return new(big.Int).SetBytes(nBytes), nil
+}
+
// serializeRSAPublicExponentParam returns the big-endian encoding of e
// using the minimum number of octets (at least one), as required for the
// JWK "e" parameter.
// We MUST use the minimum number of octets to represent E.
// E is supposed to be 65537 for performance and security reasons
// and is what golang's rsa package generates, but it might be
// different if imported from some other generator.
func serializeRSAPublicExponentParam(e int) []byte {
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, uint32(e))
	// Strip leading zero octets, keeping at least one byte.
	// BUG FIX: the original scanned up to index 7 of this 4-byte
	// buffer, which panics with index out of range when e == 0.
	var i int
	for i = 0; i < len(buf)-1; i++ {
		if buf[i] != 0 {
			break
		}
	}
	return buf[i:]
}
+
+func parseRSAPublicExponentParam(eB64Url string) (int, error) {
+ eBytes, err := joseBase64UrlDecode(eB64Url)
+ if err != nil {
+ return 0, fmt.Errorf("invalid base64 URL encoding: %s", err)
+ }
+ // Only the minimum number of bytes were used to represent E, but
+ // binary.BigEndian.Uint32 expects at least 4 bytes, so we need
+ // to add zero padding if necassary.
+ byteLen := len(eBytes)
+ buf := make([]byte, 4-byteLen, 4)
+ eBytes = append(buf, eBytes...)
+
+ return int(binary.BigEndian.Uint32(eBytes)), nil
+}
+
+func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) {
+ b64Url, err := stringFromMap(m, key)
+ if err != nil {
+ return nil, err
+ }
+
+ paramBytes, err := joseBase64UrlDecode(b64Url)
+ if err != nil {
+ return nil, fmt.Errorf("invaled base64 URL encoding: %s", err)
+ }
+
+ return new(big.Int).SetBytes(paramBytes), nil
+}
+
// createPemBlock builds a pem.Block of the given type with derBytes as
// the payload and headers as PEM headers. String values are copied
// through; the []string "hosts" value is joined with commas. Other value
// types are silently skipped (the TODOs below were never implemented
// upstream, and that behavior is preserved).
func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) {
	pemHeaders := make(map[string]string, len(headers))
	for field, value := range headers {
		switch typed := value.(type) {
		case string:
			pemHeaders[field] = typed
		case []string:
			if field == "hosts" {
				pemHeaders[field] = strings.Join(typed, ",")
			}
			// TODO: return error for other non-encodable slices
		default:
			// TODO: return error for non-encodable types
		}
	}
	return &pem.Block{Type: name, Bytes: derBytes, Headers: pemHeaders}, nil
}
+
+func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) {
+ cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err)
+ }
+
+ pubKey, err := FromCryptoPublicKey(cryptoPublicKey)
+ if err != nil {
+ return nil, err
+ }
+
+ addPEMHeadersToKey(pemBlock, pubKey)
+
+ return pubKey, nil
+}
+
+func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) {
+ for key, value := range pemBlock.Headers {
+ var safeVal interface{}
+ if key == "hosts" {
+ safeVal = strings.Split(value, ",")
+ } else {
+ safeVal = value
+ }
+ pubKey.AddExtendedField(key, safeVal)
+ }
+}