diff options
Diffstat (limited to 'vendor/github.com/fsouza/go-dockerclient/external/github.com/docker')
91 files changed, 7339 insertions, 0 deletions
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go new file mode 100644 index 0000000..ba8b4f2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go | |||
@@ -0,0 +1,67 @@ | |||
1 | package opts | ||
2 | |||
3 | import ( | ||
4 | "bufio" | ||
5 | "fmt" | ||
6 | "os" | ||
7 | "strings" | ||
8 | ) | ||
9 | |||
10 | // ParseEnvFile reads a file with environment variables enumerated by lines | ||
11 | // | ||
12 | // ``Environment variable names used by the utilities in the Shell and | ||
13 | // Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase | ||
14 | // letters, digits, and the '_' (underscore) from the characters defined in | ||
15 | // Portable Character Set and do not begin with a digit. *But*, other | ||
16 | // characters may be permitted by an implementation; applications shall | ||
17 | // tolerate the presence of such names.'' | ||
18 | // -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html | ||
19 | // | ||
20 | // As of #16585, it's up to application inside docker to validate or not | ||
21 | // environment variables, that's why we just strip leading whitespace and | ||
22 | // nothing more. | ||
23 | func ParseEnvFile(filename string) ([]string, error) { | ||
24 | fh, err := os.Open(filename) | ||
25 | if err != nil { | ||
26 | return []string{}, err | ||
27 | } | ||
28 | defer fh.Close() | ||
29 | |||
30 | lines := []string{} | ||
31 | scanner := bufio.NewScanner(fh) | ||
32 | for scanner.Scan() { | ||
33 | // trim the line from all leading whitespace first | ||
34 | line := strings.TrimLeft(scanner.Text(), whiteSpaces) | ||
35 | // line is not empty, and not starting with '#' | ||
36 | if len(line) > 0 && !strings.HasPrefix(line, "#") { | ||
37 | data := strings.SplitN(line, "=", 2) | ||
38 | |||
39 | // trim the front of a variable, but nothing else | ||
40 | variable := strings.TrimLeft(data[0], whiteSpaces) | ||
41 | if strings.ContainsAny(variable, whiteSpaces) { | ||
42 | return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} | ||
43 | } | ||
44 | |||
45 | if len(data) > 1 { | ||
46 | |||
47 | // pass the value through, no trimming | ||
48 | lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) | ||
49 | } else { | ||
50 | // if only a pass-through variable is given, clean it up. | ||
51 | lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) | ||
52 | } | ||
53 | } | ||
54 | } | ||
55 | return lines, scanner.Err() | ||
56 | } | ||
57 | |||
// whiteSpaces lists the characters treated as whitespace when trimming
// variable names in env files.
var whiteSpaces = " \t"
59 | |||
// ErrBadEnvVariable is the typed error reported for a malformed environment
// variable definition (e.g. a name containing whitespace).
type ErrBadEnvVariable struct {
	msg string
}

// Error implements the error interface.
func (e ErrBadEnvVariable) Error() string {
	return fmt.Sprintf("poorly formatted environment: %s", e.msg)
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go new file mode 100644 index 0000000..d1b6985 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go | |||
@@ -0,0 +1,146 @@ | |||
1 | package opts | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "net" | ||
6 | "net/url" | ||
7 | "runtime" | ||
8 | "strconv" | ||
9 | "strings" | ||
10 | ) | ||
11 | |||
12 | var ( | ||
13 | // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// | ||
14 | // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter | ||
15 | // is not supplied. A better longer term solution would be to use a named | ||
16 | // pipe as the default on the Windows daemon. | ||
17 | // These are the IANA registered port numbers for use with Docker | ||
18 | // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker | ||
19 | DefaultHTTPPort = 2375 // Default HTTP Port | ||
20 | // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled | ||
21 | DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port | ||
22 | // DefaultUnixSocket Path for the unix socket. | ||
23 | // Docker daemon by default always listens on the default unix socket | ||
24 | DefaultUnixSocket = "/var/run/docker.sock" | ||
25 | // DefaultTCPHost constant defines the default host string used by docker on Windows | ||
26 | DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) | ||
27 | // DefaultTLSHost constant defines the default host string used by docker for TLS sockets | ||
28 | DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) | ||
29 | ) | ||
30 | |||
31 | // ValidateHost validates that the specified string is a valid host and returns it. | ||
32 | func ValidateHost(val string) (string, error) { | ||
33 | _, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val) | ||
34 | if err != nil { | ||
35 | return val, err | ||
36 | } | ||
37 | // Note: unlike most flag validators, we don't return the mutated value here | ||
38 | // we need to know what the user entered later (using ParseHost) to adjust for tls | ||
39 | return val, nil | ||
40 | } | ||
41 | |||
42 | // ParseHost and set defaults for a Daemon host string | ||
43 | func ParseHost(defaultHost, val string) (string, error) { | ||
44 | host, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val) | ||
45 | if err != nil { | ||
46 | return val, err | ||
47 | } | ||
48 | return host, nil | ||
49 | } | ||
50 | |||
51 | // parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. | ||
52 | // Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr | ||
53 | // defaultUnixAddr must be a absolute file path (no `unix://` prefix) | ||
54 | // defaultTCPAddr must be the full `tcp://host:port` form | ||
55 | func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) { | ||
56 | addr = strings.TrimSpace(addr) | ||
57 | if addr == "" { | ||
58 | if defaultAddr == defaultTLSHost { | ||
59 | return defaultTLSHost, nil | ||
60 | } | ||
61 | if runtime.GOOS != "windows" { | ||
62 | return fmt.Sprintf("unix://%s", defaultUnixAddr), nil | ||
63 | } | ||
64 | return defaultTCPAddr, nil | ||
65 | } | ||
66 | addrParts := strings.Split(addr, "://") | ||
67 | if len(addrParts) == 1 { | ||
68 | addrParts = []string{"tcp", addrParts[0]} | ||
69 | } | ||
70 | |||
71 | switch addrParts[0] { | ||
72 | case "tcp": | ||
73 | return parseTCPAddr(addrParts[1], defaultTCPAddr) | ||
74 | case "unix": | ||
75 | return parseUnixAddr(addrParts[1], defaultUnixAddr) | ||
76 | case "fd": | ||
77 | return addr, nil | ||
78 | default: | ||
79 | return "", fmt.Errorf("Invalid bind address format: %s", addr) | ||
80 | } | ||
81 | } | ||
82 | |||
// parseUnixAddr parses and validates that the specified address is a valid
// UNIX socket address. It returns a formatted UNIX socket address, either
// using the address parsed from addr, or the contents of defaultAddr if addr
// is a blank string.
func parseUnixAddr(addr string, defaultAddr string) (string, error) {
	addr = strings.TrimPrefix(addr, "unix://")
	// Any remaining scheme separator means the caller passed a non-unix URL.
	if strings.Contains(addr, "://") {
		return "", fmt.Errorf("Invalid proto, expected unix: %s", addr)
	}
	if addr == "" {
		addr = defaultAddr
	}
	return "unix://" + addr, nil
}
97 | |||
// parseTCPAddr parses and validates that the specified address is a valid TCP
// address. It returns a formatted TCP address, either using the address parsed
// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
// tryAddr is expected to have already been Trim()'d.
// defaultAddr must be in the full `tcp://host:port` form.
func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
	if tryAddr == "" || tryAddr == "tcp://" {
		return defaultAddr, nil
	}
	addr := strings.TrimPrefix(tryAddr, "tcp://")
	if strings.Contains(addr, "://") || addr == "" {
		return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
	}

	defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
	defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
	if err != nil {
		return "", err
	}
	// url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
	// not 1.4. See https://github.com/golang/go/issues/12200 and
	// https://github.com/golang/go/issues/6530.
	if strings.HasSuffix(addr, "]:") {
		addr += defaultPort
	}

	u, err := url.Parse("tcp://" + addr)
	if err != nil {
		return "", err
	}

	host, port, err := net.SplitHostPort(u.Host)
	if err != nil {
		return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
	}

	if host == "" {
		host = defaultHost
	}
	if port == "" {
		port = defaultPort
	}
	// Reject non-numeric ports. The original condition `err != nil && p == 0`
	// accepted out-of-range ports: strconv.Atoi returns a non-zero clamped
	// value together with ErrRange on overflow, so both clauses could not
	// hold at once and the error was swallowed.
	if _, err := strconv.Atoi(port); err != nil {
		return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
	}

	return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go new file mode 100644 index 0000000..611407a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go | |||
@@ -0,0 +1,8 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package opts | ||
4 | |||
5 | import "fmt" | ||
6 | |||
7 | // DefaultHost constant defines the default host string used by docker on other hosts than Windows | ||
8 | var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go new file mode 100644 index 0000000..ec52e9a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go | |||
@@ -0,0 +1,6 @@ | |||
1 | // +build windows | ||
2 | |||
3 | package opts | ||
4 | |||
5 | // DefaultHost constant defines the default host string used by docker on Windows | ||
6 | var DefaultHost = DefaultTCPHost | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go new file mode 100644 index 0000000..c7b0dc9 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go | |||
@@ -0,0 +1,42 @@ | |||
1 | package opts | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "net" | ||
6 | ) | ||
7 | |||
8 | // IPOpt holds an IP. It is used to store values from CLI flags. | ||
9 | type IPOpt struct { | ||
10 | *net.IP | ||
11 | } | ||
12 | |||
13 | // NewIPOpt creates a new IPOpt from a reference net.IP and a | ||
14 | // string representation of an IP. If the string is not a valid | ||
15 | // IP it will fallback to the specified reference. | ||
16 | func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { | ||
17 | o := &IPOpt{ | ||
18 | IP: ref, | ||
19 | } | ||
20 | o.Set(defaultVal) | ||
21 | return o | ||
22 | } | ||
23 | |||
24 | // Set sets an IPv4 or IPv6 address from a given string. If the given | ||
25 | // string is not parseable as an IP address it returns an error. | ||
26 | func (o *IPOpt) Set(val string) error { | ||
27 | ip := net.ParseIP(val) | ||
28 | if ip == nil { | ||
29 | return fmt.Errorf("%s is not an ip address", val) | ||
30 | } | ||
31 | *o.IP = ip | ||
32 | return nil | ||
33 | } | ||
34 | |||
35 | // String returns the IP address stored in the IPOpt. If stored IP is a | ||
36 | // nil pointer, it returns an empty string. | ||
37 | func (o *IPOpt) String() string { | ||
38 | if *o.IP == nil { | ||
39 | return "" | ||
40 | } | ||
41 | return o.IP.String() | ||
42 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go new file mode 100644 index 0000000..b244f5a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go | |||
@@ -0,0 +1,252 @@ | |||
1 | package opts | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "net" | ||
6 | "os" | ||
7 | "regexp" | ||
8 | "strings" | ||
9 | ) | ||
10 | |||
var (
	// alphaRegexp matches any ASCII letter; validateDomain uses it to require
	// at least one letter in a candidate domain.
	alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
	// domainRegexp validates dot-separated labels of letters, digits and
	// hyphens, permitting a trailing dot and trailing whitespace.
	// NOTE(review): the `(:?` sequences look like typos for the non-capturing
	// group `(?:` — as written they match an *optional colon* instead. The
	// pattern happens to behave acceptably for normal domains, so it is left
	// byte-identical here; confirm against upstream before changing.
	domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
)
15 | |||
16 | // ListOpts holds a list of values and a validation function. | ||
17 | type ListOpts struct { | ||
18 | values *[]string | ||
19 | validator ValidatorFctType | ||
20 | } | ||
21 | |||
22 | // NewListOpts creates a new ListOpts with the specified validator. | ||
23 | func NewListOpts(validator ValidatorFctType) ListOpts { | ||
24 | var values []string | ||
25 | return *NewListOptsRef(&values, validator) | ||
26 | } | ||
27 | |||
28 | // NewListOptsRef creates a new ListOpts with the specified values and validator. | ||
29 | func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { | ||
30 | return &ListOpts{ | ||
31 | values: values, | ||
32 | validator: validator, | ||
33 | } | ||
34 | } | ||
35 | |||
36 | func (opts *ListOpts) String() string { | ||
37 | return fmt.Sprintf("%v", []string((*opts.values))) | ||
38 | } | ||
39 | |||
40 | // Set validates if needed the input value and add it to the | ||
41 | // internal slice. | ||
42 | func (opts *ListOpts) Set(value string) error { | ||
43 | if opts.validator != nil { | ||
44 | v, err := opts.validator(value) | ||
45 | if err != nil { | ||
46 | return err | ||
47 | } | ||
48 | value = v | ||
49 | } | ||
50 | (*opts.values) = append((*opts.values), value) | ||
51 | return nil | ||
52 | } | ||
53 | |||
54 | // Delete removes the specified element from the slice. | ||
55 | func (opts *ListOpts) Delete(key string) { | ||
56 | for i, k := range *opts.values { | ||
57 | if k == key { | ||
58 | (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) | ||
59 | return | ||
60 | } | ||
61 | } | ||
62 | } | ||
63 | |||
64 | // GetMap returns the content of values in a map in order to avoid | ||
65 | // duplicates. | ||
66 | func (opts *ListOpts) GetMap() map[string]struct{} { | ||
67 | ret := make(map[string]struct{}) | ||
68 | for _, k := range *opts.values { | ||
69 | ret[k] = struct{}{} | ||
70 | } | ||
71 | return ret | ||
72 | } | ||
73 | |||
74 | // GetAll returns the values of slice. | ||
75 | func (opts *ListOpts) GetAll() []string { | ||
76 | return (*opts.values) | ||
77 | } | ||
78 | |||
79 | // GetAllOrEmpty returns the values of the slice | ||
80 | // or an empty slice when there are no values. | ||
81 | func (opts *ListOpts) GetAllOrEmpty() []string { | ||
82 | v := *opts.values | ||
83 | if v == nil { | ||
84 | return make([]string, 0) | ||
85 | } | ||
86 | return v | ||
87 | } | ||
88 | |||
89 | // Get checks the existence of the specified key. | ||
90 | func (opts *ListOpts) Get(key string) bool { | ||
91 | for _, k := range *opts.values { | ||
92 | if k == key { | ||
93 | return true | ||
94 | } | ||
95 | } | ||
96 | return false | ||
97 | } | ||
98 | |||
99 | // Len returns the amount of element in the slice. | ||
100 | func (opts *ListOpts) Len() int { | ||
101 | return len((*opts.values)) | ||
102 | } | ||
103 | |||
104 | //MapOpts holds a map of values and a validation function. | ||
105 | type MapOpts struct { | ||
106 | values map[string]string | ||
107 | validator ValidatorFctType | ||
108 | } | ||
109 | |||
110 | // Set validates if needed the input value and add it to the | ||
111 | // internal map, by splitting on '='. | ||
112 | func (opts *MapOpts) Set(value string) error { | ||
113 | if opts.validator != nil { | ||
114 | v, err := opts.validator(value) | ||
115 | if err != nil { | ||
116 | return err | ||
117 | } | ||
118 | value = v | ||
119 | } | ||
120 | vals := strings.SplitN(value, "=", 2) | ||
121 | if len(vals) == 1 { | ||
122 | (opts.values)[vals[0]] = "" | ||
123 | } else { | ||
124 | (opts.values)[vals[0]] = vals[1] | ||
125 | } | ||
126 | return nil | ||
127 | } | ||
128 | |||
129 | // GetAll returns the values of MapOpts as a map. | ||
130 | func (opts *MapOpts) GetAll() map[string]string { | ||
131 | return opts.values | ||
132 | } | ||
133 | |||
134 | func (opts *MapOpts) String() string { | ||
135 | return fmt.Sprintf("%v", map[string]string((opts.values))) | ||
136 | } | ||
137 | |||
138 | // NewMapOpts creates a new MapOpts with the specified map of values and a validator. | ||
139 | func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { | ||
140 | if values == nil { | ||
141 | values = make(map[string]string) | ||
142 | } | ||
143 | return &MapOpts{ | ||
144 | values: values, | ||
145 | validator: validator, | ||
146 | } | ||
147 | } | ||
148 | |||
// ValidatorFctType defines a validator function that returns a validated
// string and/or an error.
type ValidatorFctType func(val string) (string, error)

// ValidatorFctListType defines a validator function that returns a validated
// list of strings and/or an error.
type ValidatorFctListType func(val string) ([]string, error)
154 | |||
// ValidateAttach validates that the specified string is a valid attach
// option — one of "stdin", "stdout", "stderr", case-insensitively — and
// returns it lowercased.
func ValidateAttach(val string) (string, error) {
	switch s := strings.ToLower(val); s {
	case "stdin", "stdout", "stderr":
		return s, nil
	}
	return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
}
165 | |||
166 | // ValidateEnv validates an environment variable and returns it. | ||
167 | // If no value is specified, it returns the current value using os.Getenv. | ||
168 | // | ||
169 | // As on ParseEnvFile and related to #16585, environment variable names | ||
170 | // are not validate what so ever, it's up to application inside docker | ||
171 | // to validate them or not. | ||
172 | func ValidateEnv(val string) (string, error) { | ||
173 | arr := strings.Split(val, "=") | ||
174 | if len(arr) > 1 { | ||
175 | return val, nil | ||
176 | } | ||
177 | if !doesEnvExist(val) { | ||
178 | return val, nil | ||
179 | } | ||
180 | return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil | ||
181 | } | ||
182 | |||
// ValidateIPAddress validates an IP address (v4 or v6), ignoring surrounding
// whitespace, and returns it in canonical form.
func ValidateIPAddress(val string) (string, error) {
	if ip := net.ParseIP(strings.TrimSpace(val)); ip != nil {
		return ip.String(), nil
	}
	return "", fmt.Errorf("%s is not an ip address", val)
}
191 | |||
// ValidateMACAddress validates a MAC address and returns the original
// (untrimmed) input on success.
func ValidateMACAddress(val string) (string, error) {
	if _, err := net.ParseMAC(strings.TrimSpace(val)); err != nil {
		return "", err
	}
	return val, nil
}
200 | |||
201 | // ValidateDNSSearch validates domain for resolvconf search configuration. | ||
202 | // A zero length domain is represented by a dot (.). | ||
203 | func ValidateDNSSearch(val string) (string, error) { | ||
204 | if val = strings.Trim(val, " "); val == "." { | ||
205 | return val, nil | ||
206 | } | ||
207 | return validateDomain(val) | ||
208 | } | ||
209 | |||
210 | func validateDomain(val string) (string, error) { | ||
211 | if alphaRegexp.FindString(val) == "" { | ||
212 | return "", fmt.Errorf("%s is not a valid domain", val) | ||
213 | } | ||
214 | ns := domainRegexp.FindSubmatch([]byte(val)) | ||
215 | if len(ns) > 0 && len(ns[1]) < 255 { | ||
216 | return string(ns[1]), nil | ||
217 | } | ||
218 | return "", fmt.Errorf("%s is not a valid domain", val) | ||
219 | } | ||
220 | |||
221 | // ValidateExtraHost validates that the specified string is a valid extrahost and returns it. | ||
222 | // ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). | ||
223 | func ValidateExtraHost(val string) (string, error) { | ||
224 | // allow for IPv6 addresses in extra hosts by only splitting on first ":" | ||
225 | arr := strings.SplitN(val, ":", 2) | ||
226 | if len(arr) != 2 || len(arr[0]) == 0 { | ||
227 | return "", fmt.Errorf("bad format for add-host: %q", val) | ||
228 | } | ||
229 | if _, err := ValidateIPAddress(arr[1]); err != nil { | ||
230 | return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) | ||
231 | } | ||
232 | return val, nil | ||
233 | } | ||
234 | |||
// ValidateLabel validates that the specified string is a valid label
// (key=value form) and returns it.
func ValidateLabel(val string) (string, error) {
	if !strings.Contains(val, "=") {
		return "", fmt.Errorf("bad attribute format: %s", val)
	}
	return val, nil
}
243 | |||
244 | func doesEnvExist(name string) bool { | ||
245 | for _, entry := range os.Environ() { | ||
246 | parts := strings.SplitN(entry, "=", 2) | ||
247 | if parts[0] == name { | ||
248 | return true | ||
249 | } | ||
250 | } | ||
251 | return false | ||
252 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_unix.go new file mode 100644 index 0000000..f1ce844 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_unix.go | |||
@@ -0,0 +1,6 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package opts | ||
4 | |||
// DefaultHTTPHost is the default host used when only a port is provided to
// the -H flag, e.g. docker daemon -H tcp://:8080.
const DefaultHTTPHost = "localhost"
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_windows.go new file mode 100644 index 0000000..2a9e2be --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_windows.go | |||
@@ -0,0 +1,56 @@ | |||
1 | package opts | ||
2 | |||
3 | // TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4. | ||
4 | // @jhowardmsft, @swernli. | ||
5 | // | ||
6 | // On Windows, this mitigates a problem with the default options of running | ||
7 | // a docker client against a local docker daemon on TP4. | ||
8 | // | ||
9 | // What was found that if the default host is "localhost", even if the client | ||
10 | // (and daemon as this is local) is not physically on a network, and the DNS | ||
11 | // cache is flushed (ipconfig /flushdns), then the client will pause for | ||
12 | // exactly one second when connecting to the daemon for calls. For example | ||
13 | // using docker run windowsservercore cmd, the CLI will send a create followed | ||
14 | // by an attach. You see the delay between the attach finishing and the attach | ||
15 | // being seen by the daemon. | ||
16 | // | ||
17 | // Here's some daemon debug logs with additional debug spew put in. The | ||
18 | // AfterWriteJSON log is the very last thing the daemon does as part of the | ||
19 | // create call. The POST /attach is the second CLI call. Notice the second | ||
20 | // time gap. | ||
21 | // | ||
22 | // time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" | ||
23 | // time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" | ||
24 | // time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." | ||
25 | // time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... | ||
26 | // time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." | ||
27 | // time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." | ||
28 | // time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" | ||
29 | // time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" | ||
30 | // time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" | ||
31 | // time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" | ||
32 | // time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" | ||
33 | // ... 1 second gap here.... | ||
34 | // time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" | ||
35 | // time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" | ||
36 | // | ||
37 | // We suspect this is either a bug introduced in GOLang 1.5.1, or that a change | ||
38 | // in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory, | ||
39 | // the Windows networking stack is supposed to resolve "localhost" internally, | ||
40 | // without hitting DNS, or even reading the hosts file (which is why localhost | ||
41 | // is commented out in the hosts file on Windows). | ||
42 | // | ||
43 | // We have validated that working around this using the actual IPv4 localhost | ||
44 | // address does not cause the delay. | ||
45 | // | ||
46 | // This does not occur with the docker client built with 1.4.3 on the same | ||
47 | // Windows TP4 build, regardless of whether the daemon is built using 1.5.1 | ||
48 | // or 1.4.3. It does not occur on Linux. We also verified we see the same thing | ||
49 | // on a cross-compiled Windows binary (from Linux). | ||
50 | // | ||
51 | // Final note: This is a mitigation, not a 'real' fix. It is still susceptible | ||
52 | // to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...' | ||
53 | // explicitly. | ||
54 | |||
// DefaultHTTPHost is the default host used when only a port is provided to
// the -H flag, e.g. docker daemon -H tcp://:8080. On Windows the literal
// IPv4 loopback address is used instead of "localhost" to avoid the
// one-second DNS-related delay described in the commentary above.
const DefaultHTTPHost = "127.0.0.1"
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md new file mode 100644 index 0000000..7307d96 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md | |||
@@ -0,0 +1 @@ | |||
This code provides helper functions for dealing with archive files. | |||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go new file mode 100644 index 0000000..ce84347 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go | |||
@@ -0,0 +1,1049 @@ | |||
1 | package archive | ||
2 | |||
3 | import ( | ||
4 | "archive/tar" | ||
5 | "bufio" | ||
6 | "bytes" | ||
7 | "compress/bzip2" | ||
8 | "compress/gzip" | ||
9 | "errors" | ||
10 | "fmt" | ||
11 | "io" | ||
12 | "io/ioutil" | ||
13 | "os" | ||
14 | "os/exec" | ||
15 | "path/filepath" | ||
16 | "runtime" | ||
17 | "strings" | ||
18 | "syscall" | ||
19 | |||
20 | "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" | ||
21 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils" | ||
22 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools" | ||
23 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils" | ||
24 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" | ||
25 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise" | ||
26 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" | ||
27 | ) | ||
28 | |||
29 | type ( | ||
30 | // Archive is a type of io.ReadCloser which has two interfaces Read and Closer. | ||
31 | Archive io.ReadCloser | ||
32 | // Reader is a type of io.Reader. | ||
33 | Reader io.Reader | ||
34 | // Compression is the state represents if compressed or not. | ||
35 | Compression int | ||
36 | // TarChownOptions wraps the chown options UID and GID. | ||
37 | TarChownOptions struct { | ||
38 | UID, GID int | ||
39 | } | ||
40 | // TarOptions wraps the tar options. | ||
41 | TarOptions struct { | ||
42 | IncludeFiles []string | ||
43 | ExcludePatterns []string | ||
44 | Compression Compression | ||
45 | NoLchown bool | ||
46 | UIDMaps []idtools.IDMap | ||
47 | GIDMaps []idtools.IDMap | ||
48 | ChownOpts *TarChownOptions | ||
49 | IncludeSourceDir bool | ||
50 | // When unpacking, specifies whether overwriting a directory with a | ||
51 | // non-directory is allowed and vice versa. | ||
52 | NoOverwriteDirNonDir bool | ||
53 | // For each include when creating an archive, the included name will be | ||
54 | // replaced with the matching name from this map. | ||
55 | RebaseNames map[string]string | ||
56 | } | ||
57 | |||
58 | // Archiver allows the reuse of most utility functions of this package | ||
59 | // with a pluggable Untar function. Also, to facilitate the passing of | ||
60 | // specific id mappings for untar, an archiver can be created with maps | ||
61 | // which will then be passed to Untar operations | ||
62 | Archiver struct { | ||
63 | Untar func(io.Reader, string, *TarOptions) error | ||
64 | UIDMaps []idtools.IDMap | ||
65 | GIDMaps []idtools.IDMap | ||
66 | } | ||
67 | |||
68 | // breakoutError is used to differentiate errors related to breaking out | ||
69 | // When testing archive breakout in the unit tests, this error is expected | ||
70 | // in order for the test to pass. | ||
71 | breakoutError error | ||
72 | ) | ||
73 | |||
var (
	// ErrNotImplemented is the error message of function not implemented.
	ErrNotImplemented = errors.New("Function not implemented")
	// defaultArchiver backs the package-level convenience functions
	// (TarUntar, UntarPath, CopyWithTar, ...) and applies no ID remapping.
	defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
)
79 | |||
const (
	// HeaderSize is the size in bytes of a tar header
	HeaderSize = 512
)

// Supported compression formats, in the order they are detected.
const (
	// Uncompressed represents the uncompressed.
	Uncompressed Compression = iota
	// Bzip2 is bzip2 compression algorithm.
	Bzip2
	// Gzip is gzip compression algorithm.
	Gzip
	// Xz is xz compression algorithm.
	Xz
)
95 | |||
96 | // IsArchive checks for the magic bytes of a tar or any supported compression | ||
97 | // algorithm. | ||
98 | func IsArchive(header []byte) bool { | ||
99 | compression := DetectCompression(header) | ||
100 | if compression != Uncompressed { | ||
101 | return true | ||
102 | } | ||
103 | r := tar.NewReader(bytes.NewBuffer(header)) | ||
104 | _, err := r.Next() | ||
105 | return err == nil | ||
106 | } | ||
107 | |||
108 | // IsArchivePath checks if the (possibly compressed) file at the given path | ||
109 | // starts with a tar file header. | ||
110 | func IsArchivePath(path string) bool { | ||
111 | file, err := os.Open(path) | ||
112 | if err != nil { | ||
113 | return false | ||
114 | } | ||
115 | defer file.Close() | ||
116 | rdr, err := DecompressStream(file) | ||
117 | if err != nil { | ||
118 | return false | ||
119 | } | ||
120 | r := tar.NewReader(rdr) | ||
121 | _, err = r.Next() | ||
122 | return err == nil | ||
123 | } | ||
124 | |||
125 | // DetectCompression detects the compression algorithm of the source. | ||
126 | func DetectCompression(source []byte) Compression { | ||
127 | for compression, m := range map[Compression][]byte{ | ||
128 | Bzip2: {0x42, 0x5A, 0x68}, | ||
129 | Gzip: {0x1F, 0x8B, 0x08}, | ||
130 | Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, | ||
131 | } { | ||
132 | if len(source) < len(m) { | ||
133 | logrus.Debugf("Len too short") | ||
134 | continue | ||
135 | } | ||
136 | if bytes.Compare(m, source[:len(m)]) == 0 { | ||
137 | return compression | ||
138 | } | ||
139 | } | ||
140 | return Uncompressed | ||
141 | } | ||
142 | |||
143 | func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { | ||
144 | args := []string{"xz", "-d", "-c", "-q"} | ||
145 | |||
146 | return cmdStream(exec.Command(args[0], args[1:]...), archive) | ||
147 | } | ||
148 | |||
149 | // DecompressStream decompress the archive and returns a ReaderCloser with the decompressed archive. | ||
150 | func DecompressStream(archive io.Reader) (io.ReadCloser, error) { | ||
151 | p := pools.BufioReader32KPool | ||
152 | buf := p.Get(archive) | ||
153 | bs, err := buf.Peek(10) | ||
154 | if err != nil && err != io.EOF { | ||
155 | // Note: we'll ignore any io.EOF error because there are some odd | ||
156 | // cases where the layer.tar file will be empty (zero bytes) and | ||
157 | // that results in an io.EOF from the Peek() call. So, in those | ||
158 | // cases we'll just treat it as a non-compressed stream and | ||
159 | // that means just create an empty layer. | ||
160 | // See Issue 18170 | ||
161 | return nil, err | ||
162 | } | ||
163 | |||
164 | compression := DetectCompression(bs) | ||
165 | switch compression { | ||
166 | case Uncompressed: | ||
167 | readBufWrapper := p.NewReadCloserWrapper(buf, buf) | ||
168 | return readBufWrapper, nil | ||
169 | case Gzip: | ||
170 | gzReader, err := gzip.NewReader(buf) | ||
171 | if err != nil { | ||
172 | return nil, err | ||
173 | } | ||
174 | readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) | ||
175 | return readBufWrapper, nil | ||
176 | case Bzip2: | ||
177 | bz2Reader := bzip2.NewReader(buf) | ||
178 | readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) | ||
179 | return readBufWrapper, nil | ||
180 | case Xz: | ||
181 | xzReader, chdone, err := xzDecompress(buf) | ||
182 | if err != nil { | ||
183 | return nil, err | ||
184 | } | ||
185 | readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) | ||
186 | return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { | ||
187 | <-chdone | ||
188 | return readBufWrapper.Close() | ||
189 | }), nil | ||
190 | default: | ||
191 | return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) | ||
192 | } | ||
193 | } | ||
194 | |||
195 | // CompressStream compresses the dest with specified compression algorithm. | ||
196 | func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { | ||
197 | p := pools.BufioWriter32KPool | ||
198 | buf := p.Get(dest) | ||
199 | switch compression { | ||
200 | case Uncompressed: | ||
201 | writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) | ||
202 | return writeBufWrapper, nil | ||
203 | case Gzip: | ||
204 | gzWriter := gzip.NewWriter(dest) | ||
205 | writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) | ||
206 | return writeBufWrapper, nil | ||
207 | case Bzip2, Xz: | ||
208 | // archive/bzip2 does not support writing, and there is no xz support at all | ||
209 | // However, this is not a problem as docker only currently generates gzipped tars | ||
210 | return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) | ||
211 | default: | ||
212 | return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) | ||
213 | } | ||
214 | } | ||
215 | |||
216 | // Extension returns the extension of a file that uses the specified compression algorithm. | ||
217 | func (compression *Compression) Extension() string { | ||
218 | switch *compression { | ||
219 | case Uncompressed: | ||
220 | return "tar" | ||
221 | case Bzip2: | ||
222 | return "tar.bz2" | ||
223 | case Gzip: | ||
224 | return "tar.gz" | ||
225 | case Xz: | ||
226 | return "tar.xz" | ||
227 | } | ||
228 | return "" | ||
229 | } | ||
230 | |||
// tarAppender streams individual filesystem entries into a tar archive,
// carrying state (seen inodes, ID mappings, a reusable copy buffer) across
// successive addTarFile calls.
type tarAppender struct {
	TarWriter *tar.Writer
	Buffer    *bufio.Writer

	// for hardlink mapping
	SeenFiles map[uint64]string
	UIDMaps   []idtools.IDMap
	GIDMaps   []idtools.IDMap
}
240 | |||
241 | // canonicalTarName provides a platform-independent and consistent posix-style | ||
242 | //path for files and directories to be archived regardless of the platform. | ||
243 | func canonicalTarName(name string, isDir bool) (string, error) { | ||
244 | name, err := CanonicalTarNameForPath(name) | ||
245 | if err != nil { | ||
246 | return "", err | ||
247 | } | ||
248 | |||
249 | // suffix with '/' for directories | ||
250 | if isDir && !strings.HasSuffix(name, "/") { | ||
251 | name += "/" | ||
252 | } | ||
253 | return name, nil | ||
254 | } | ||
255 | |||
// addTarFile writes a tar header (and, for regular files, the content) for
// the filesystem entry at path into the archive under archive name `name`.
// It resolves symlink targets, collapses additional hardlinks to TypeLink
// entries via the SeenFiles inode map, preserves the security.capability
// xattr, and optionally remaps UID/GID back to host IDs.
func (ta *tarAppender) addTarFile(path, name string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		return err
	}

	// Read the symlink target so tar.FileInfoHeader can record it.
	link := ""
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return err
		}
	}

	hdr, err := tar.FileInfoHeader(fi, link)
	if err != nil {
		return err
	}
	// chmodTarEntry applies platform-specific permission adjustments.
	hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))

	name, err = canonicalTarName(name, fi.IsDir())
	if err != nil {
		return fmt.Errorf("tar: cannot canonicalize path: %v", err)
	}
	hdr.Name = name

	inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
	if err != nil {
		return err
	}

	// if it's not a directory and has more than 1 link,
	// it's hardlinked, so set the type flag accordingly
	if !fi.IsDir() && hasHardlinks(fi) {
		// a link should have a name that it links too
		// and that linked name should be first in the tar archive
		if oldpath, ok := ta.SeenFiles[inode]; ok {
			hdr.Typeflag = tar.TypeLink
			hdr.Linkname = oldpath
			hdr.Size = 0 // This Must be here for the writer math to add up!
		} else {
			ta.SeenFiles[inode] = name
		}
	}

	// Preserve file capabilities; the lookup error is deliberately ignored
	// (best-effort — absence simply means no xattr is written).
	capability, _ := system.Lgetxattr(path, "security.capability")
	if capability != nil {
		hdr.Xattrs = make(map[string]string)
		hdr.Xattrs["security.capability"] = string(capability)
	}

	//handle re-mapping container ID mappings back to host ID mappings before
	//writing tar headers/files. We skip whiteout files because they were written
	//by the kernel and already have proper ownership relative to the host
	if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) {
		uid, gid, err := getFileUIDGID(fi.Sys())
		if err != nil {
			return err
		}
		xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
		if err != nil {
			return err
		}
		xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
		if err != nil {
			return err
		}
		hdr.Uid = xUID
		hdr.Gid = xGID
	}

	if err := ta.TarWriter.WriteHeader(hdr); err != nil {
		return err
	}

	// Only regular files carry content; stream it through the reusable
	// buffered writer.
	if hdr.Typeflag == tar.TypeReg {
		file, err := os.Open(path)
		if err != nil {
			return err
		}

		ta.Buffer.Reset(ta.TarWriter)
		defer ta.Buffer.Reset(nil)
		_, err = io.Copy(ta.Buffer, file)
		file.Close()
		if err != nil {
			return err
		}
		err = ta.Buffer.Flush()
		if err != nil {
			return err
		}
	}

	return nil
}
351 | |||
// createTarFile materializes one tar entry at `path` (inside extractDir),
// dispatching on the header type: directory, regular file, device/fifo,
// hardlink, symlink, or PAX global header. Hardlink and symlink targets are
// checked against extractDir to reject path-breakout entries. It then
// restores ownership (unless Lchown is false or on Windows), xattrs, mode,
// and timestamps from the header.
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error {
	// hdr.Mode is in linux format, which we can use for sycalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file
		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar, tar.TypeFifo:
		// Handle this is an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		// path -> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)

		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		logrus.Debugf("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
	}

	// Lchown is not supported on Windows.
	if Lchown && runtime.GOOS != "windows" {
		if chownOpts == nil {
			// Fall back to the ownership recorded in the tar header.
			chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
		}
		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
			return err
		}
	}

	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			return err
		}
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
		return err
	}

	aTime := hdr.AccessTime
	if aTime.Before(hdr.ModTime) {
		// Last access time should never be before last modified time.
		aTime = hdr.ModTime
	}

	// system.Chtimes doesn't support a NOFOLLOW flag atm
	if hdr.Typeflag == tar.TypeLink {
		// For hardlinks, only set times when the link target is not a symlink.
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
			return err
		}
	} else {
		// Symlinks need the no-follow variant; unsupported platforms are
		// tolerated (ErrNotSupportedPlatform is not treated as fatal).
		ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}
465 | |||
466 | // Tar creates an archive from the directory at `path`, and returns it as a | ||
467 | // stream of bytes. | ||
468 | func Tar(path string, compression Compression) (io.ReadCloser, error) { | ||
469 | return TarWithOptions(path, &TarOptions{Compression: compression}) | ||
470 | } | ||
471 | |||
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
// The archive is produced lazily by a background goroutine writing into an
// io.Pipe; the returned reader yields bytes as the tree is walked.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {

	// Fix the source path to work with long path names. This is a no-op
	// on platforms other than Windows.
	srcPath = fixVolumePathPrefix(srcPath)

	patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)

	if err != nil {
		return nil, err
	}

	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(compressWriter),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
			UIDMaps:   options.UIDMaps,
			GIDMaps:   options.GIDMaps,
		}

		defer func() {
			// Make sure to check the error on Close.
			if err := ta.TarWriter.Close(); err != nil {
				logrus.Debugf("Can't close tar writer: %s", err)
			}
			if err := compressWriter.Close(); err != nil {
				logrus.Debugf("Can't close compress writer: %s", err)
			}
			if err := pipeWriter.Close(); err != nil {
				logrus.Debugf("Can't close pipe writer: %s", err)
			}
		}()

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		stat, err := os.Lstat(srcPath)
		if err != nil {
			return
		}

		if !stat.IsDir() {
			// We can't later join a non-dir with any includes because the
			// 'walk' will error if "file/." is stat-ed and "file" is not a
			// directory. So, we must split the source path and use the
			// basename as the include.
			if len(options.IncludeFiles) > 0 {
				logrus.Warn("Tar: Can't archive a file with includes")
			}

			dir, base := SplitPathDirEntry(srcPath)
			srcPath = dir
			options.IncludeFiles = []string{base}
		}

		if len(options.IncludeFiles) == 0 {
			options.IncludeFiles = []string{"."}
		}

		// Tracks paths already emitted so overlapping includes don't
		// produce duplicate entries.
		seen := make(map[string]bool)

		for _, include := range options.IncludeFiles {
			rebaseName := options.RebaseNames[include]

			walkRoot := getWalkRoot(srcPath, include)
			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the source directory path. Skip in both situations.
					return nil
				}

				if options.IncludeSourceDir && include == "." && relFilePath != "." {
					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
					if err != nil {
						logrus.Debugf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					// When no exception ("!pattern") rules exist, a skipped
					// directory's whole subtree can be pruned at once.
					if !exceptions && f.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource.
				if rebaseName != "" {
					var replacement string
					if rebaseName != string(filepath.Separator) {
						// Special case the root directory to replace with an
						// empty string instead so that we don't end up with
						// double slashes in the paths.
						replacement = rebaseName
					}

					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
				}
				return nil
			})
		}
	}()

	return pipeReader, nil
}
619 | |||
// Unpack unpacks the decompressedArchive to dest with options.
// Entries whose cleaned path would escape dest are rejected with a
// breakoutError. When UID/GID maps are supplied, header ownership is
// translated from container IDs to host IDs before files are created.
// Directory mtimes are restored last so file creation does not disturb them.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
	if err != nil {
		return err
	}

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
		// This keeps "..\" as-is, but normalizes "\..\" to "\".
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
		// the filepath format for the OS on which the daemon is running. Hence
		// the check for a slash-suffix MUST be done in an OS-agnostic way.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = system.MkdirAll(parentPath, 0777)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		// Reject entries that would resolve outside the destination tree.
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exits we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing directory with a non-directory from the archive.
				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
			}

			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing non-directory with a directory from the archive.
				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
			}

			if fi.IsDir() && hdr.Name == "." {
				continue
			}

			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)

		// if the options contain a uid & gid maps, convert header uid/gid
		// entries using the maps such that lchown sets the proper mapped
		// uid/gid after writing the file. We only perform this mapping if
		// the file isn't already owned by the remapped root UID or GID, as
		// that specific uid/gid has no mapping from container -> host, and
		// those files already have the proper ownership for inside the
		// container.
		if hdr.Uid != remappedRootUID {
			xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
			if err != nil {
				return err
			}
			hdr.Uid = xUID
		}
		if hdr.Gid != remappedRootGID {
			xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
			if err != nil {
				return err
			}
			hdr.Gid = xGID
		}

		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)

		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	}
	return nil
}
750 | |||
751 | // Untar reads a stream of bytes from `archive`, parses it as a tar archive, | ||
752 | // and unpacks it into the directory at `dest`. | ||
753 | // The archive may be compressed with one of the following algorithms: | ||
754 | // identity (uncompressed), gzip, bzip2, xz. | ||
755 | // FIXME: specify behavior when target path exists vs. doesn't exist. | ||
756 | func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { | ||
757 | return untarHandler(tarArchive, dest, options, true) | ||
758 | } | ||
759 | |||
760 | // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, | ||
761 | // and unpacks it into the directory at `dest`. | ||
762 | // The archive must be an uncompressed stream. | ||
763 | func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { | ||
764 | return untarHandler(tarArchive, dest, options, false) | ||
765 | } | ||
766 | |||
767 | // Handler for teasing out the automatic decompression | ||
768 | func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { | ||
769 | if tarArchive == nil { | ||
770 | return fmt.Errorf("Empty archive") | ||
771 | } | ||
772 | dest = filepath.Clean(dest) | ||
773 | if options == nil { | ||
774 | options = &TarOptions{} | ||
775 | } | ||
776 | if options.ExcludePatterns == nil { | ||
777 | options.ExcludePatterns = []string{} | ||
778 | } | ||
779 | |||
780 | r := tarArchive | ||
781 | if decompress { | ||
782 | decompressedArchive, err := DecompressStream(tarArchive) | ||
783 | if err != nil { | ||
784 | return err | ||
785 | } | ||
786 | defer decompressedArchive.Close() | ||
787 | r = decompressedArchive | ||
788 | } | ||
789 | |||
790 | return Unpack(r, dest, options) | ||
791 | } | ||
792 | |||
793 | // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. | ||
794 | // If either Tar or Untar fails, TarUntar aborts and returns the error. | ||
795 | func (archiver *Archiver) TarUntar(src, dst string) error { | ||
796 | logrus.Debugf("TarUntar(%s %s)", src, dst) | ||
797 | archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) | ||
798 | if err != nil { | ||
799 | return err | ||
800 | } | ||
801 | defer archive.Close() | ||
802 | |||
803 | var options *TarOptions | ||
804 | if archiver.UIDMaps != nil || archiver.GIDMaps != nil { | ||
805 | options = &TarOptions{ | ||
806 | UIDMaps: archiver.UIDMaps, | ||
807 | GIDMaps: archiver.GIDMaps, | ||
808 | } | ||
809 | } | ||
810 | return archiver.Untar(archive, dst, options) | ||
811 | } | ||
812 | |||
813 | // TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. | ||
814 | // If either Tar or Untar fails, TarUntar aborts and returns the error. | ||
815 | func TarUntar(src, dst string) error { | ||
816 | return defaultArchiver.TarUntar(src, dst) | ||
817 | } | ||
818 | |||
819 | // UntarPath untar a file from path to a destination, src is the source tar file path. | ||
820 | func (archiver *Archiver) UntarPath(src, dst string) error { | ||
821 | archive, err := os.Open(src) | ||
822 | if err != nil { | ||
823 | return err | ||
824 | } | ||
825 | defer archive.Close() | ||
826 | var options *TarOptions | ||
827 | if archiver.UIDMaps != nil || archiver.GIDMaps != nil { | ||
828 | options = &TarOptions{ | ||
829 | UIDMaps: archiver.UIDMaps, | ||
830 | GIDMaps: archiver.GIDMaps, | ||
831 | } | ||
832 | } | ||
833 | return archiver.Untar(archive, dst, options) | ||
834 | } | ||
835 | |||
// UntarPath is a convenience function which looks for an archive
// at filesystem path `src`, and unpacks it at `dst`.
// It delegates to the package-level defaultArchiver.
func UntarPath(src, dst string) error {
	return defaultArchiver.UntarPath(src, dst)
}
841 | |||
842 | // CopyWithTar creates a tar archive of filesystem path `src`, and | ||
843 | // unpacks it at filesystem path `dst`. | ||
844 | // The archive is streamed directly with fixed buffering and no | ||
845 | // intermediary disk IO. | ||
846 | func (archiver *Archiver) CopyWithTar(src, dst string) error { | ||
847 | srcSt, err := os.Stat(src) | ||
848 | if err != nil { | ||
849 | return err | ||
850 | } | ||
851 | if !srcSt.IsDir() { | ||
852 | return archiver.CopyFileWithTar(src, dst) | ||
853 | } | ||
854 | // Create dst, copy src's content into it | ||
855 | logrus.Debugf("Creating dest directory: %s", dst) | ||
856 | if err := system.MkdirAll(dst, 0755); err != nil { | ||
857 | return err | ||
858 | } | ||
859 | logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) | ||
860 | return archiver.TarUntar(src, dst) | ||
861 | } | ||
862 | |||
// CopyWithTar creates a tar archive of filesystem path `src`, and
// unpacks it at filesystem path `dst`.
// The archive is streamed directly with fixed buffering and no
// intermediary disk IO.
// It delegates to the package-level defaultArchiver.
func CopyWithTar(src, dst string) error {
	return defaultArchiver.CopyWithTar(src, dst)
}
870 | |||
871 | // CopyFileWithTar emulates the behavior of the 'cp' command-line | ||
872 | // for a single file. It copies a regular file from path `src` to | ||
873 | // path `dst`, and preserves all its metadata. | ||
874 | func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { | ||
875 | logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) | ||
876 | srcSt, err := os.Stat(src) | ||
877 | if err != nil { | ||
878 | return err | ||
879 | } | ||
880 | |||
881 | if srcSt.IsDir() { | ||
882 | return fmt.Errorf("Can't copy a directory") | ||
883 | } | ||
884 | |||
885 | // Clean up the trailing slash. This must be done in an operating | ||
886 | // system specific manner. | ||
887 | if dst[len(dst)-1] == os.PathSeparator { | ||
888 | dst = filepath.Join(dst, filepath.Base(src)) | ||
889 | } | ||
890 | // Create the holding directory if necessary | ||
891 | if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { | ||
892 | return err | ||
893 | } | ||
894 | |||
895 | r, w := io.Pipe() | ||
896 | errC := promise.Go(func() error { | ||
897 | defer w.Close() | ||
898 | |||
899 | srcF, err := os.Open(src) | ||
900 | if err != nil { | ||
901 | return err | ||
902 | } | ||
903 | defer srcF.Close() | ||
904 | |||
905 | hdr, err := tar.FileInfoHeader(srcSt, "") | ||
906 | if err != nil { | ||
907 | return err | ||
908 | } | ||
909 | hdr.Name = filepath.Base(dst) | ||
910 | hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) | ||
911 | |||
912 | remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) | ||
913 | if err != nil { | ||
914 | return err | ||
915 | } | ||
916 | |||
917 | // only perform mapping if the file being copied isn't already owned by the | ||
918 | // uid or gid of the remapped root in the container | ||
919 | if remappedRootUID != hdr.Uid { | ||
920 | xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) | ||
921 | if err != nil { | ||
922 | return err | ||
923 | } | ||
924 | hdr.Uid = xUID | ||
925 | } | ||
926 | if remappedRootGID != hdr.Gid { | ||
927 | xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) | ||
928 | if err != nil { | ||
929 | return err | ||
930 | } | ||
931 | hdr.Gid = xGID | ||
932 | } | ||
933 | |||
934 | tw := tar.NewWriter(w) | ||
935 | defer tw.Close() | ||
936 | if err := tw.WriteHeader(hdr); err != nil { | ||
937 | return err | ||
938 | } | ||
939 | if _, err := io.Copy(tw, srcF); err != nil { | ||
940 | return err | ||
941 | } | ||
942 | return nil | ||
943 | }) | ||
944 | defer func() { | ||
945 | if er := <-errC; err != nil { | ||
946 | err = er | ||
947 | } | ||
948 | }() | ||
949 | |||
950 | err = archiver.Untar(r, filepath.Dir(dst), nil) | ||
951 | if err != nil { | ||
952 | r.CloseWithError(err) | ||
953 | } | ||
954 | return err | ||
955 | } | ||
956 | |||
// CopyFileWithTar emulates the behavior of the 'cp' command-line
// for a single file. It copies a regular file from path `src` to
// path `dst`, and preserves all its metadata.
//
// Destination handling is in an operating specific manner depending
// where the daemon is running. If `dst` ends with a trailing slash
// the final destination path will be `dst/base(src)` (Linux) or
// `dst\base(src)` (Windows).
// It delegates to the package-level defaultArchiver.
func CopyFileWithTar(src, dst string) (err error) {
	return defaultArchiver.CopyFileWithTar(src, dst)
}
968 | |||
// cmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
//
// The returned channel is closed after the process has fully exited, so
// callers can use it to wait for completion. A non-zero exit is delivered
// to the pipe's reader via CloseWithError, with captured stderr appended
// to the message.
func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
	chdone := make(chan struct{})
	cmd.Stdin = input
	pipeR, pipeW := io.Pipe()
	cmd.Stdout = pipeW
	// Capture stderr so it can be folded into the error on failure.
	var errBuf bytes.Buffer
	cmd.Stderr = &errBuf

	// Run the command and return the pipe
	if err := cmd.Start(); err != nil {
		return nil, nil, err
	}

	// Copy stdout to the returned pipe
	go func() {
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
		} else {
			pipeW.Close()
		}
		close(chdone)
	}()

	return pipeR, chdone, nil
}
997 | |||
998 | // NewTempArchive reads the content of src into a temporary file, and returns the contents | ||
999 | // of that file as an archive. The archive can only be read once - as soon as reading completes, | ||
1000 | // the file will be deleted. | ||
1001 | func NewTempArchive(src Archive, dir string) (*TempArchive, error) { | ||
1002 | f, err := ioutil.TempFile(dir, "") | ||
1003 | if err != nil { | ||
1004 | return nil, err | ||
1005 | } | ||
1006 | if _, err := io.Copy(f, src); err != nil { | ||
1007 | return nil, err | ||
1008 | } | ||
1009 | if _, err := f.Seek(0, 0); err != nil { | ||
1010 | return nil, err | ||
1011 | } | ||
1012 | st, err := f.Stat() | ||
1013 | if err != nil { | ||
1014 | return nil, err | ||
1015 | } | ||
1016 | size := st.Size() | ||
1017 | return &TempArchive{File: f, Size: size}, nil | ||
1018 | } | ||
1019 | |||
// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
type TempArchive struct {
	*os.File
	Size   int64 // Pre-computed from Stat().Size() as a convenience
	read   int64 // running count of bytes returned by Read so far
	closed bool  // set by Close so repeated closes are no-ops
}
1028 | |||
1029 | // Close closes the underlying file if it's still open, or does a no-op | ||
1030 | // to allow callers to try to close the TempArchive multiple times safely. | ||
1031 | func (archive *TempArchive) Close() error { | ||
1032 | if archive.closed { | ||
1033 | return nil | ||
1034 | } | ||
1035 | |||
1036 | archive.closed = true | ||
1037 | |||
1038 | return archive.File.Close() | ||
1039 | } | ||
1040 | |||
// Read proxies to the underlying file and, once the file has been fully
// consumed (archive.read reaches archive.Size) or any read error occurs,
// closes the file and removes it from disk, implementing the read-once
// contract of TempArchive.
func (archive *TempArchive) Read(data []byte) (int, error) {
	n, err := archive.File.Read(data)
	archive.read += int64(n)
	if err != nil || archive.read == archive.Size {
		// Best-effort cleanup; the original read results are still returned.
		archive.Close()
		os.Remove(archive.File.Name())
	}
	return n, err
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 0000000..86c6888 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go | |||
@@ -0,0 +1,112 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package archive | ||
4 | |||
5 | import ( | ||
6 | "archive/tar" | ||
7 | "errors" | ||
8 | "os" | ||
9 | "path/filepath" | ||
10 | "syscall" | ||
11 | |||
12 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" | ||
13 | ) | ||
14 | |||
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
// On Unix there is no volume notion, so the path is returned unchanged.
func fixVolumePathPrefix(srcPath string) string {
	return srcPath
}
20 | |||
21 | // getWalkRoot calculates the root path when performing a TarWithOptions. | ||
22 | // We use a separate function as this is platform specific. On Linux, we | ||
23 | // can't use filepath.Join(srcPath,include) because this will clean away | ||
24 | // a trailing "." or "/" which may be important. | ||
25 | func getWalkRoot(srcPath string, include string) string { | ||
26 | return srcPath + string(filepath.Separator) + include | ||
27 | } | ||
28 | |||
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path. Unix paths are already posix-style, so p is returned
// unchanged and the error is always nil.
func CanonicalTarNameForPath(p string) (string, error) {
	return p, nil // already unix-style
}
35 | |||
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done. On Unix it is the identity, as Go's
// APIs already report permission bits correctly.
//
// FIX: the original had a blank line between this comment and the function,
// detaching the doc comment; it is reattached here.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	return perm // noop for unix as golang APIs provide perm bits correctly
}
42 | |||
// setHeaderForSpecialDevice extracts the inode number from a raw stat value
// and, for block and character devices, fills in the tar header's
// Devmajor/Devminor fields (split out of Rdev by major/minor).
//
// stat must be a *syscall.Stat_t; any other dynamic type yields an error.
// ta and name are unused on this platform; the signature mirrors the
// Windows variant.
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
	s, ok := stat.(*syscall.Stat_t)

	if !ok {
		err = errors.New("cannot convert stat value to syscall.Stat_t")
		return
	}

	inode = uint64(s.Ino)

	// Currently go does not fill in the major/minors
	if s.Mode&syscall.S_IFBLK != 0 ||
		s.Mode&syscall.S_IFCHR != 0 {
		hdr.Devmajor = int64(major(uint64(s.Rdev)))
		hdr.Devminor = int64(minor(uint64(s.Rdev)))
	}

	return
}
62 | |||
63 | func getFileUIDGID(stat interface{}) (int, int, error) { | ||
64 | s, ok := stat.(*syscall.Stat_t) | ||
65 | |||
66 | if !ok { | ||
67 | return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") | ||
68 | } | ||
69 | return int(s.Uid), int(s.Gid), nil | ||
70 | } | ||
71 | |||
// major extracts the major number from a packed device number.
func major(device uint64) uint64 {
	const majorMask = 0xfff
	return (device >> 8) & majorMask
}
75 | |||
// minor extracts the minor number from a packed device number.
func minor(device uint64) uint64 {
	low := device & 0xff
	high := (device >> 12) & 0xfff00
	return low | high
}
79 | |||
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo.
// It creates the corresponding filesystem node with mknod, combining the
// header's permission bits with the node-type flag.
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	// Start from the permission bits recorded in the header.
	mode := uint32(hdr.Mode & 07777)
	switch hdr.Typeflag {
	case tar.TypeBlock:
		mode |= syscall.S_IFBLK
	case tar.TypeChar:
		mode |= syscall.S_IFCHR
	case tar.TypeFifo:
		mode |= syscall.S_IFIFO
	}

	// The device number is built from the header's major/minor; it is only
	// meaningful for block/char entries (fifos ignore it).
	if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
		return err
	}
	return nil
}
98 | |||
// handleLChmod applies the header's permission bits to path via chmod,
// with care around links: for a hard link (TypeLink), chmod only runs when
// the link target is not a symlink; for a symlink entry (TypeSymlink) no
// chmod is attempted at all, since chmod would follow the link and modify
// its target instead.
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
			return err
		}
	}
	return nil
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 0000000..23d60aa --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go | |||
@@ -0,0 +1,70 @@ | |||
1 | // +build windows | ||
2 | |||
3 | package archive | ||
4 | |||
5 | import ( | ||
6 | "archive/tar" | ||
7 | "fmt" | ||
8 | "os" | ||
9 | "path/filepath" | ||
10 | "strings" | ||
11 | |||
12 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath" | ||
13 | ) | ||
14 | |||
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
// On Windows this delegates to longpath.AddPrefix.
func fixVolumePathPrefix(srcPath string) string {
	return longpath.AddPrefix(srcPath)
}
20 | |||
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Windows a
// plain filepath.Join of the source path and the include pattern is used.
func getWalkRoot(srcPath string, include string) string {
	return filepath.Join(srcPath, include)
}
26 | |||
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
	// windows: convert windows style relative path with backslashes
	// into forward slashes. Since windows does not allow '/' or '\'
	// in file names, it is mostly safe to replace however we must
	// check just in case
	if strings.ContainsRune(p, '/') {
		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
	}
	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}
41 | |||
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done. On Windows, permission bits are
// clamped to at most 0755 and the execute bits are always added.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	// Drop anything above rwxr-xr-x, then make the entry +x everywhere.
	return (perm & 0755) | 0111
}
51 | |||
// setHeaderForSpecialDevice is a no-op on Windows: stat carries no Rdev,
// Inode or Nlink there, so the header is left untouched and the reported
// inode is always zero.
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
	// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
	return
}
56 | |||
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo.
// It is a no-op on Windows, which has no such special files.
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	return nil
}
62 | |||
// handleLChmod is a no-op on Windows: tar permission bits are not applied
// to extracted files on this platform.
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	return nil
}
66 | |||
// getFileUIDGID always reports owner 0, 0 on Windows.
func getFileUIDGID(stat interface{}) (int, int, error) {
	// no notion of file ownership mapping yet on Windows
	return 0, 0, nil
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go new file mode 100644 index 0000000..a2a1dc3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go | |||
@@ -0,0 +1,416 @@ | |||
1 | package archive | ||
2 | |||
3 | import ( | ||
4 | "archive/tar" | ||
5 | "bytes" | ||
6 | "fmt" | ||
7 | "io" | ||
8 | "io/ioutil" | ||
9 | "os" | ||
10 | "path/filepath" | ||
11 | "sort" | ||
12 | "strings" | ||
13 | "syscall" | ||
14 | "time" | ||
15 | |||
16 | "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" | ||
17 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools" | ||
18 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" | ||
19 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" | ||
20 | ) | ||
21 | |||
// ChangeType represents the change type.
type ChangeType int

const (
	// ChangeModify represents the modify operation.
	ChangeModify = iota
	// ChangeAdd represents the add operation.
	ChangeAdd
	// ChangeDelete represents the delete operation.
	ChangeDelete
)

// String returns a one-letter code for the change kind: "C" for modify,
// "A" for add, "D" for delete, and "" for any other value.
func (c ChangeType) String() string {
	if c == ChangeModify {
		return "C"
	}
	if c == ChangeAdd {
		return "A"
	}
	if c == ChangeDelete {
		return "D"
	}
	return ""
}

// Change represents a change, it wraps the change type and path.
// It describes changes of the files in the path respect to the
// parent layers. The change could be modify, add, delete.
// This is used for layer diff.
type Change struct {
	Path string
	Kind ChangeType
}

// String renders the change as "<kind> <path>", e.g. "A /etc/hosts".
func (change *Change) String() string {
	return fmt.Sprintf("%v %v", change.Kind, change.Path)
}
58 | |||
59 | // for sort.Sort | ||
60 | type changesByPath []Change | ||
61 | |||
62 | func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } | ||
63 | func (c changesByPath) Len() int { return len(c) } | ||
64 | func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } | ||
65 | |||
66 | // Gnu tar and the go tar writer don't have sub-second mtime | ||
67 | // precision, which is problematic when we apply changes via tar | ||
68 | // files, we handle this by comparing for exact times, *or* same | ||
69 | // second count and either a or b having exactly 0 nanoseconds | ||
70 | func sameFsTime(a, b time.Time) bool { | ||
71 | return a == b || | ||
72 | (a.Unix() == b.Unix() && | ||
73 | (a.Nanosecond() == 0 || b.Nanosecond() == 0)) | ||
74 | } | ||
75 | |||
76 | func sameFsTimeSpec(a, b syscall.Timespec) bool { | ||
77 | return a.Sec == b.Sec && | ||
78 | (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) | ||
79 | } | ||
80 | |||
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers.
//
// Deletions are detected via whiteout files (WhiteoutPrefix-prefixed names)
// in rw; AUFS metadata entries (WhiteoutMetaPrefix) are skipped entirely.
// A file that also exists in any of the given layers is reported as a
// modification rather than an addition.
func Changes(layers []string, rw string) ([]Change, error) {
	var (
		changes     []Change
		changedDirs = make(map[string]struct{})
	)

	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path so it is relative to the rw layer root.
		path, err = filepath.Rel(rw, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		path = filepath.Join(string(os.PathSeparator), path)

		// Skip root
		if path == string(os.PathSeparator) {
			return nil
		}

		// Skip AUFS metadata
		if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
			return err
		}

		change := Change{
			Path: path,
		}

		// Find out what kind of modification happened
		file := filepath.Base(path)
		// If there is a whiteout, then the file was removed
		if strings.HasPrefix(file, WhiteoutPrefix) {
			originalFile := file[len(WhiteoutPrefix):]
			change.Path = filepath.Join(filepath.Dir(path), originalFile)
			change.Kind = ChangeDelete
		} else {
			// Otherwise, the file was added
			change.Kind = ChangeAdd

			// ...Unless it already existed in a top layer, in which case, it's a modification
			for _, layer := range layers {
				stat, err := os.Stat(filepath.Join(layer, path))
				if err != nil && !os.IsNotExist(err) {
					return err
				}
				if err == nil {
					// The file existed in the top layer, so that's a modification

					// However, if it's a directory, maybe it wasn't actually modified.
					// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
					if stat.IsDir() && f.IsDir() {
						if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
							// Both directories are the same, don't record the change
							return nil
						}
					}
					change.Kind = ChangeModify
					break
				}
			}
		}

		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
		// This block is here to ensure the change is recorded even if the
		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
		// Check https://github.com/docker/docker/pull/13590 for details.
		if f.IsDir() {
			changedDirs[path] = struct{}{}
		}
		if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
			parent := filepath.Dir(path)
			if _, ok := changedDirs[parent]; !ok && parent != "/" {
				// Record the parent directory as modified (once only).
				changes = append(changes, Change{Path: parent, Kind: ChangeModify})
				changedDirs[parent] = struct{}{}
			}
		}

		// Record change
		changes = append(changes, change)
		return nil
	})
	// A vanished rw directory is treated as "no changes" rather than an error.
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return changes, nil
}
175 | |||
// FileInfo describes the information of a file.
type FileInfo struct {
	parent     *FileInfo            // enclosing directory node; nil for the root
	name       string               // base name of this entry
	stat       *system.StatT        // captured stat data used for comparisons
	children   map[string]*FileInfo // directory entries keyed by base name
	capability []byte               // file capability data; presumably the security.capability xattr — confirm in the collection code
	added      bool                 // set once this node has been emitted as a change
}
185 | |||
186 | // LookUp looks up the file information of a file. | ||
187 | func (info *FileInfo) LookUp(path string) *FileInfo { | ||
188 | // As this runs on the daemon side, file paths are OS specific. | ||
189 | parent := info | ||
190 | if path == string(os.PathSeparator) { | ||
191 | return info | ||
192 | } | ||
193 | |||
194 | pathElements := strings.Split(path, string(os.PathSeparator)) | ||
195 | for _, elem := range pathElements { | ||
196 | if elem != "" { | ||
197 | child := parent.children[elem] | ||
198 | if child == nil { | ||
199 | return nil | ||
200 | } | ||
201 | parent = child | ||
202 | } | ||
203 | } | ||
204 | return parent | ||
205 | } | ||
206 | |||
// path reconstructs this node's absolute path by recursing up the parent
// chain; the root node renders as the OS path separator.
func (info *FileInfo) path() string {
	if info.parent == nil {
		// As this runs on the daemon side, file paths are OS specific.
		return string(os.PathSeparator)
	}
	return filepath.Join(info.parent.path(), info.name)
}
214 | |||
215 | func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { | ||
216 | |||
217 | sizeAtEntry := len(*changes) | ||
218 | |||
219 | if oldInfo == nil { | ||
220 | // add | ||
221 | change := Change{ | ||
222 | Path: info.path(), | ||
223 | Kind: ChangeAdd, | ||
224 | } | ||
225 | *changes = append(*changes, change) | ||
226 | info.added = true | ||
227 | } | ||
228 | |||
229 | // We make a copy so we can modify it to detect additions | ||
230 | // also, we only recurse on the old dir if the new info is a directory | ||
231 | // otherwise any previous delete/change is considered recursive | ||
232 | oldChildren := make(map[string]*FileInfo) | ||
233 | if oldInfo != nil && info.isDir() { | ||
234 | for k, v := range oldInfo.children { | ||
235 | oldChildren[k] = v | ||
236 | } | ||
237 | } | ||
238 | |||
239 | for name, newChild := range info.children { | ||
240 | oldChild, _ := oldChildren[name] | ||
241 | if oldChild != nil { | ||
242 | // change? | ||
243 | oldStat := oldChild.stat | ||
244 | newStat := newChild.stat | ||
245 | // Note: We can't compare inode or ctime or blocksize here, because these change | ||
246 | // when copying a file into a container. However, that is not generally a problem | ||
247 | // because any content change will change mtime, and any status change should | ||
248 | // be visible when actually comparing the stat fields. The only time this | ||
249 | // breaks down is if some code intentionally hides a change by setting | ||
250 | // back mtime | ||
251 | if statDifferent(oldStat, newStat) || | ||
252 | bytes.Compare(oldChild.capability, newChild.capability) != 0 { | ||
253 | change := Change{ | ||
254 | Path: newChild.path(), | ||
255 | Kind: ChangeModify, | ||
256 | } | ||
257 | *changes = append(*changes, change) | ||
258 | newChild.added = true | ||
259 | } | ||
260 | |||
261 | // Remove from copy so we can detect deletions | ||
262 | delete(oldChildren, name) | ||
263 | } | ||
264 | |||
265 | newChild.addChanges(oldChild, changes) | ||
266 | } | ||
267 | for _, oldChild := range oldChildren { | ||
268 | // delete | ||
269 | change := Change{ | ||
270 | Path: oldChild.path(), | ||
271 | Kind: ChangeDelete, | ||
272 | } | ||
273 | *changes = append(*changes, change) | ||
274 | } | ||
275 | |||
276 | // If there were changes inside this directory, we need to add it, even if the directory | ||
277 | // itself wasn't changed. This is needed to properly save and restore filesystem permissions. | ||
278 | // As this runs on the daemon side, file paths are OS specific. | ||
279 | if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { | ||
280 | change := Change{ | ||
281 | Path: info.path(), | ||
282 | Kind: ChangeModify, | ||
283 | } | ||
284 | // Let's insert the directory entry before the recently added entries located inside this dir | ||
285 | *changes = append(*changes, change) // just to resize the slice, will be overwritten | ||
286 | copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) | ||
287 | (*changes)[sizeAtEntry] = change | ||
288 | } | ||
289 | |||
290 | } | ||
291 | |||
// Changes add changes to file information.
// It returns the list of changes between oldInfo's tree and this tree;
// oldInfo may be nil, in which case every entry is reported as an add.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
	var changes []Change

	info.addChanges(oldInfo, &changes)

	return changes
}
300 | |||
301 | func newRootFileInfo() *FileInfo { | ||
302 | // As this runs on the daemon side, file paths are OS specific. | ||
303 | root := &FileInfo{ | ||
304 | name: string(os.PathSeparator), | ||
305 | children: make(map[string]*FileInfo), | ||
306 | } | ||
307 | return root | ||
308 | } | ||
309 | |||
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
	var (
		oldRoot, newRoot *FileInfo
	)
	if oldDir == "" {
		// Compare against a fresh empty directory so every entry in newDir
		// is reported as an addition; the directory is removed on return.
		emptyDir, err := ioutil.TempDir("", "empty")
		if err != nil {
			return nil, err
		}
		defer os.Remove(emptyDir)
		oldDir = emptyDir
	}
	oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
	if err != nil {
		return nil, err
	}

	return newRoot.Changes(oldRoot), nil
}
331 | |||
332 | // ChangesSize calculates the size in bytes of the provided changes, based on newDir. | ||
333 | func ChangesSize(newDir string, changes []Change) int64 { | ||
334 | var ( | ||
335 | size int64 | ||
336 | sf = make(map[uint64]struct{}) | ||
337 | ) | ||
338 | for _, change := range changes { | ||
339 | if change.Kind == ChangeModify || change.Kind == ChangeAdd { | ||
340 | file := filepath.Join(newDir, change.Path) | ||
341 | fileInfo, err := os.Lstat(file) | ||
342 | if err != nil { | ||
343 | logrus.Errorf("Can not stat %q: %s", file, err) | ||
344 | continue | ||
345 | } | ||
346 | |||
347 | if fileInfo != nil && !fileInfo.IsDir() { | ||
348 | if hasHardlinks(fileInfo) { | ||
349 | inode := getIno(fileInfo) | ||
350 | if _, ok := sf[inode]; !ok { | ||
351 | size += fileInfo.Size() | ||
352 | sf[inode] = struct{}{} | ||
353 | } | ||
354 | } else { | ||
355 | size += fileInfo.Size() | ||
356 | } | ||
357 | } | ||
358 | } | ||
359 | } | ||
360 | return size | ||
361 | } | ||
362 | |||
// ExportChanges produces an Archive from the provided changes, relative to dir.
// The archive is produced lazily over a pipe: a goroutine writes tar entries
// as the returned reader is consumed. Deletions become empty whiteout entries
// (WhiteoutPrefix + base name); other changes stream the file content from
// dir. Per-entry write failures are logged and skipped rather than aborting
// the stream.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
	reader, writer := io.Pipe()
	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(writer),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
			UIDMaps:   uidMaps,
			GIDMaps:   gidMaps,
		}
		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		sort.Sort(changesByPath(changes))

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this
		for _, change := range changes {
			if change.Kind == ChangeDelete {
				whiteOutDir := filepath.Dir(change.Path)
				whiteOutBase := filepath.Base(change.Path)
				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
				timestamp := time.Now()
				hdr := &tar.Header{
					// [1:] strips the leading separator; tar names are relative.
					Name:       whiteOut[1:],
					Size:       0,
					ModTime:    timestamp,
					AccessTime: timestamp,
					ChangeTime: timestamp,
				}
				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
					logrus.Debugf("Can't write whiteout header: %s", err)
				}
			} else {
				path := filepath.Join(dir, change.Path)
				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", path, err)
				}
			}
		}

		// Make sure to check the error on Close.
		if err := ta.TarWriter.Close(); err != nil {
			logrus.Debugf("Can't close layer: %s", err)
		}
		if err := writer.Close(); err != nil {
			logrus.Debugf("failed close Changes writer: %s", err)
		}
	}()
	return reader, nil
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go new file mode 100644 index 0000000..378cc09 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go | |||
@@ -0,0 +1,285 @@ | |||
1 | package archive | ||
2 | |||
3 | import ( | ||
4 | "bytes" | ||
5 | "fmt" | ||
6 | "os" | ||
7 | "path/filepath" | ||
8 | "sort" | ||
9 | "syscall" | ||
10 | "unsafe" | ||
11 | |||
12 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" | ||
13 | ) | ||
14 | |||
// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
	dir1  string    // root of the first (old) tree on disk
	dir2  string    // root of the second (new) tree on disk
	root1 *FileInfo // in-memory tree being built for dir1
	root2 *FileInfo // in-memory tree being built for dir2
}
29 | |||
30 | // collectFileInfoForChanges returns a complete representation of the trees | ||
31 | // rooted at dir1 and dir2, with one important exception: any subtree or | ||
32 | // leaf where the inode and device numbers are an exact match between dir1 | ||
33 | // and dir2 will be pruned from the results. This method is *only* to be used | ||
34 | // to generating a list of changes between the two directories, as it does not | ||
35 | // reflect the full contents. | ||
36 | func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { | ||
37 | w := &walker{ | ||
38 | dir1: dir1, | ||
39 | dir2: dir2, | ||
40 | root1: newRootFileInfo(), | ||
41 | root2: newRootFileInfo(), | ||
42 | } | ||
43 | |||
44 | i1, err := os.Lstat(w.dir1) | ||
45 | if err != nil { | ||
46 | return nil, nil, err | ||
47 | } | ||
48 | i2, err := os.Lstat(w.dir2) | ||
49 | if err != nil { | ||
50 | return nil, nil, err | ||
51 | } | ||
52 | |||
53 | if err := w.walk("/", i1, i2); err != nil { | ||
54 | return nil, nil, err | ||
55 | } | ||
56 | |||
57 | return w.root1, w.root2, nil | ||
58 | } | ||
59 | |||
60 | // Given a FileInfo, its path info, and a reference to the root of the tree | ||
61 | // being constructed, register this file with the tree. | ||
62 | func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { | ||
63 | if fi == nil { | ||
64 | return nil | ||
65 | } | ||
66 | parent := root.LookUp(filepath.Dir(path)) | ||
67 | if parent == nil { | ||
68 | return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) | ||
69 | } | ||
70 | info := &FileInfo{ | ||
71 | name: filepath.Base(path), | ||
72 | children: make(map[string]*FileInfo), | ||
73 | parent: parent, | ||
74 | } | ||
75 | cpath := filepath.Join(dir, path) | ||
76 | stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) | ||
77 | if err != nil { | ||
78 | return err | ||
79 | } | ||
80 | info.stat = stat | ||
81 | info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access | ||
82 | parent.children[info.name] = info | ||
83 | return nil | ||
84 | } | ||
85 | |||
// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
//
// i1/i2 are the lstat results for path under dir1/dir2 respectively; either
// may be nil when the entry exists in only one tree.
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
	// Register these nodes with the return trees, unless we're still at the
	// (already-created) roots:
	if path != "/" {
		if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
			return err
		}
		if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
			return err
		}
	}

	is1Dir := i1 != nil && i1.IsDir()
	is2Dir := i2 != nil && i2.IsDir()

	// Inode equality only implies identity when both entries live on the
	// same device; otherwise matching inode numbers are coincidence.
	sameDevice := false
	if i1 != nil && i2 != nil {
		si1 := i1.Sys().(*syscall.Stat_t)
		si2 := i2.Sys().(*syscall.Stat_t)
		if si1.Dev == si2.Dev {
			sameDevice = true
		}
	}

	// If these files are both non-existent, or leaves (non-dirs), we are done.
	if !is1Dir && !is2Dir {
		return nil
	}

	// Fetch the names of all the files contained in both directories being walked:
	var names1, names2 []nameIno
	if is1Dir {
		names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}
	if is2Dir {
		names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}

	// We have lists of the files contained in both parallel directories, sorted
	// in the same order. Walk them in parallel, generating a unique merged list
	// of all items present in either or both directories.
	var names []string
	ix1 := 0
	ix2 := 0

	for {
		if ix1 >= len(names1) {
			break
		}
		if ix2 >= len(names2) {
			break
		}

		ni1 := names1[ix1]
		ni2 := names2[ix2]

		switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
		case -1: // ni1 < ni2 -- advance ni1
			// we will not encounter ni1 in names2
			names = append(names, ni1.name)
			ix1++
		case 0: // ni1 == ni2
			// Same name in both trees: only recurse into it if the inodes
			// differ (or the devices differ, making inodes incomparable).
			// Matching inode+device means the entry (and, for a directory,
			// its whole subtree) is shared and can be pruned.
			if ni1.ino != ni2.ino || !sameDevice {
				names = append(names, ni1.name)
			}
			ix1++
			ix2++
		case 1: // ni1 > ni2 -- advance ni2
			// we will not encounter ni2 in names1
			names = append(names, ni2.name)
			ix2++
		}
	}
	// Drain whichever listing still has entries after the other ran out.
	for ix1 < len(names1) {
		names = append(names, names1[ix1].name)
		ix1++
	}
	for ix2 < len(names2) {
		names = append(names, names2[ix2].name)
		ix2++
	}

	// For each of the names present in either or both of the directories being
	// iterated, stat the name under each root, and recurse the pair of them:
	for _, name := range names {
		fname := filepath.Join(path, name)
		var cInfo1, cInfo2 os.FileInfo
		if is1Dir {
			cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if is2Dir {
			cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if err = w.walk(fname, cInfo1, cInfo2); err != nil {
			return err
		}
	}
	return nil
}
199 | |||
// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
	name string // base name of the directory entry
	ino  uint64 // inode number as reported by getdents(2)
}
205 | |||
// nameInoSlice implements sort.Interface so directory listings can be sorted
// by entry name, keeping both trees' listings in identical order for the
// parallel merge in walk.
type nameInoSlice []nameIno

func (s nameInoSlice) Len() int           { return len(s) }
func (s nameInoSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
211 | |||
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
//
// The result is sorted by name so callers can merge two listings in lockstep.
func readdirnames(dirname string) (names []nameIno, err error) {
	var (
		size = 100
		buf  = make([]byte, 4096) // scratch buffer refilled by ReadDirent
		nbuf int                  // number of valid bytes currently in buf
		bufp int                  // read offset into the valid region of buf
		nb   int
	)

	f, err := os.Open(dirname)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	names = make([]nameIno, 0, size) // Empty with room to grow.
	for {
		// Refill the buffer if necessary
		if bufp >= nbuf {
			bufp = 0
			nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
			// Clamp a negative count before the error check so the EOF test
			// below sees a sane value either way.
			if nbuf < 0 {
				nbuf = 0
			}
			if err != nil {
				return nil, os.NewSyscallError("readdirent", err)
			}
			if nbuf <= 0 {
				break // EOF
			}
		}

		// Drain the buffer
		nb, names = parseDirent(buf[bufp:nbuf], names)
		bufp += nb
	}

	sl := nameInoSlice(names)
	sort.Sort(sl)
	return sl, nil
}
257 | |||
// parseDirent is a minor modification of syscall.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
//
// It consumes whole dirent records from buf and appends to names, returning
// how many bytes were consumed alongside the extended slice.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
	origlen := len(buf)
	for len(buf) > 0 {
		// Reinterpret the head of the buffer as a raw kernel dirent record,
		// then advance by its self-declared record length.
		dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
		buf = buf[dirent.Reclen:]
		if dirent.Ino == 0 { // File absent in directory.
			continue
		}
		// Over-sized array view over the NUL-terminated name field; clen
		// finds the actual name length. 10000 is just a safe upper bound.
		bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
		var name = string(bytes[0:clen(bytes[:])])
		if name == "." || name == ".." { // Useless names
			continue
		}
		names = append(names, nameIno{name, dirent.Ino})
	}
	return origlen - len(buf), names
}
277 | |||
// clen returns the length of the NUL-terminated string stored in n, or
// len(n) when no NUL byte is present.
func clen(n []byte) int {
	if i := bytes.IndexByte(n, 0); i >= 0 {
		return i
	}
	return len(n)
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 0000000..35832f0 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go | |||
@@ -0,0 +1,97 @@ | |||
1 | // +build !linux | ||
2 | |||
3 | package archive | ||
4 | |||
5 | import ( | ||
6 | "fmt" | ||
7 | "os" | ||
8 | "path/filepath" | ||
9 | "runtime" | ||
10 | "strings" | ||
11 | |||
12 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" | ||
13 | ) | ||
14 | |||
15 | func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { | ||
16 | var ( | ||
17 | oldRoot, newRoot *FileInfo | ||
18 | err1, err2 error | ||
19 | errs = make(chan error, 2) | ||
20 | ) | ||
21 | go func() { | ||
22 | oldRoot, err1 = collectFileInfo(oldDir) | ||
23 | errs <- err1 | ||
24 | }() | ||
25 | go func() { | ||
26 | newRoot, err2 = collectFileInfo(newDir) | ||
27 | errs <- err2 | ||
28 | }() | ||
29 | |||
30 | // block until both routines have returned | ||
31 | for i := 0; i < 2; i++ { | ||
32 | if err := <-errs; err != nil { | ||
33 | return nil, nil, err | ||
34 | } | ||
35 | } | ||
36 | |||
37 | return oldRoot, newRoot, nil | ||
38 | } | ||
39 | |||
// collectFileInfo builds a FileInfo tree describing every entry under
// sourceDir by walking it with filepath.Walk.
func collectFileInfo(sourceDir string) (*FileInfo, error) {
	root := newRootFileInfo()

	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		relPath, err := filepath.Rel(sourceDir, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		relPath = filepath.Join(string(os.PathSeparator), relPath)

		// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
		// Temporary workaround. If the returned path starts with two backslashes,
		// trim it down to a single backslash. Only relevant on Windows.
		if runtime.GOOS == "windows" {
			if strings.HasPrefix(relPath, `\\`) {
				relPath = relPath[1:]
			}
		}

		// The root node already exists (created by newRootFileInfo); skip it.
		if relPath == string(os.PathSeparator) {
			return nil
		}

		// Walk visits parents before children, so the parent must already be
		// in the tree; a miss indicates a logic error.
		parent := root.LookUp(filepath.Dir(relPath))
		if parent == nil {
			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
		}

		info := &FileInfo{
			name:     filepath.Base(relPath),
			children: make(map[string]*FileInfo),
			parent:   parent,
		}

		s, err := system.Lstat(path)
		if err != nil {
			return err
		}
		info.stat = s

		// Best-effort: the capability xattr may legitimately be absent.
		info.capability, _ = system.Lgetxattr(path, "security.capability")

		parent.children[info.name] = info

		return nil
	})
	if err != nil {
		return nil, err
	}
	return root, nil
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go new file mode 100644 index 0000000..6646b4d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go | |||
@@ -0,0 +1,36 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package archive | ||
4 | |||
5 | import ( | ||
6 | "os" | ||
7 | "syscall" | ||
8 | |||
9 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" | ||
10 | ) | ||
11 | |||
12 | func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { | ||
13 | // Don't look at size for dirs, its not a good measure of change | ||
14 | if oldStat.Mode() != newStat.Mode() || | ||
15 | oldStat.UID() != newStat.UID() || | ||
16 | oldStat.GID() != newStat.GID() || | ||
17 | oldStat.Rdev() != newStat.Rdev() || | ||
18 | // Don't look at size for dirs, its not a good measure of change | ||
19 | (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && | ||
20 | (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { | ||
21 | return true | ||
22 | } | ||
23 | return false | ||
24 | } | ||
25 | |||
// isDir reports whether info describes a directory. The tree root has a nil
// parent and is always treated as a directory.
func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
}
29 | |||
// getIno returns the inode number from the file's underlying syscall.Stat_t.
func getIno(fi os.FileInfo) uint64 {
	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
}
33 | |||
// hasHardlinks reports whether the file has more than one directory entry
// pointing at it (link count > 1).
func hasHardlinks(fi os.FileInfo) bool {
	return fi.Sys().(*syscall.Stat_t).Nlink > 1
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go new file mode 100644 index 0000000..2d8708d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go | |||
@@ -0,0 +1,30 @@ | |||
1 | package archive | ||
2 | |||
3 | import ( | ||
4 | "os" | ||
5 | |||
6 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" | ||
7 | ) | ||
8 | |||
9 | func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { | ||
10 | |||
11 | // Don't look at size for dirs, its not a good measure of change | ||
12 | if oldStat.ModTime() != newStat.ModTime() || | ||
13 | oldStat.Mode() != newStat.Mode() || | ||
14 | oldStat.Size() != newStat.Size() && !oldStat.IsDir() { | ||
15 | return true | ||
16 | } | ||
17 | return false | ||
18 | } | ||
19 | |||
// isDir reports whether info describes a directory. The tree root has a nil
// parent and is always treated as a directory.
func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.IsDir()
}
23 | |||
// getIno is a no-op on Windows: inode numbers are not exposed, so every
// file reports zero.
func getIno(fi os.FileInfo) (inode uint64) {
	return 0
}
27 | |||
// hasHardlinks always reports false on Windows; link counts are not
// available from os.FileInfo here.
func hasHardlinks(fi os.FileInfo) bool {
	return false
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 0000000..e950912 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go | |||
@@ -0,0 +1,458 @@ | |||
1 | package archive | ||
2 | |||
3 | import ( | ||
4 | "archive/tar" | ||
5 | "errors" | ||
6 | "io" | ||
7 | "io/ioutil" | ||
8 | "os" | ||
9 | "path/filepath" | ||
10 | "strings" | ||
11 | |||
12 | "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" | ||
13 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" | ||
14 | ) | ||
15 | |||
// Errors used or returned by this file.
var (
	// ErrNotDirectory is returned when a destination parent path exists but
	// is not a directory.
	ErrNotDirectory = errors.New("not a directory")
	// ErrDirNotExists is returned when a path asserted to be a directory
	// (trailing "/" or "/.") does not exist.
	ErrDirNotExists = errors.New("no such directory")
	// ErrCannotCopyDir is returned when attempting to copy a directory over
	// an existing non-directory destination.
	ErrCannotCopyDir = errors.New("cannot copy directory")
	// ErrInvalidCopySource marks unusable copy source content; not returned
	// within this file — presumably used by callers elsewhere in the package.
	ErrInvalidCopySource = errors.New("invalid copy source content")
)
23 | |||
24 | // PreserveTrailingDotOrSeparator returns the given cleaned path (after | ||
25 | // processing using any utility functions from the path or filepath stdlib | ||
26 | // packages) and appends a trailing `/.` or `/` if its corresponding original | ||
27 | // path (from before being processed by utility functions from the path or | ||
28 | // filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned | ||
29 | // path already ends in a `.` path segment, then another is not added. If the | ||
30 | // clean path already ends in a path separator, then another is not added. | ||
31 | func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { | ||
32 | // Ensure paths are in platform semantics | ||
33 | cleanedPath = normalizePath(cleanedPath) | ||
34 | originalPath = normalizePath(originalPath) | ||
35 | |||
36 | if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { | ||
37 | if !hasTrailingPathSeparator(cleanedPath) { | ||
38 | // Add a separator if it doesn't already end with one (a cleaned | ||
39 | // path would only end in a separator if it is the root). | ||
40 | cleanedPath += string(filepath.Separator) | ||
41 | } | ||
42 | cleanedPath += "." | ||
43 | } | ||
44 | |||
45 | if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { | ||
46 | cleanedPath += string(filepath.Separator) | ||
47 | } | ||
48 | |||
49 | return cleanedPath | ||
50 | } | ||
51 | |||
52 | // assertsDirectory returns whether the given path is | ||
53 | // asserted to be a directory, i.e., the path ends with | ||
54 | // a trailing '/' or `/.`, assuming a path separator of `/`. | ||
55 | func assertsDirectory(path string) bool { | ||
56 | return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) | ||
57 | } | ||
58 | |||
// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string) bool {
	if path == "" {
		return false
	}
	return os.IsPathSeparator(path[len(path)-1])
}
64 | |||
// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
	base := filepath.Base(path)
	return base == "."
}
70 | |||
71 | // SplitPathDirEntry splits the given path between its directory name and its | ||
72 | // basename by first cleaning the path but preserves a trailing "." if the | ||
73 | // original path specified the current directory. | ||
74 | func SplitPathDirEntry(path string) (dir, base string) { | ||
75 | cleanedPath := filepath.Clean(normalizePath(path)) | ||
76 | |||
77 | if specifiesCurrentDir(path) { | ||
78 | cleanedPath += string(filepath.Separator) + "." | ||
79 | } | ||
80 | |||
81 | return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) | ||
82 | } | ||
83 | |||
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
	// Delegate to TarResourceRebase with the rebase name recorded when the
	// source path was resolved (empty when no rebasing is needed).
	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}
94 | |||
95 | // TarResourceRebase is like TarResource but renames the first path element of | ||
96 | // items in the resulting tar archive to match the given rebaseName if not "". | ||
97 | func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) { | ||
98 | sourcePath = normalizePath(sourcePath) | ||
99 | if _, err = os.Lstat(sourcePath); err != nil { | ||
100 | // Catches the case where the source does not exist or is not a | ||
101 | // directory if asserted to be a directory, as this also causes an | ||
102 | // error. | ||
103 | return | ||
104 | } | ||
105 | |||
106 | // Separate the source path between it's directory and | ||
107 | // the entry in that directory which we are archiving. | ||
108 | sourceDir, sourceBase := SplitPathDirEntry(sourcePath) | ||
109 | |||
110 | filter := []string{sourceBase} | ||
111 | |||
112 | logrus.Debugf("copying %q from %q", sourceBase, sourceDir) | ||
113 | |||
114 | return TarWithOptions(sourceDir, &TarOptions{ | ||
115 | Compression: Uncompressed, | ||
116 | IncludeFiles: filter, | ||
117 | IncludeSourceDir: true, | ||
118 | RebaseNames: map[string]string{ | ||
119 | sourceBase: rebaseName, | ||
120 | }, | ||
121 | }) | ||
122 | } | ||
123 | |||
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
	Path       string // resolved absolute local path
	Exists     bool   // whether the path exists on disk
	IsDir      bool   // whether the path is a directory (meaningful when Exists)
	RebaseName string // replacement first path element for archive entries, "" when none
}
132 | |||
133 | // CopyInfoSourcePath stats the given path to create a CopyInfo | ||
134 | // struct representing that resource for the source of an archive copy | ||
135 | // operation. The given path should be an absolute local path. A source path | ||
136 | // has all symlinks evaluated that appear before the last path separator ("/" | ||
137 | // on Unix). As it is to be a copy source, the path must exist. | ||
138 | func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { | ||
139 | // normalize the file path and then evaluate the symbol link | ||
140 | // we will use the target file instead of the symbol link if | ||
141 | // followLink is set | ||
142 | path = normalizePath(path) | ||
143 | |||
144 | resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) | ||
145 | if err != nil { | ||
146 | return CopyInfo{}, err | ||
147 | } | ||
148 | |||
149 | stat, err := os.Lstat(resolvedPath) | ||
150 | if err != nil { | ||
151 | return CopyInfo{}, err | ||
152 | } | ||
153 | |||
154 | return CopyInfo{ | ||
155 | Path: resolvedPath, | ||
156 | Exists: true, | ||
157 | IsDir: stat.IsDir(), | ||
158 | RebaseName: rebaseName, | ||
159 | }, nil | ||
160 | } | ||
161 | |||
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
//
// Symlinks in the final path element are followed (up to a fixed bound); a
// non-existent destination is acceptable as long as its parent directory
// exists.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
	path = normalizePath(path)
	originalPath = path
	stat, err := os.Lstat(path)

	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
		// The path exists and is not a symlink.
		return CopyInfo{
			Path:   path,
			Exists: true,
			IsDir:  stat.IsDir(),
		}, nil
	}

	// While the path is a symlink.
	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
		if n > maxSymlinkIter {
			// Don't follow symlinks more than this arbitrary number of times.
			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
		}

		// The path is a symbolic link. We need to evaluate it so that the
		// destination of the copy operation is the link target and not the
		// link itself. This is notably different than CopyInfoSourcePath which
		// only evaluates symlinks before the last appearing path separator.
		// Also note that it is okay if the last path element is a broken
		// symlink as the copy operation should create the target.
		var linkTarget string

		linkTarget, err = os.Readlink(path)
		if err != nil {
			return CopyInfo{}, err
		}

		if !system.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := SplitPathDirEntry(path)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		path = linkTarget
		stat, err = os.Lstat(path)
	}

	if err != nil {
		// It's okay if the destination path doesn't exist. We can still
		// continue the copy operation if the parent directory exists.
		if !os.IsNotExist(err) {
			return CopyInfo{}, err
		}

		// Ensure destination parent dir exists.
		dstParent, _ := SplitPathDirEntry(path)

		parentDirStat, err := os.Lstat(dstParent)
		if err != nil {
			return CopyInfo{}, err
		}
		if !parentDirStat.IsDir() {
			return CopyInfo{}, ErrNotDirectory
		}

		// Destination doesn't exist yet, but its parent is a directory:
		// report a non-existent (Exists=false) destination.
		return CopyInfo{Path: path}, nil
	}

	// The path exists after resolving symlinks.
	return CopyInfo{
		Path:   path,
		Exists: true,
		IsDir:  stat.IsDir(),
	}, nil
}
239 | |||
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
//
// NOTE: the order of the switch cases below is significant — each case
// assumes the conditions of the cases above it did not match.
func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
	// Ensure in platform semantics
	srcInfo.Path = normalizePath(srcInfo.Path)
	dstInfo.Path = normalizePath(dstInfo.Path)

	// Separate the destination path between its directory and base
	// components in case the source archive contents need to be rebased.
	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
	_, srcBase := SplitPathDirEntry(srcInfo.Path)

	switch {
	case dstInfo.Exists && dstInfo.IsDir:
		// The destination exists as a directory. No alteration
		// to srcContent is needed as its contents can be
		// simply extracted to the destination directory.
		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
	case dstInfo.Exists && srcInfo.IsDir:
		// The destination exists as some type of file and the source
		// content is a directory. This is an error condition since
		// you cannot copy a directory to an existing file location.
		return "", nil, ErrCannotCopyDir
	case dstInfo.Exists:
		// The destination exists as some type of file and the source content
		// is also a file. The source content entry will have to be renamed to
		// have a basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case srcInfo.IsDir:
		// The destination does not exist and the source content is an archive
		// of a directory. The archive should be extracted to the parent of
		// the destination path instead, and when it is, the directory that is
		// created as a result should take the name of the destination path.
		// The source content entries will have to be renamed to have a
		// basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case assertsDirectory(dstInfo.Path):
		// The destination does not exist and is asserted to be created as a
		// directory, but the source content is not a directory. This is an
		// error condition since you cannot create a directory from a file
		// source.
		return "", nil, ErrDirNotExists
	default:
		// The last remaining case is when the destination does not exist, is
		// not asserted to be a directory, and the source content is not an
		// archive of a directory. It this case, the destination file will need
		// to be created when the archive is extracted and the source content
		// entry will have to be renamed to have a basename which matches the
		// destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	}

}
304 | |||
305 | // RebaseArchiveEntries rewrites the given srcContent archive replacing | ||
306 | // an occurrence of oldBase with newBase at the beginning of entry names. | ||
307 | func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { | ||
308 | if oldBase == string(os.PathSeparator) { | ||
309 | // If oldBase specifies the root directory, use an empty string as | ||
310 | // oldBase instead so that newBase doesn't replace the path separator | ||
311 | // that all paths will start with. | ||
312 | oldBase = "" | ||
313 | } | ||
314 | |||
315 | rebased, w := io.Pipe() | ||
316 | |||
317 | go func() { | ||
318 | srcTar := tar.NewReader(srcContent) | ||
319 | rebasedTar := tar.NewWriter(w) | ||
320 | |||
321 | for { | ||
322 | hdr, err := srcTar.Next() | ||
323 | if err == io.EOF { | ||
324 | // Signals end of archive. | ||
325 | rebasedTar.Close() | ||
326 | w.Close() | ||
327 | return | ||
328 | } | ||
329 | if err != nil { | ||
330 | w.CloseWithError(err) | ||
331 | return | ||
332 | } | ||
333 | |||
334 | hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) | ||
335 | |||
336 | if err = rebasedTar.WriteHeader(hdr); err != nil { | ||
337 | w.CloseWithError(err) | ||
338 | return | ||
339 | } | ||
340 | |||
341 | if _, err = io.Copy(rebasedTar, srcTar); err != nil { | ||
342 | w.CloseWithError(err) | ||
343 | return | ||
344 | } | ||
345 | } | ||
346 | }() | ||
347 | |||
348 | return rebased | ||
349 | } | ||
350 | |||
351 | // CopyResource performs an archive copy from the given source path to the | ||
352 | // given destination path. The source path MUST exist and the destination | ||
353 | // path's parent directory must exist. | ||
354 | func CopyResource(srcPath, dstPath string, followLink bool) error { | ||
355 | var ( | ||
356 | srcInfo CopyInfo | ||
357 | err error | ||
358 | ) | ||
359 | |||
360 | // Ensure in platform semantics | ||
361 | srcPath = normalizePath(srcPath) | ||
362 | dstPath = normalizePath(dstPath) | ||
363 | |||
364 | // Clean the source and destination paths. | ||
365 | srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) | ||
366 | dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) | ||
367 | |||
368 | if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { | ||
369 | return err | ||
370 | } | ||
371 | |||
372 | content, err := TarResource(srcInfo) | ||
373 | if err != nil { | ||
374 | return err | ||
375 | } | ||
376 | defer content.Close() | ||
377 | |||
378 | return CopyTo(content, srcInfo, dstPath) | ||
379 | } | ||
380 | |||
381 | // CopyTo handles extracting the given content whose | ||
382 | // entries should be sourced from srcInfo to dstPath. | ||
383 | func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error { | ||
384 | // The destination path need not exist, but CopyInfoDestinationPath will | ||
385 | // ensure that at least the parent directory exists. | ||
386 | dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) | ||
387 | if err != nil { | ||
388 | return err | ||
389 | } | ||
390 | |||
391 | dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) | ||
392 | if err != nil { | ||
393 | return err | ||
394 | } | ||
395 | defer copyArchive.Close() | ||
396 | |||
397 | options := &TarOptions{ | ||
398 | NoLchown: true, | ||
399 | NoOverwriteDirNonDir: true, | ||
400 | } | ||
401 | |||
402 | return Untar(copyArchive, dstDir, options) | ||
403 | } | ||
404 | |||
405 | // ResolveHostSourcePath decides real path need to be copied with parameters such as | ||
406 | // whether to follow symbol link or not, if followLink is true, resolvedPath will return | ||
407 | // link target of any symbol link file, else it will only resolve symlink of directory | ||
408 | // but return symbol link file itself without resolving. | ||
409 | func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { | ||
410 | if followLink { | ||
411 | resolvedPath, err = filepath.EvalSymlinks(path) | ||
412 | if err != nil { | ||
413 | return | ||
414 | } | ||
415 | |||
416 | resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) | ||
417 | } else { | ||
418 | dirPath, basePath := filepath.Split(path) | ||
419 | |||
420 | // if not follow symbol link, then resolve symbol link of parent dir | ||
421 | var resolvedDirPath string | ||
422 | resolvedDirPath, err = filepath.EvalSymlinks(dirPath) | ||
423 | if err != nil { | ||
424 | return | ||
425 | } | ||
426 | // resolvedDirPath will have been cleaned (no trailing path separators) so | ||
427 | // we can manually join it with the base path element. | ||
428 | resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath | ||
429 | if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { | ||
430 | rebaseName = filepath.Base(path) | ||
431 | } | ||
432 | } | ||
433 | return resolvedPath, rebaseName, nil | ||
434 | } | ||
435 | |||
436 | // GetRebaseName normalizes and compares path and resolvedPath, | ||
437 | // return completed resolved path and rebased file name | ||
438 | func GetRebaseName(path, resolvedPath string) (string, string) { | ||
439 | // linkTarget will have been cleaned (no trailing path separators and dot) so | ||
440 | // we can manually join it with them | ||
441 | var rebaseName string | ||
442 | if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { | ||
443 | resolvedPath += string(filepath.Separator) + "." | ||
444 | } | ||
445 | |||
446 | if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { | ||
447 | resolvedPath += string(filepath.Separator) | ||
448 | } | ||
449 | |||
450 | if filepath.Base(path) != filepath.Base(resolvedPath) { | ||
451 | // In the case where the path had a trailing separator and a symlink | ||
452 | // evaluation has changed the last path component, we will need to | ||
453 | // rebase the name in the archive that is being copied to match the | ||
454 | // originally requested name. | ||
455 | rebaseName = filepath.Base(path) | ||
456 | } | ||
457 | return resolvedPath, rebaseName | ||
458 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 0000000..e305b5e --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go | |||
@@ -0,0 +1,11 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package archive | ||
4 | |||
5 | import ( | ||
6 | "path/filepath" | ||
7 | ) | ||
8 | |||
// normalizePath converts path to this platform's separator convention.
// On non-Windows platforms the native separator is the forward slash, so
// any backslashes are rewritten to "/" and slash-only paths pass through
// unchanged.
func normalizePath(path string) string {
	return filepath.ToSlash(path)
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 0000000..2b775b4 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go | |||
@@ -0,0 +1,9 @@ | |||
1 | package archive | ||
2 | |||
3 | import ( | ||
4 | "path/filepath" | ||
5 | ) | ||
6 | |||
// normalizePath converts path to this platform's separator convention.
// On Windows the native separator is the backslash, so any forward
// slashes are rewritten to "\".
func normalizePath(path string) string {
	return filepath.FromSlash(path)
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 0000000..887dd54 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go | |||
@@ -0,0 +1,279 @@ | |||
1 | package archive | ||
2 | |||
3 | import ( | ||
4 | "archive/tar" | ||
5 | "fmt" | ||
6 | "io" | ||
7 | "io/ioutil" | ||
8 | "os" | ||
9 | "path/filepath" | ||
10 | "runtime" | ||
11 | "strings" | ||
12 | |||
13 | "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" | ||
14 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools" | ||
15 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" | ||
16 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" | ||
17 | ) | ||
18 | |||
19 | // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be | ||
20 | // compressed or uncompressed. | ||
21 | // Returns the size in bytes of the contents of the layer. | ||
22 | func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { | ||
23 | tr := tar.NewReader(layer) | ||
24 | trBuf := pools.BufioReader32KPool.Get(tr) | ||
25 | defer pools.BufioReader32KPool.Put(trBuf) | ||
26 | |||
27 | var dirs []*tar.Header | ||
28 | unpackedPaths := make(map[string]struct{}) | ||
29 | |||
30 | if options == nil { | ||
31 | options = &TarOptions{} | ||
32 | } | ||
33 | if options.ExcludePatterns == nil { | ||
34 | options.ExcludePatterns = []string{} | ||
35 | } | ||
36 | remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) | ||
37 | if err != nil { | ||
38 | return 0, err | ||
39 | } | ||
40 | |||
41 | aufsTempdir := "" | ||
42 | aufsHardlinks := make(map[string]*tar.Header) | ||
43 | |||
44 | if options == nil { | ||
45 | options = &TarOptions{} | ||
46 | } | ||
47 | // Iterate through the files in the archive. | ||
48 | for { | ||
49 | hdr, err := tr.Next() | ||
50 | if err == io.EOF { | ||
51 | // end of tar archive | ||
52 | break | ||
53 | } | ||
54 | if err != nil { | ||
55 | return 0, err | ||
56 | } | ||
57 | |||
58 | size += hdr.Size | ||
59 | |||
60 | // Normalize name, for safety and for a simple is-root check | ||
61 | hdr.Name = filepath.Clean(hdr.Name) | ||
62 | |||
63 | // Windows does not support filenames with colons in them. Ignore | ||
64 | // these files. This is not a problem though (although it might | ||
65 | // appear that it is). Let's suppose a client is running docker pull. | ||
66 | // The daemon it points to is Windows. Would it make sense for the | ||
67 | // client to be doing a docker pull Ubuntu for example (which has files | ||
68 | // with colons in the name under /usr/share/man/man3)? No, absolutely | ||
69 | // not as it would really only make sense that they were pulling a | ||
70 | // Windows image. However, for development, it is necessary to be able | ||
71 | // to pull Linux images which are in the repository. | ||
72 | // | ||
73 | // TODO Windows. Once the registry is aware of what images are Windows- | ||
74 | // specific or Linux-specific, this warning should be changed to an error | ||
75 | // to cater for the situation where someone does manage to upload a Linux | ||
76 | // image but have it tagged as Windows inadvertently. | ||
77 | if runtime.GOOS == "windows" { | ||
78 | if strings.Contains(hdr.Name, ":") { | ||
79 | logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) | ||
80 | continue | ||
81 | } | ||
82 | } | ||
83 | |||
84 | // Note as these operations are platform specific, so must the slash be. | ||
85 | if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { | ||
86 | // Not the root directory, ensure that the parent directory exists. | ||
87 | // This happened in some tests where an image had a tarfile without any | ||
88 | // parent directories. | ||
89 | parent := filepath.Dir(hdr.Name) | ||
90 | parentPath := filepath.Join(dest, parent) | ||
91 | |||
92 | if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { | ||
93 | err = system.MkdirAll(parentPath, 0600) | ||
94 | if err != nil { | ||
95 | return 0, err | ||
96 | } | ||
97 | } | ||
98 | } | ||
99 | |||
100 | // Skip AUFS metadata dirs | ||
101 | if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { | ||
102 | // Regular files inside /.wh..wh.plnk can be used as hardlink targets | ||
103 | // We don't want this directory, but we need the files in them so that | ||
104 | // such hardlinks can be resolved. | ||
105 | if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { | ||
106 | basename := filepath.Base(hdr.Name) | ||
107 | aufsHardlinks[basename] = hdr | ||
108 | if aufsTempdir == "" { | ||
109 | if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { | ||
110 | return 0, err | ||
111 | } | ||
112 | defer os.RemoveAll(aufsTempdir) | ||
113 | } | ||
114 | if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { | ||
115 | return 0, err | ||
116 | } | ||
117 | } | ||
118 | |||
119 | if hdr.Name != WhiteoutOpaqueDir { | ||
120 | continue | ||
121 | } | ||
122 | } | ||
123 | path := filepath.Join(dest, hdr.Name) | ||
124 | rel, err := filepath.Rel(dest, path) | ||
125 | if err != nil { | ||
126 | return 0, err | ||
127 | } | ||
128 | |||
129 | // Note as these operations are platform specific, so must the slash be. | ||
130 | if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { | ||
131 | return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) | ||
132 | } | ||
133 | base := filepath.Base(path) | ||
134 | |||
135 | if strings.HasPrefix(base, WhiteoutPrefix) { | ||
136 | dir := filepath.Dir(path) | ||
137 | if base == WhiteoutOpaqueDir { | ||
138 | _, err := os.Lstat(dir) | ||
139 | if err != nil { | ||
140 | return 0, err | ||
141 | } | ||
142 | err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { | ||
143 | if err != nil { | ||
144 | if os.IsNotExist(err) { | ||
145 | err = nil // parent was deleted | ||
146 | } | ||
147 | return err | ||
148 | } | ||
149 | if path == dir { | ||
150 | return nil | ||
151 | } | ||
152 | if _, exists := unpackedPaths[path]; !exists { | ||
153 | err := os.RemoveAll(path) | ||
154 | return err | ||
155 | } | ||
156 | return nil | ||
157 | }) | ||
158 | if err != nil { | ||
159 | return 0, err | ||
160 | } | ||
161 | } else { | ||
162 | originalBase := base[len(WhiteoutPrefix):] | ||
163 | originalPath := filepath.Join(dir, originalBase) | ||
164 | if err := os.RemoveAll(originalPath); err != nil { | ||
165 | return 0, err | ||
166 | } | ||
167 | } | ||
168 | } else { | ||
169 | // If path exits we almost always just want to remove and replace it. | ||
170 | // The only exception is when it is a directory *and* the file from | ||
171 | // the layer is also a directory. Then we want to merge them (i.e. | ||
172 | // just apply the metadata from the layer). | ||
173 | if fi, err := os.Lstat(path); err == nil { | ||
174 | if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { | ||
175 | if err := os.RemoveAll(path); err != nil { | ||
176 | return 0, err | ||
177 | } | ||
178 | } | ||
179 | } | ||
180 | |||
181 | trBuf.Reset(tr) | ||
182 | srcData := io.Reader(trBuf) | ||
183 | srcHdr := hdr | ||
184 | |||
185 | // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so | ||
186 | // we manually retarget these into the temporary files we extracted them into | ||
187 | if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { | ||
188 | linkBasename := filepath.Base(hdr.Linkname) | ||
189 | srcHdr = aufsHardlinks[linkBasename] | ||
190 | if srcHdr == nil { | ||
191 | return 0, fmt.Errorf("Invalid aufs hardlink") | ||
192 | } | ||
193 | tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) | ||
194 | if err != nil { | ||
195 | return 0, err | ||
196 | } | ||
197 | defer tmpFile.Close() | ||
198 | srcData = tmpFile | ||
199 | } | ||
200 | |||
201 | // if the options contain a uid & gid maps, convert header uid/gid | ||
202 | // entries using the maps such that lchown sets the proper mapped | ||
203 | // uid/gid after writing the file. We only perform this mapping if | ||
204 | // the file isn't already owned by the remapped root UID or GID, as | ||
205 | // that specific uid/gid has no mapping from container -> host, and | ||
206 | // those files already have the proper ownership for inside the | ||
207 | // container. | ||
208 | if srcHdr.Uid != remappedRootUID { | ||
209 | xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) | ||
210 | if err != nil { | ||
211 | return 0, err | ||
212 | } | ||
213 | srcHdr.Uid = xUID | ||
214 | } | ||
215 | if srcHdr.Gid != remappedRootGID { | ||
216 | xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) | ||
217 | if err != nil { | ||
218 | return 0, err | ||
219 | } | ||
220 | srcHdr.Gid = xGID | ||
221 | } | ||
222 | if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { | ||
223 | return 0, err | ||
224 | } | ||
225 | |||
226 | // Directory mtimes must be handled at the end to avoid further | ||
227 | // file creation in them to modify the directory mtime | ||
228 | if hdr.Typeflag == tar.TypeDir { | ||
229 | dirs = append(dirs, hdr) | ||
230 | } | ||
231 | unpackedPaths[path] = struct{}{} | ||
232 | } | ||
233 | } | ||
234 | |||
235 | for _, hdr := range dirs { | ||
236 | path := filepath.Join(dest, hdr.Name) | ||
237 | if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { | ||
238 | return 0, err | ||
239 | } | ||
240 | } | ||
241 | |||
242 | return size, nil | ||
243 | } | ||
244 | |||
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer Reader) (int64, error) {
	// Default TarOptions, with decompression enabled.
	return applyLayerHandler(dest, layer, &TarOptions{}, true)
}
252 | |||
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
	// Caller-supplied options, with decompression disabled.
	return applyLayerHandler(dest, layer, options, false)
}
260 | |||
261 | // do the bulk load of ApplyLayer, but allow for not calling DecompressStream | ||
262 | func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) { | ||
263 | dest = filepath.Clean(dest) | ||
264 | |||
265 | // We need to be able to set any perms | ||
266 | oldmask, err := system.Umask(0) | ||
267 | if err != nil { | ||
268 | return 0, err | ||
269 | } | ||
270 | defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform | ||
271 | |||
272 | if decompress { | ||
273 | layer, err = DecompressStream(layer) | ||
274 | if err != nil { | ||
275 | return 0, err | ||
276 | } | ||
277 | } | ||
278 | return UnpackLayer(dest, layer, options) | ||
279 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 0000000..3448569 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go | |||
@@ -0,0 +1,16 @@ | |||
1 | package archive | ||
2 | |||
3 | import ( | ||
4 | "syscall" | ||
5 | "time" | ||
6 | ) | ||
7 | |||
8 | func timeToTimespec(time time.Time) (ts syscall.Timespec) { | ||
9 | if time.IsZero() { | ||
10 | // Return UTIME_OMIT special value | ||
11 | ts.Sec = 0 | ||
12 | ts.Nsec = ((1 << 30) - 2) | ||
13 | return | ||
14 | } | ||
15 | return syscall.NsecToTimespec(time.UnixNano()) | ||
16 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 0000000..e85aac0 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go | |||
@@ -0,0 +1,16 @@ | |||
1 | // +build !linux | ||
2 | |||
3 | package archive | ||
4 | |||
5 | import ( | ||
6 | "syscall" | ||
7 | "time" | ||
8 | ) | ||
9 | |||
10 | func timeToTimespec(time time.Time) (ts syscall.Timespec) { | ||
11 | nsec := int64(0) | ||
12 | if !time.IsZero() { | ||
13 | nsec = time.UnixNano() | ||
14 | } | ||
15 | return syscall.NsecToTimespec(nsec) | ||
16 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 0000000..d20478a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go | |||
@@ -0,0 +1,23 @@ | |||
1 | package archive | ||
2 | |||
3 | // Whiteouts are files with a special meaning for the layered filesystem. | ||
4 | // Docker uses AUFS whiteout files inside exported archives. In other | ||
5 | // filesystems these files are generated/handled on tar creation/extraction. | ||
6 | |||
const (
	// WhiteoutPrefix prefix means file is a whiteout. If this is followed
	// by a filename this means that file has been removed from the base
	// layer.
	WhiteoutPrefix = ".wh."

	// WhiteoutMetaPrefix prefix means whiteout has a special meaning and
	// is not for removing an actual file. Normally these files are
	// excluded from exported archives.
	WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix

	// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links
	// to other layers. Normally these should not go into exported
	// archives and all changed hardlinks should be copied to the top
	// layer.
	WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"

	// WhiteoutOpaqueDir file means directory has been made opaque -
	// meaning readdir calls to this directory do not follow to lower
	// layers.
	WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 0000000..dfb335c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go | |||
@@ -0,0 +1,59 @@ | |||
1 | package archive | ||
2 | |||
3 | import ( | ||
4 | "archive/tar" | ||
5 | "bytes" | ||
6 | "io/ioutil" | ||
7 | ) | ||
8 | |||
// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
//  * foo.txt with content "hello world"
//  * emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (Archive, error) {
	files := parseStringPairs(input...)
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for _, file := range files {
		name, content := file[0], file[1]
		// Only Name and Size are populated; Mode, ModTime and ownership
		// stay at their zero values (see the FIXME above).
		hdr := &tar.Header{
			Name: name,
			Size: int64(len(content)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return ioutil.NopCloser(buf), nil
}
47 | |||
// parseStringPairs groups a flat argument list into name/content pairs.
// A trailing unpaired element gets the empty string as its content.
func parseStringPairs(input ...string) (output [][2]string) {
	output = make([][2]string, 0, len(input)/2+1)
	for len(input) > 0 {
		pair := [2]string{input[0], ""}
		if len(input) >= 2 {
			pair[1] = input[1]
			input = input[2:]
		} else {
			input = nil
		}
		output = append(output, pair)
	}
	return output
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go new file mode 100644 index 0000000..a15cf4b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go | |||
@@ -0,0 +1,279 @@ | |||
1 | package fileutils | ||
2 | |||
3 | import ( | ||
4 | "errors" | ||
5 | "fmt" | ||
6 | "io" | ||
7 | "os" | ||
8 | "path/filepath" | ||
9 | "regexp" | ||
10 | "strings" | ||
11 | "text/scanner" | ||
12 | |||
13 | "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" | ||
14 | ) | ||
15 | |||
// exclusion returns true if the specified pattern is an exclusion
// (i.e. it begins with "!"). Using strings.HasPrefix instead of indexing
// pattern[0] makes it safe for the empty string, which reports false
// rather than panicking.
func exclusion(pattern string) bool {
	return strings.HasPrefix(pattern, "!")
}
20 | |||
// empty reports whether the specified pattern is the empty string.
func empty(pattern string) bool {
	return len(pattern) == 0
}
25 | |||
// CleanPatterns takes a slice of patterns returns a new
// slice of patterns cleaned with filepath.Clean, stripped
// of any empty patterns and lets the caller know whether the
// slice contains any exception patterns (prefixed with !).
func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
	// For every exclusion pattern we:
	//  1. clean it up,
	//  2. note whether any exception ("!") rules are present, and
	//  3. reject a lone exclusion marker ("!") as illegal.
	cleaned := make([]string, 0, len(patterns))
	dirSets := make([][]string, 0, len(patterns))
	hasExceptions := false

	for _, p := range patterns {
		// Eliminate leading and trailing whitespace; drop empty patterns.
		p = strings.TrimSpace(p)
		if p == "" {
			continue
		}
		if p[0] == '!' {
			if len(p) == 1 {
				return nil, nil, false, errors.New("Illegal exclusion pattern: !")
			}
			hasExceptions = true
		}
		p = filepath.Clean(p)
		cleaned = append(cleaned, p)
		// The per-pattern directory split is computed without the "!".
		if p[0] == '!' {
			p = p[1:]
		}
		dirSets = append(dirSets, strings.Split(p, "/"))
	}

	return cleaned, dirSets, hasExceptions, nil
}
60 | |||
61 | // Matches returns true if file matches any of the patterns | ||
62 | // and isn't excluded by any of the subsequent patterns. | ||
63 | func Matches(file string, patterns []string) (bool, error) { | ||
64 | file = filepath.Clean(file) | ||
65 | |||
66 | if file == "." { | ||
67 | // Don't let them exclude everything, kind of silly. | ||
68 | return false, nil | ||
69 | } | ||
70 | |||
71 | patterns, patDirs, _, err := CleanPatterns(patterns) | ||
72 | if err != nil { | ||
73 | return false, err | ||
74 | } | ||
75 | |||
76 | return OptimizedMatches(file, patterns, patDirs) | ||
77 | } | ||
78 | |||
79 | // OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. | ||
80 | // It will assume that the inputs have been preprocessed and therefore the function | ||
81 | // doesn't need to do as much error checking and clean-up. This was done to avoid | ||
82 | // repeating these steps on each file being checked during the archive process. | ||
83 | // The more generic fileutils.Matches() can't make these assumptions. | ||
84 | func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { | ||
85 | matched := false | ||
86 | parentPath := filepath.Dir(file) | ||
87 | parentPathDirs := strings.Split(parentPath, "/") | ||
88 | |||
89 | for i, pattern := range patterns { | ||
90 | negative := false | ||
91 | |||
92 | if exclusion(pattern) { | ||
93 | negative = true | ||
94 | pattern = pattern[1:] | ||
95 | } | ||
96 | |||
97 | match, err := regexpMatch(pattern, file) | ||
98 | if err != nil { | ||
99 | return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) | ||
100 | } | ||
101 | |||
102 | if !match && parentPath != "." { | ||
103 | // Check to see if the pattern matches one of our parent dirs. | ||
104 | if len(patDirs[i]) <= len(parentPathDirs) { | ||
105 | match, _ = regexpMatch(strings.Join(patDirs[i], "/"), | ||
106 | strings.Join(parentPathDirs[:len(patDirs[i])], "/")) | ||
107 | } | ||
108 | } | ||
109 | |||
110 | if match { | ||
111 | matched = !negative | ||
112 | } | ||
113 | } | ||
114 | |||
115 | if matched { | ||
116 | logrus.Debugf("Skipping excluded path: %s", file) | ||
117 | } | ||
118 | |||
119 | return matched, nil | ||
120 | } | ||
121 | |||
// regexpMatch tries to match the logic of filepath.Match but
// does so using regexp logic. We do this so that we can expand the
// wildcard set to include other things, like "**" to mean any number
// of directories. This means that we should be backwards compatible
// with filepath.Match(). We'll end up supporting more stuff, due to
// the fact that we're using regexp, but that's ok - it does no harm.
func regexpMatch(pattern, path string) (bool, error) {
	// Do some syntax checking on the pattern.
	// filepath's Match() has some really weird rules that are inconsistent
	// so instead of trying to dup their logic, just call Match() for its
	// error state and if there is an error in the pattern return it.
	// If this becomes an issue we can remove this since its really only
	// needed in the error (syntax) case - which isn't really critical.
	if _, err := filepath.Match(pattern, path); err != nil {
		return false, err
	}

	sep := string(os.PathSeparator)
	escSep := sep
	if sep == `\` {
		// Backslash is a regexp metacharacter, so escape it on Windows.
		escSep += `\`
	}

	// Go through the pattern and convert it to a regexp.
	// We use a scanner so we can support utf-8 chars.
	var scan scanner.Scanner
	scan.Init(strings.NewReader(pattern))

	var re strings.Builder
	re.WriteString("^")

	for scan.Peek() != scanner.EOF {
		ch := scan.Next()

		switch {
		case ch == '*':
			if scan.Peek() == '*' {
				// is some flavor of "**"
				scan.Next()

				if scan.Peek() == scanner.EOF {
					// is "**EOF" - to align with .gitignore just accept all
					re.WriteString(".*")
				} else {
					// is "**": any run of directories, or one
					// separator-free segment
					re.WriteString("((.*" + escSep + ")|([^" + escSep + "]*))")
				}

				// Treat **/ as ** so eat the "/"
				if string(scan.Peek()) == sep {
					scan.Next()
				}
			} else {
				// is "*" so map it to anything but "/"
				re.WriteString("[^" + escSep + "]*")
			}
		case ch == '?':
			// "?" is any char except "/"
			re.WriteString("[^" + escSep + "]")
		case strings.ContainsRune(".$", ch):
			// Escape some regexp special chars that have no meaning
			// in golang's filepath.Match
			re.WriteString(`\` + string(ch))
		case ch == '\\':
			// escape next char. Note that a trailing \ in the pattern
			// will be left alone (but need to escape it)
			if sep == `\` {
				// On windows map "\" to "\\", meaning an escaped backslash,
				// and then just continue because filepath.Match on
				// Windows doesn't allow escaping at all
				re.WriteString(escSep)
				continue
			}
			if scan.Peek() != scanner.EOF {
				re.WriteString(`\` + string(scan.Next()))
			} else {
				re.WriteString(`\`)
			}
		default:
			re.WriteString(string(ch))
		}
	}

	re.WriteString("$")

	res, err := regexp.MatchString(re.String(), path)

	// Map regexp's error to filepath's so no one knows we're not using filepath
	if err != nil {
		err = filepath.ErrBadPattern
	}

	return res, err
}
214 | |||
215 | // CopyFile copies from src to dst until either EOF is reached | ||
216 | // on src or an error occurs. It verifies src exists and remove | ||
217 | // the dst if it exists. | ||
218 | func CopyFile(src, dst string) (int64, error) { | ||
219 | cleanSrc := filepath.Clean(src) | ||
220 | cleanDst := filepath.Clean(dst) | ||
221 | if cleanSrc == cleanDst { | ||
222 | return 0, nil | ||
223 | } | ||
224 | sf, err := os.Open(cleanSrc) | ||
225 | if err != nil { | ||
226 | return 0, err | ||
227 | } | ||
228 | defer sf.Close() | ||
229 | if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { | ||
230 | return 0, err | ||
231 | } | ||
232 | df, err := os.Create(cleanDst) | ||
233 | if err != nil { | ||
234 | return 0, err | ||
235 | } | ||
236 | defer df.Close() | ||
237 | return io.Copy(df, sf) | ||
238 | } | ||
239 | |||
240 | // ReadSymlinkedDirectory returns the target directory of a symlink. | ||
241 | // The target of the symbolic link may not be a file. | ||
242 | func ReadSymlinkedDirectory(path string) (string, error) { | ||
243 | var realPath string | ||
244 | var err error | ||
245 | if realPath, err = filepath.Abs(path); err != nil { | ||
246 | return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) | ||
247 | } | ||
248 | if realPath, err = filepath.EvalSymlinks(realPath); err != nil { | ||
249 | return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) | ||
250 | } | ||
251 | realPathInfo, err := os.Stat(realPath) | ||
252 | if err != nil { | ||
253 | return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) | ||
254 | } | ||
255 | if !realPathInfo.Mode().IsDir() { | ||
256 | return "", fmt.Errorf("canonical path points to a file '%s'", realPath) | ||
257 | } | ||
258 | return realPath, nil | ||
259 | } | ||
260 | |||
261 | // CreateIfNotExists creates a file or a directory only if it does not already exist. | ||
262 | func CreateIfNotExists(path string, isDir bool) error { | ||
263 | if _, err := os.Stat(path); err != nil { | ||
264 | if os.IsNotExist(err) { | ||
265 | if isDir { | ||
266 | return os.MkdirAll(path, 0755) | ||
267 | } | ||
268 | if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { | ||
269 | return err | ||
270 | } | ||
271 | f, err := os.OpenFile(path, os.O_CREATE, 0755) | ||
272 | if err != nil { | ||
273 | return err | ||
274 | } | ||
275 | f.Close() | ||
276 | } | ||
277 | } | ||
278 | return nil | ||
279 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go new file mode 100644 index 0000000..7e00802 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go | |||
@@ -0,0 +1,22 @@ | |||
1 | // +build linux freebsd | ||
2 | |||
3 | package fileutils | ||
4 | |||
5 | import ( | ||
6 | "fmt" | ||
7 | "io/ioutil" | ||
8 | "os" | ||
9 | |||
10 | "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" | ||
11 | ) | ||
12 | |||
13 | // GetTotalUsedFds Returns the number of used File Descriptors by | ||
14 | // reading it via /proc filesystem. | ||
15 | func GetTotalUsedFds() int { | ||
16 | if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { | ||
17 | logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) | ||
18 | } else { | ||
19 | return len(fds) | ||
20 | } | ||
21 | return -1 | ||
22 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go new file mode 100644 index 0000000..5ec21ca --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_windows.go | |||
@@ -0,0 +1,7 @@ | |||
1 | package fileutils | ||
2 | |||
// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
	// -1 is the "unknown" sentinel, matching the error path of the
	// unix implementation of this function.
	return -1
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go new file mode 100644 index 0000000..dcae178 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go | |||
@@ -0,0 +1,39 @@ | |||
1 | package homedir | ||
2 | |||
3 | import ( | ||
4 | "os" | ||
5 | "runtime" | ||
6 | |||
7 | "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user" | ||
8 | ) | ||
9 | |||
// Key returns the env var name for the user's home dir based on
// the platform being run on.
func Key() string {
	switch runtime.GOOS {
	case "windows":
		return "USERPROFILE"
	default:
		return "HOME"
	}
}
18 | |||
19 | // Get returns the home directory of the current user with the help of | ||
20 | // environment variables depending on the target operating system. | ||
21 | // Returned path should be used with "path/filepath" to form new paths. | ||
22 | func Get() string { | ||
23 | home := os.Getenv(Key()) | ||
24 | if home == "" && runtime.GOOS != "windows" { | ||
25 | if u, err := user.CurrentUser(); err == nil { | ||
26 | return u.Home | ||
27 | } | ||
28 | } | ||
29 | return home | ||
30 | } | ||
31 | |||
// GetShortcutString returns the string that is shortcut to user's home directory
// in the native shell of the platform running on.
func GetShortcutString() string {
	if runtime.GOOS != "windows" {
		return "~"
	}
	// Be careful when using this in format functions (contains '%').
	return "%USERPROFILE%"
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go new file mode 100644 index 0000000..a1301ee --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools.go | |||
@@ -0,0 +1,195 @@ | |||
1 | package idtools | ||
2 | |||
3 | import ( | ||
4 | "bufio" | ||
5 | "fmt" | ||
6 | "os" | ||
7 | "sort" | ||
8 | "strconv" | ||
9 | "strings" | ||
10 | ) | ||
11 | |||
// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
type IDMap struct {
	ContainerID int `json:"container_id"`
	HostID      int `json:"host_id"`
	Size        int `json:"size"`
}

// subIDRange is one start/length pair parsed out of /etc/subuid or
// /etc/subgid (see parseSubidFile).
type subIDRange struct {
	Start  int
	Length int
}

// ranges implements sort.Interface so subordinate ID ranges can be ordered
// by starting ID before being turned into IDMaps (see createIDMap).
type ranges []subIDRange

func (e ranges) Len() int           { return len(e) }
func (e ranges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }

// Standard locations of the subordinate uid/gid range files consulted when
// building ID mappings.
const (
	subuidFileName string = "/etc/subuid"
	subgidFileName string = "/etc/subgid"
)
36 | |||
// MkdirAllAs creates a directory (include any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership to the requested uid/gid pair.
//
// Delegates to mkdirAs with mkAll=true and chownExisting=true.
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
	return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
}
43 | |||
// MkdirAllNewAs creates a directory (include any along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership will be performed
//
// Delegates to mkdirAs with mkAll=true and chownExisting=false.
func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
	return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
}
50 | |||
// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership
//
// Delegates to mkdirAs with mkAll=false (only the final path element is
// created) and chownExisting=true.
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
	return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
}
56 | |||
57 | // GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. | ||
58 | // If the maps are empty, then the root uid/gid will default to "real" 0/0 | ||
59 | func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { | ||
60 | var uid, gid int | ||
61 | |||
62 | if uidMap != nil { | ||
63 | xUID, err := ToHost(0, uidMap) | ||
64 | if err != nil { | ||
65 | return -1, -1, err | ||
66 | } | ||
67 | uid = xUID | ||
68 | } | ||
69 | if gidMap != nil { | ||
70 | xGID, err := ToHost(0, gidMap) | ||
71 | if err != nil { | ||
72 | return -1, -1, err | ||
73 | } | ||
74 | gid = xGID | ||
75 | } | ||
76 | return uid, gid, nil | ||
77 | } | ||
78 | |||
79 | // ToContainer takes an id mapping, and uses it to translate a | ||
80 | // host ID to the remapped ID. If no map is provided, then the translation | ||
81 | // assumes a 1-to-1 mapping and returns the passed in id | ||
82 | func ToContainer(hostID int, idMap []IDMap) (int, error) { | ||
83 | if idMap == nil { | ||
84 | return hostID, nil | ||
85 | } | ||
86 | for _, m := range idMap { | ||
87 | if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { | ||
88 | contID := m.ContainerID + (hostID - m.HostID) | ||
89 | return contID, nil | ||
90 | } | ||
91 | } | ||
92 | return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) | ||
93 | } | ||
94 | |||
95 | // ToHost takes an id mapping and a remapped ID, and translates the | ||
96 | // ID to the mapped host ID. If no map is provided, then the translation | ||
97 | // assumes a 1-to-1 mapping and returns the passed in id # | ||
98 | func ToHost(contID int, idMap []IDMap) (int, error) { | ||
99 | if idMap == nil { | ||
100 | return contID, nil | ||
101 | } | ||
102 | for _, m := range idMap { | ||
103 | if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { | ||
104 | hostID := m.HostID + (contID - m.ContainerID) | ||
105 | return hostID, nil | ||
106 | } | ||
107 | } | ||
108 | return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) | ||
109 | } | ||
110 | |||
111 | // CreateIDMappings takes a requested user and group name and | ||
112 | // using the data from /etc/sub{uid,gid} ranges, creates the | ||
113 | // proper uid and gid remapping ranges for that user/group pair | ||
114 | func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { | ||
115 | subuidRanges, err := parseSubuid(username) | ||
116 | if err != nil { | ||
117 | return nil, nil, err | ||
118 | } | ||
119 | subgidRanges, err := parseSubgid(groupname) | ||
120 | if err != nil { | ||
121 | return nil, nil, err | ||
122 | } | ||
123 | if len(subuidRanges) == 0 { | ||
124 | return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) | ||
125 | } | ||
126 | if len(subgidRanges) == 0 { | ||
127 | return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) | ||
128 | } | ||
129 | |||
130 | return createIDMap(subuidRanges), createIDMap(subgidRanges), nil | ||
131 | } | ||
132 | |||
133 | func createIDMap(subidRanges ranges) []IDMap { | ||
134 | idMap := []IDMap{} | ||
135 | |||
136 | // sort the ranges by lowest ID first | ||
137 | sort.Sort(subidRanges) | ||
138 | containerID := 0 | ||
139 | for _, idrange := range subidRanges { | ||
140 | idMap = append(idMap, IDMap{ | ||
141 | ContainerID: containerID, | ||
142 | HostID: idrange.Start, | ||
143 | Size: idrange.Length, | ||
144 | }) | ||
145 | containerID = containerID + idrange.Length | ||
146 | } | ||
147 | return idMap | ||
148 | } | ||
149 | |||
// parseSubuid returns the subordinate uid ranges registered for username
// in /etc/subuid.
func parseSubuid(username string) (ranges, error) {
	return parseSubidFile(subuidFileName, username)
}

// parseSubgid returns the subordinate gid ranges registered in /etc/subgid.
// (The parameter is named username, but CreateIDMappings passes a group name.)
func parseSubgid(username string) (ranges, error) {
	return parseSubidFile(subgidFileName, username)
}
157 | |||
158 | func parseSubidFile(path, username string) (ranges, error) { | ||
159 | var rangeList ranges | ||
160 | |||
161 | subidFile, err := os.Open(path) | ||
162 | if err != nil { | ||
163 | return rangeList, err | ||
164 | } | ||
165 | defer subidFile.Close() | ||
166 | |||
167 | s := bufio.NewScanner(subidFile) | ||
168 | for s.Scan() { | ||
169 | if err := s.Err(); err != nil { | ||
170 | return rangeList, err | ||
171 | } | ||
172 | |||
173 | text := strings.TrimSpace(s.Text()) | ||
174 | if text == "" { | ||
175 | continue | ||
176 | } | ||
177 | parts := strings.Split(text, ":") | ||
178 | if len(parts) != 3 { | ||
179 | return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) | ||
180 | } | ||
181 | if parts[0] == username { | ||
182 | // return the first entry for a user; ignores potential for multiple ranges per user | ||
183 | startid, err := strconv.Atoi(parts[1]) | ||
184 | if err != nil { | ||
185 | return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) | ||
186 | } | ||
187 | length, err := strconv.Atoi(parts[2]) | ||
188 | if err != nil { | ||
189 | return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) | ||
190 | } | ||
191 | rangeList = append(rangeList, subIDRange{startid, length}) | ||
192 | } | ||
193 | } | ||
194 | return rangeList, nil | ||
195 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go new file mode 100644 index 0000000..0444307 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_unix.go | |||
@@ -0,0 +1,60 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package idtools | ||
4 | |||
5 | import ( | ||
6 | "os" | ||
7 | "path/filepath" | ||
8 | |||
9 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" | ||
10 | ) | ||
11 | |||
// mkdirAs creates path and chowns it to ownerUID/ownerGID.
//
//   - mkAll=true:         create all missing parents (MkdirAll semantics)
//   - chownExisting=true: when path already exists, chown it anyway
//
// Only path itself plus parents that did not exist before this call are
// chowned; pre-existing parent directories are never modified.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
	// make an array containing the original path asked for, plus (for mkAll == true)
	// all path components leading up to the complete path that don't exist before we MkdirAll
	// so that we can chown all of them properly at the end. If chownExisting is false, we won't
	// chown the full directory path if it exists
	var paths []string
	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
		paths = []string{path}
	} else if err == nil && chownExisting {
		if err := os.Chown(path, ownerUID, ownerGID); err != nil {
			return err
		}
		// short-circuit--we were called with an existing directory and chown was requested
		return nil
	} else if err == nil {
		// nothing to do; directory path fully exists already and chown was NOT requested
		return nil
	}
	// NOTE(review): a Stat error other than IsNotExist falls through to the
	// create path below with paths empty — confirm that is intended.

	if mkAll {
		// walk back to "/" looking for directories which do not exist
		// and add them to the paths array for chown after creation
		dirPath := path
		for {
			dirPath = filepath.Dir(dirPath)
			if dirPath == "/" {
				break
			}
			if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
				paths = append(paths, dirPath)
			}
		}
		if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
			return err
		}
	} else {
		if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
			return err
		}
	}
	// even if it existed, we will chown the requested path + any subpaths that
	// didn't exist when we called MkdirAll
	for _, pathComponent := range paths {
		if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
			return err
		}
	}
	return nil
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 0000000..d5ec992 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/idtools_windows.go | |||
@@ -0,0 +1,18 @@ | |||
1 | // +build windows | ||
2 | |||
3 | package idtools | ||
4 | |||
5 | import ( | ||
6 | "os" | ||
7 | |||
8 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" | ||
9 | ) | ||
10 | |||
// Platforms such as Windows do not support the UID/GID concept. So make this
// just a wrapper around system.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
	// ownerUID/ownerGID/mkAll/chownExisting exist only for signature parity
	// with the unix implementation; they are ignored here.
	if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
		return err
	}
	return nil
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 0000000..c1eedff --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go | |||
@@ -0,0 +1,155 @@ | |||
1 | package idtools | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "os/exec" | ||
6 | "path/filepath" | ||
7 | "strings" | ||
8 | "syscall" | ||
9 | ) | ||
10 | |||
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
// adduser --uid <id> --shell /bin/login --no-create-home --disabled-login --ingroup <groupname> <username>
// useradd -M -u <id> -s /bin/nologin -N -g <groupname> <username>
// addgroup --gid <id> <groupname>
// groupadd -g <id> <groupname>

// baseUID/baseGID are the lowest IDs tried when searching for an unused
// uid/gid pair; idMAX caps the search (65534 is conventionally "nobody").
const baseUID int = 10000
const baseGID int = 10000
const idMAX int = 65534

var (
	// userCommand and groupCommand hold the binary names selected by init()
	// ("adduser"/"useradd" and "addgroup"/"groupadd"); empty when no
	// suitable binary was found on this system.
	userCommand  string
	groupCommand string

	// cmdTemplates maps each supported binary to the fmt template used to
	// build its argument string (filled in by addUser/addGroup).
	cmdTemplates = map[string]string{
		"adduser":  "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s",
		"useradd":  "-M -u %d -s /bin/false -N -g %s %s",
		"addgroup": "--gid %d %s",
		"groupadd": "-g %d %s",
	}
)
33 | |||
34 | func init() { | ||
35 | // set up which commands are used for adding users/groups dependent on distro | ||
36 | if _, err := resolveBinary("adduser"); err == nil { | ||
37 | userCommand = "adduser" | ||
38 | } else if _, err := resolveBinary("useradd"); err == nil { | ||
39 | userCommand = "useradd" | ||
40 | } | ||
41 | if _, err := resolveBinary("addgroup"); err == nil { | ||
42 | groupCommand = "addgroup" | ||
43 | } else if _, err := resolveBinary("groupadd"); err == nil { | ||
44 | groupCommand = "groupadd" | ||
45 | } | ||
46 | } | ||
47 | |||
// resolveBinary looks up binname in $PATH, follows any symlinks, and returns
// the resolved path. It fails when the fully-resolved target's basename no
// longer matches the requested name (i.e. the name is a symlink to some
// differently-named binary).
func resolveBinary(binname string) (string, error) {
	binaryPath, err := exec.LookPath(binname)
	if err != nil {
		return "", err
	}
	resolvedPath, err := filepath.EvalSymlinks(binaryPath)
	if err != nil {
		return "", err
	}
	if filepath.Base(resolvedPath) != binname {
		return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
	}
	return resolvedPath, nil
}
64 | |||
65 | // AddNamespaceRangesUser takes a name and finds an unused uid, gid pair | ||
66 | // and calls the appropriate helper function to add the group and then | ||
67 | // the user to the group in /etc/group and /etc/passwd respectively. | ||
68 | // This new user's /etc/sub{uid,gid} ranges will be used for user namespace | ||
69 | // mapping ranges in containers. | ||
70 | func AddNamespaceRangesUser(name string) (int, int, error) { | ||
71 | // Find unused uid, gid pair | ||
72 | uid, err := findUnusedUID(baseUID) | ||
73 | if err != nil { | ||
74 | return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err) | ||
75 | } | ||
76 | gid, err := findUnusedGID(baseGID) | ||
77 | if err != nil { | ||
78 | return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err) | ||
79 | } | ||
80 | |||
81 | // First add the group that we will use | ||
82 | if err := addGroup(name, gid); err != nil { | ||
83 | return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err) | ||
84 | } | ||
85 | // Add the user as a member of the group | ||
86 | if err := addUser(name, uid, name); err != nil { | ||
87 | return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) | ||
88 | } | ||
89 | return uid, gid, nil | ||
90 | } | ||
91 | |||
92 | func addUser(userName string, uid int, groupName string) error { | ||
93 | |||
94 | if userCommand == "" { | ||
95 | return fmt.Errorf("Cannot add user; no useradd/adduser binary found") | ||
96 | } | ||
97 | args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName) | ||
98 | return execAddCmd(userCommand, args) | ||
99 | } | ||
100 | |||
101 | func addGroup(groupName string, gid int) error { | ||
102 | |||
103 | if groupCommand == "" { | ||
104 | return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found") | ||
105 | } | ||
106 | args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName) | ||
107 | // only error out if the error isn't that the group already exists | ||
108 | // if the group exists then our needs are already met | ||
109 | if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") { | ||
110 | return err | ||
111 | } | ||
112 | return nil | ||
113 | } | ||
114 | |||
// execAddCmd runs cmd with the space-separated args string and returns an
// error carrying the command's combined output on failure.
func execAddCmd(cmd, args string) error {
	out, err := exec.Command(cmd, strings.Split(args, " ")...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out))
	}
	return nil
}
123 | |||
// findUnusedUID returns the first numeric uid >= startUID not present in
// /etc/passwd.
func findUnusedUID(startUID int) (int, error) {
	return findUnused("passwd", startUID)
}

// findUnusedGID returns the first numeric gid >= startGID not present in
// /etc/group.
func findUnusedGID(startGID int) (int, error) {
	return findUnused("group", startGID)
}
131 | |||
// findUnused returns the first numeric ID >= id that does not appear in the
// third colon-separated field of /etc/<file> (passwd or group), or an error
// once idMAX is exceeded.
//
// NOTE(review): the shell pipeline's exit status is grep's: 1 means "no
// match" (ID free), 0 means the ID is in use. A missing /etc/<file> also
// leaves grep with empty input and status 1, so it is treated as "ID free"
// — confirm that is intended.
func findUnused(file string, id int) (int, error) {
	for {
		cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id)
		cmd := exec.Command("sh", "-c", cmdStr)
		if err := cmd.Run(); err != nil {
			// if a non-zero return code occurs, then we know the ID was not found
			// and is usable
			if exiterr, ok := err.(*exec.ExitError); ok {
				// The program has exited with an exit code != 0
				if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
					if status.ExitStatus() == 1 {
						//no match, we can use this ID
						return id, nil
					}
				}
			}
			// Any other failure (e.g. sh missing) is a real error.
			return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err)
		}
		// grep matched: this ID is taken; try the next one.
		id++
		if id > idMAX {
			return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file)
		}
	}
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 0000000..d98b354 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go | |||
@@ -0,0 +1,12 @@ | |||
1 | // +build !linux | ||
2 | |||
3 | package idtools | ||
4 | |||
5 | import "fmt" | ||
6 | |||
// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
//
// This is the non-Linux stub: it always fails, since user/group management
// is only implemented for Linux.
func AddNamespaceRangesUser(name string) (int, int, error) {
	return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 0000000..e263c28 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go | |||
@@ -0,0 +1,152 @@ | |||
1 | package ioutils | ||
2 | |||
3 | import ( | ||
4 | "errors" | ||
5 | "io" | ||
6 | "sync" | ||
7 | ) | ||
8 | |||
9 | // maxCap is the highest capacity to use in byte slices that buffer data. | ||
10 | const maxCap = 1e6 | ||
11 | |||
12 | // blockThreshold is the minimum number of bytes in the buffer which will cause | ||
13 | // a write to BytesPipe to block when allocating a new slice. | ||
14 | const blockThreshold = 1e6 | ||
15 | |||
16 | // ErrClosed is returned when Write is called on a closed BytesPipe. | ||
17 | var ErrClosed = errors.New("write to closed BytesPipe") | ||
18 | |||
19 | // BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). | ||
20 | // All written data may be read at most once. Also, BytesPipe allocates | ||
21 | // and releases new byte slices to adjust to current needs, so the buffer | ||
22 | // won't be overgrown after peak loads. | ||
23 | type BytesPipe struct { | ||
24 | mu sync.Mutex | ||
25 | wait *sync.Cond | ||
26 | buf [][]byte // slice of byte-slices of buffered data | ||
27 | lastRead int // index in the first slice to a read point | ||
28 | bufLen int // length of data buffered over the slices | ||
29 | closeErr error // error to return from next Read. set to nil if not closed. | ||
30 | } | ||
31 | |||
32 | // NewBytesPipe creates new BytesPipe, initialized by specified slice. | ||
33 | // If buf is nil, then it will be initialized with slice which cap is 64. | ||
34 | // buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). | ||
35 | func NewBytesPipe(buf []byte) *BytesPipe { | ||
36 | if cap(buf) == 0 { | ||
37 | buf = make([]byte, 0, 64) | ||
38 | } | ||
39 | bp := &BytesPipe{ | ||
40 | buf: [][]byte{buf[:0]}, | ||
41 | } | ||
42 | bp.wait = sync.NewCond(&bp.mu) | ||
43 | return bp | ||
44 | } | ||
45 | |||
46 | // Write writes p to BytesPipe. | ||
47 | // It can allocate new []byte slices in a process of writing. | ||
48 | func (bp *BytesPipe) Write(p []byte) (int, error) { | ||
49 | bp.mu.Lock() | ||
50 | defer bp.mu.Unlock() | ||
51 | written := 0 | ||
52 | for { | ||
53 | if bp.closeErr != nil { | ||
54 | return written, ErrClosed | ||
55 | } | ||
56 | // write data to the last buffer | ||
57 | b := bp.buf[len(bp.buf)-1] | ||
58 | // copy data to the current empty allocated area | ||
59 | n := copy(b[len(b):cap(b)], p) | ||
60 | // increment buffered data length | ||
61 | bp.bufLen += n | ||
62 | // include written data in last buffer | ||
63 | bp.buf[len(bp.buf)-1] = b[:len(b)+n] | ||
64 | |||
65 | written += n | ||
66 | |||
67 | // if there was enough room to write all then break | ||
68 | if len(p) == n { | ||
69 | break | ||
70 | } | ||
71 | |||
72 | // more data: write to the next slice | ||
73 | p = p[n:] | ||
74 | |||
75 | // block if too much data is still in the buffer | ||
76 | for bp.bufLen >= blockThreshold { | ||
77 | bp.wait.Wait() | ||
78 | } | ||
79 | |||
80 | // allocate slice that has twice the size of the last unless maximum reached | ||
81 | nextCap := 2 * cap(bp.buf[len(bp.buf)-1]) | ||
82 | if nextCap > maxCap { | ||
83 | nextCap = maxCap | ||
84 | } | ||
85 | // add new byte slice to the buffers slice and continue writing | ||
86 | bp.buf = append(bp.buf, make([]byte, 0, nextCap)) | ||
87 | } | ||
88 | bp.wait.Broadcast() | ||
89 | return written, nil | ||
90 | } | ||
91 | |||
92 | // CloseWithError causes further reads from a BytesPipe to return immediately. | ||
93 | func (bp *BytesPipe) CloseWithError(err error) error { | ||
94 | bp.mu.Lock() | ||
95 | if err != nil { | ||
96 | bp.closeErr = err | ||
97 | } else { | ||
98 | bp.closeErr = io.EOF | ||
99 | } | ||
100 | bp.wait.Broadcast() | ||
101 | bp.mu.Unlock() | ||
102 | return nil | ||
103 | } | ||
104 | |||
105 | // Close causes further reads from a BytesPipe to return immediately. | ||
106 | func (bp *BytesPipe) Close() error { | ||
107 | return bp.CloseWithError(nil) | ||
108 | } | ||
109 | |||
110 | func (bp *BytesPipe) len() int { | ||
111 | return bp.bufLen - bp.lastRead | ||
112 | } | ||
113 | |||
114 | // Read reads bytes from BytesPipe. | ||
115 | // Data could be read only once. | ||
116 | func (bp *BytesPipe) Read(p []byte) (n int, err error) { | ||
117 | bp.mu.Lock() | ||
118 | defer bp.mu.Unlock() | ||
119 | if bp.len() == 0 { | ||
120 | if bp.closeErr != nil { | ||
121 | return 0, bp.closeErr | ||
122 | } | ||
123 | bp.wait.Wait() | ||
124 | if bp.len() == 0 && bp.closeErr != nil { | ||
125 | return 0, bp.closeErr | ||
126 | } | ||
127 | } | ||
128 | for { | ||
129 | read := copy(p, bp.buf[0][bp.lastRead:]) | ||
130 | n += read | ||
131 | bp.lastRead += read | ||
132 | if bp.len() == 0 { | ||
133 | // we have read everything. reset to the beginning. | ||
134 | bp.lastRead = 0 | ||
135 | bp.bufLen -= len(bp.buf[0]) | ||
136 | bp.buf[0] = bp.buf[0][:0] | ||
137 | break | ||
138 | } | ||
139 | // break if everything was read | ||
140 | if len(p) == read { | ||
141 | break | ||
142 | } | ||
143 | // more buffered data and more asked. read from next slice. | ||
144 | p = p[read:] | ||
145 | bp.lastRead = 0 | ||
146 | bp.bufLen -= len(bp.buf[0]) | ||
147 | bp.buf[0] = nil // throw away old slice | ||
148 | bp.buf = bp.buf[1:] // switch to next | ||
149 | } | ||
150 | bp.wait.Broadcast() | ||
151 | return | ||
152 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go new file mode 100644 index 0000000..0b04b0b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go | |||
@@ -0,0 +1,22 @@ | |||
1 | package ioutils | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "io" | ||
6 | ) | ||
7 | |||
8 | // FprintfIfNotEmpty prints the string value if it's not empty | ||
9 | func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { | ||
10 | if value != "" { | ||
11 | return fmt.Fprintf(w, format, value) | ||
12 | } | ||
13 | return 0, nil | ||
14 | } | ||
15 | |||
16 | // FprintfIfTrue prints the boolean value if it's true | ||
17 | func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { | ||
18 | if ok { | ||
19 | return fmt.Fprintf(w, format, ok) | ||
20 | } | ||
21 | return 0, nil | ||
22 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go new file mode 100644 index 0000000..0d2d76b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go | |||
@@ -0,0 +1,226 @@ | |||
1 | package ioutils | ||
2 | |||
3 | import ( | ||
4 | "bytes" | ||
5 | "fmt" | ||
6 | "io" | ||
7 | "os" | ||
8 | ) | ||
9 | |||
10 | type pos struct { | ||
11 | idx int | ||
12 | offset int64 | ||
13 | } | ||
14 | |||
15 | type multiReadSeeker struct { | ||
16 | readers []io.ReadSeeker | ||
17 | pos *pos | ||
18 | posIdx map[io.ReadSeeker]int | ||
19 | } | ||
20 | |||
21 | func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { | ||
22 | var tmpOffset int64 | ||
23 | switch whence { | ||
24 | case os.SEEK_SET: | ||
25 | for i, rdr := range r.readers { | ||
26 | // get size of the current reader | ||
27 | s, err := rdr.Seek(0, os.SEEK_END) | ||
28 | if err != nil { | ||
29 | return -1, err | ||
30 | } | ||
31 | |||
32 | if offset > tmpOffset+s { | ||
33 | if i == len(r.readers)-1 { | ||
34 | rdrOffset := s + (offset - tmpOffset) | ||
35 | if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { | ||
36 | return -1, err | ||
37 | } | ||
38 | r.pos = &pos{i, rdrOffset} | ||
39 | return offset, nil | ||
40 | } | ||
41 | |||
42 | tmpOffset += s | ||
43 | continue | ||
44 | } | ||
45 | |||
46 | rdrOffset := offset - tmpOffset | ||
47 | idx := i | ||
48 | |||
49 | rdr.Seek(rdrOffset, os.SEEK_SET) | ||
50 | // make sure all following readers are at 0 | ||
51 | for _, rdr := range r.readers[i+1:] { | ||
52 | rdr.Seek(0, os.SEEK_SET) | ||
53 | } | ||
54 | |||
55 | if rdrOffset == s && i != len(r.readers)-1 { | ||
56 | idx++ | ||
57 | rdrOffset = 0 | ||
58 | } | ||
59 | r.pos = &pos{idx, rdrOffset} | ||
60 | return offset, nil | ||
61 | } | ||
62 | case os.SEEK_END: | ||
63 | for _, rdr := range r.readers { | ||
64 | s, err := rdr.Seek(0, os.SEEK_END) | ||
65 | if err != nil { | ||
66 | return -1, err | ||
67 | } | ||
68 | tmpOffset += s | ||
69 | } | ||
70 | r.Seek(tmpOffset+offset, os.SEEK_SET) | ||
71 | return tmpOffset + offset, nil | ||
72 | case os.SEEK_CUR: | ||
73 | if r.pos == nil { | ||
74 | return r.Seek(offset, os.SEEK_SET) | ||
75 | } | ||
76 | // Just return the current offset | ||
77 | if offset == 0 { | ||
78 | return r.getCurOffset() | ||
79 | } | ||
80 | |||
81 | curOffset, err := r.getCurOffset() | ||
82 | if err != nil { | ||
83 | return -1, err | ||
84 | } | ||
85 | rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) | ||
86 | if err != nil { | ||
87 | return -1, err | ||
88 | } | ||
89 | |||
90 | r.pos = &pos{r.posIdx[rdr], rdrOffset} | ||
91 | return curOffset + offset, nil | ||
92 | default: | ||
93 | return -1, fmt.Errorf("Invalid whence: %d", whence) | ||
94 | } | ||
95 | |||
96 | return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) | ||
97 | } | ||
98 | |||
99 | func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { | ||
100 | var rdr io.ReadSeeker | ||
101 | var rdrOffset int64 | ||
102 | |||
103 | for i, rdr := range r.readers { | ||
104 | offsetTo, err := r.getOffsetToReader(rdr) | ||
105 | if err != nil { | ||
106 | return nil, -1, err | ||
107 | } | ||
108 | if offsetTo > offset { | ||
109 | rdr = r.readers[i-1] | ||
110 | rdrOffset = offsetTo - offset | ||
111 | break | ||
112 | } | ||
113 | |||
114 | if rdr == r.readers[len(r.readers)-1] { | ||
115 | rdrOffset = offsetTo + offset | ||
116 | break | ||
117 | } | ||
118 | } | ||
119 | |||
120 | return rdr, rdrOffset, nil | ||
121 | } | ||
122 | |||
123 | func (r *multiReadSeeker) getCurOffset() (int64, error) { | ||
124 | var totalSize int64 | ||
125 | for _, rdr := range r.readers[:r.pos.idx+1] { | ||
126 | if r.posIdx[rdr] == r.pos.idx { | ||
127 | totalSize += r.pos.offset | ||
128 | break | ||
129 | } | ||
130 | |||
131 | size, err := getReadSeekerSize(rdr) | ||
132 | if err != nil { | ||
133 | return -1, fmt.Errorf("error getting seeker size: %v", err) | ||
134 | } | ||
135 | totalSize += size | ||
136 | } | ||
137 | return totalSize, nil | ||
138 | } | ||
139 | |||
140 | func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { | ||
141 | var offset int64 | ||
142 | for _, r := range r.readers { | ||
143 | if r == rdr { | ||
144 | break | ||
145 | } | ||
146 | |||
147 | size, err := getReadSeekerSize(rdr) | ||
148 | if err != nil { | ||
149 | return -1, err | ||
150 | } | ||
151 | offset += size | ||
152 | } | ||
153 | return offset, nil | ||
154 | } | ||
155 | |||
156 | func (r *multiReadSeeker) Read(b []byte) (int, error) { | ||
157 | if r.pos == nil { | ||
158 | r.pos = &pos{0, 0} | ||
159 | } | ||
160 | |||
161 | bCap := int64(cap(b)) | ||
162 | buf := bytes.NewBuffer(nil) | ||
163 | var rdr io.ReadSeeker | ||
164 | |||
165 | for _, rdr = range r.readers[r.pos.idx:] { | ||
166 | readBytes, err := io.CopyN(buf, rdr, bCap) | ||
167 | if err != nil && err != io.EOF { | ||
168 | return -1, err | ||
169 | } | ||
170 | bCap -= readBytes | ||
171 | |||
172 | if bCap == 0 { | ||
173 | break | ||
174 | } | ||
175 | } | ||
176 | |||
177 | rdrPos, err := rdr.Seek(0, os.SEEK_CUR) | ||
178 | if err != nil { | ||
179 | return -1, err | ||
180 | } | ||
181 | r.pos = &pos{r.posIdx[rdr], rdrPos} | ||
182 | return buf.Read(b) | ||
183 | } | ||
184 | |||
185 | func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { | ||
186 | // save the current position | ||
187 | pos, err := rdr.Seek(0, os.SEEK_CUR) | ||
188 | if err != nil { | ||
189 | return -1, err | ||
190 | } | ||
191 | |||
192 | // get the size | ||
193 | size, err := rdr.Seek(0, os.SEEK_END) | ||
194 | if err != nil { | ||
195 | return -1, err | ||
196 | } | ||
197 | |||
198 | // reset the position | ||
199 | if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { | ||
200 | return -1, err | ||
201 | } | ||
202 | return size, nil | ||
203 | } | ||
204 | |||
205 | // MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided | ||
206 | // input readseekers. After calling this method the initial position is set to the | ||
207 | // beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances | ||
208 | // to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. | ||
209 | // Seek can be used over the sum of lengths of all readseekers. | ||
210 | // | ||
211 | // When a MultiReadSeeker is used, no Read and Seek operations should be made on | ||
212 | // its ReadSeeker components. Also, users should make no assumption on the state | ||
213 | // of individual readseekers while the MultiReadSeeker is used. | ||
214 | func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { | ||
215 | if len(readers) == 1 { | ||
216 | return readers[0] | ||
217 | } | ||
218 | idx := make(map[io.ReadSeeker]int) | ||
219 | for i, rdr := range readers { | ||
220 | idx[rdr] = i | ||
221 | } | ||
222 | return &multiReadSeeker{ | ||
223 | readers: readers, | ||
224 | posIdx: idx, | ||
225 | } | ||
226 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 0000000..a891955 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go | |||
@@ -0,0 +1,154 @@ | |||
1 | package ioutils | ||
2 | |||
3 | import ( | ||
4 | "crypto/sha256" | ||
5 | "encoding/hex" | ||
6 | "io" | ||
7 | |||
8 | "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context" | ||
9 | ) | ||
10 | |||
11 | type readCloserWrapper struct { | ||
12 | io.Reader | ||
13 | closer func() error | ||
14 | } | ||
15 | |||
16 | func (r *readCloserWrapper) Close() error { | ||
17 | return r.closer() | ||
18 | } | ||
19 | |||
20 | // NewReadCloserWrapper returns a new io.ReadCloser. | ||
21 | func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { | ||
22 | return &readCloserWrapper{ | ||
23 | Reader: r, | ||
24 | closer: closer, | ||
25 | } | ||
26 | } | ||
27 | |||
28 | type readerErrWrapper struct { | ||
29 | reader io.Reader | ||
30 | closer func() | ||
31 | } | ||
32 | |||
33 | func (r *readerErrWrapper) Read(p []byte) (int, error) { | ||
34 | n, err := r.reader.Read(p) | ||
35 | if err != nil { | ||
36 | r.closer() | ||
37 | } | ||
38 | return n, err | ||
39 | } | ||
40 | |||
41 | // NewReaderErrWrapper returns a new io.Reader. | ||
42 | func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { | ||
43 | return &readerErrWrapper{ | ||
44 | reader: r, | ||
45 | closer: closer, | ||
46 | } | ||
47 | } | ||
48 | |||
49 | // HashData returns the sha256 sum of src. | ||
50 | func HashData(src io.Reader) (string, error) { | ||
51 | h := sha256.New() | ||
52 | if _, err := io.Copy(h, src); err != nil { | ||
53 | return "", err | ||
54 | } | ||
55 | return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil | ||
56 | } | ||
57 | |||
58 | // OnEOFReader wraps an io.ReadCloser and a function; | ||
59 | // the function will run at end of file or when the reader is closed. | ||
60 | type OnEOFReader struct { | ||
61 | Rc io.ReadCloser | ||
62 | Fn func() | ||
63 | } | ||
64 | |||
65 | func (r *OnEOFReader) Read(p []byte) (n int, err error) { | ||
66 | n, err = r.Rc.Read(p) | ||
67 | if err == io.EOF { | ||
68 | r.runFunc() | ||
69 | } | ||
70 | return | ||
71 | } | ||
72 | |||
73 | // Close closes the file and runs the function. | ||
74 | func (r *OnEOFReader) Close() error { | ||
75 | err := r.Rc.Close() | ||
76 | r.runFunc() | ||
77 | return err | ||
78 | } | ||
79 | |||
80 | func (r *OnEOFReader) runFunc() { | ||
81 | if fn := r.Fn; fn != nil { | ||
82 | fn() | ||
83 | r.Fn = nil | ||
84 | } | ||
85 | } | ||
86 | |||
87 | // cancelReadCloser wraps an io.ReadCloser with a context for cancelling read | ||
88 | // operations. | ||
89 | type cancelReadCloser struct { | ||
90 | cancel func() | ||
91 | pR *io.PipeReader // Stream to read from | ||
92 | pW *io.PipeWriter | ||
93 | } | ||
94 | |||
95 | // NewCancelReadCloser creates a wrapper that closes the ReadCloser when the | ||
96 | // context is cancelled. The returned io.ReadCloser must be closed when it is | ||
97 | // no longer needed. | ||
98 | func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { | ||
99 | pR, pW := io.Pipe() | ||
100 | |||
101 | // Create a context used to signal when the pipe is closed | ||
102 | doneCtx, cancel := context.WithCancel(context.Background()) | ||
103 | |||
104 | p := &cancelReadCloser{ | ||
105 | cancel: cancel, | ||
106 | pR: pR, | ||
107 | pW: pW, | ||
108 | } | ||
109 | |||
110 | go func() { | ||
111 | _, err := io.Copy(pW, in) | ||
112 | select { | ||
113 | case <-ctx.Done(): | ||
114 | // If the context was closed, p.closeWithError | ||
115 | // was already called. Calling it again would | ||
116 | // change the error that Read returns. | ||
117 | default: | ||
118 | p.closeWithError(err) | ||
119 | } | ||
120 | in.Close() | ||
121 | }() | ||
122 | go func() { | ||
123 | for { | ||
124 | select { | ||
125 | case <-ctx.Done(): | ||
126 | p.closeWithError(ctx.Err()) | ||
127 | case <-doneCtx.Done(): | ||
128 | return | ||
129 | } | ||
130 | } | ||
131 | }() | ||
132 | |||
133 | return p | ||
134 | } | ||
135 | |||
136 | // Read wraps the Read method of the pipe that provides data from the wrapped | ||
137 | // ReadCloser. | ||
138 | func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { | ||
139 | return p.pR.Read(buf) | ||
140 | } | ||
141 | |||
142 | // closeWithError closes the wrapper and its underlying reader. It will | ||
143 | // cause future calls to Read to return err. | ||
144 | func (p *cancelReadCloser) closeWithError(err error) { | ||
145 | p.pW.CloseWithError(err) | ||
146 | p.cancel() | ||
147 | } | ||
148 | |||
149 | // Close closes the wrapper and its underlying reader. It will cause | ||
150 | // future calls to Read to return io.EOF. | ||
151 | func (p *cancelReadCloser) Close() error { | ||
152 | p.closeWithError(io.EOF) | ||
153 | return nil | ||
154 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go new file mode 100644 index 0000000..3c88f29 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go | |||
@@ -0,0 +1,6 @@ | |||
1 | // +build !gccgo | ||
2 | |||
3 | package ioutils | ||
4 | |||
5 | func callSchedulerIfNecessary() { | ||
6 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go new file mode 100644 index 0000000..c11d02b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go | |||
@@ -0,0 +1,13 @@ | |||
1 | // +build gccgo | ||
2 | |||
3 | package ioutils | ||
4 | |||
5 | import ( | ||
6 | "runtime" | ||
7 | ) | ||
8 | |||
9 | func callSchedulerIfNecessary() { | ||
10 | //allow or force Go scheduler to switch context, without explicitly | ||
11 | //forcing this will make it hang when using gccgo implementation | ||
12 | runtime.Gosched() | ||
13 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go new file mode 100644 index 0000000..1539ad2 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_unix.go | |||
@@ -0,0 +1,10 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package ioutils | ||
4 | |||
5 | import "io/ioutil" | ||
6 | |||
7 | // TempDir on Unix systems is equivalent to ioutil.TempDir. | ||
8 | func TempDir(dir, prefix string) (string, error) { | ||
9 | return ioutil.TempDir(dir, prefix) | ||
10 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go new file mode 100644 index 0000000..72c0bc5 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/temp_windows.go | |||
@@ -0,0 +1,18 @@ | |||
1 | // +build windows | ||
2 | |||
3 | package ioutils | ||
4 | |||
5 | import ( | ||
6 | "io/ioutil" | ||
7 | |||
8 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath" | ||
9 | ) | ||
10 | |||
11 | // TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. | ||
12 | func TempDir(dir, prefix string) (string, error) { | ||
13 | tempDir, err := ioutil.TempDir(dir, prefix) | ||
14 | if err != nil { | ||
15 | return "", err | ||
16 | } | ||
17 | return longpath.AddPrefix(tempDir), nil | ||
18 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 0000000..2b35a26 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go | |||
@@ -0,0 +1,92 @@ | |||
1 | package ioutils | ||
2 | |||
3 | import ( | ||
4 | "errors" | ||
5 | "io" | ||
6 | "net/http" | ||
7 | "sync" | ||
8 | ) | ||
9 | |||
10 | // WriteFlusher wraps the Write and Flush operation ensuring that every write | ||
11 | // is a flush. In addition, the Close method can be called to intercept | ||
12 | // Read/Write calls if the target's lifecycle has already ended. | ||
13 | type WriteFlusher struct { | ||
14 | mu sync.Mutex | ||
15 | w io.Writer | ||
16 | flusher http.Flusher | ||
17 | flushed bool | ||
18 | closed error | ||
19 | |||
20 | // TODO(stevvooe): Use channel for closed instead, remove mutex. Using a | ||
21 | // channel will allow one to properly order the operations. | ||
22 | } | ||
23 | |||
24 | var errWriteFlusherClosed = errors.New("writeflusher: closed") | ||
25 | |||
26 | func (wf *WriteFlusher) Write(b []byte) (n int, err error) { | ||
27 | wf.mu.Lock() | ||
28 | defer wf.mu.Unlock() | ||
29 | if wf.closed != nil { | ||
30 | return 0, wf.closed | ||
31 | } | ||
32 | |||
33 | n, err = wf.w.Write(b) | ||
34 | wf.flush() // every write is a flush. | ||
35 | return n, err | ||
36 | } | ||
37 | |||
38 | // Flush the stream immediately. | ||
39 | func (wf *WriteFlusher) Flush() { | ||
40 | wf.mu.Lock() | ||
41 | defer wf.mu.Unlock() | ||
42 | |||
43 | wf.flush() | ||
44 | } | ||
45 | |||
46 | // flush the stream immediately without taking a lock. Used internally. | ||
47 | func (wf *WriteFlusher) flush() { | ||
48 | if wf.closed != nil { | ||
49 | return | ||
50 | } | ||
51 | |||
52 | wf.flushed = true | ||
53 | wf.flusher.Flush() | ||
54 | } | ||
55 | |||
56 | // Flushed returns the state of flushed. | ||
57 | // If it's flushed, it returns true; otherwise it returns false. | ||
58 | func (wf *WriteFlusher) Flushed() bool { | ||
59 | // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to | ||
60 | // be used to detect whether or a response code has been issued or not. | ||
61 | // Another hook should be used instead. | ||
62 | wf.mu.Lock() | ||
63 | defer wf.mu.Unlock() | ||
64 | |||
65 | return wf.flushed | ||
66 | } | ||
67 | |||
68 | // Close closes the write flusher, disallowing any further writes to the | ||
69 | // target. After the flusher is closed, all calls to write or flush will | ||
70 | // result in an error. | ||
71 | func (wf *WriteFlusher) Close() error { | ||
72 | wf.mu.Lock() | ||
73 | defer wf.mu.Unlock() | ||
74 | |||
75 | if wf.closed != nil { | ||
76 | return wf.closed | ||
77 | } | ||
78 | |||
79 | wf.closed = errWriteFlusherClosed | ||
80 | return nil | ||
81 | } | ||
82 | |||
83 | // NewWriteFlusher returns a new WriteFlusher. | ||
84 | func NewWriteFlusher(w io.Writer) *WriteFlusher { | ||
85 | var flusher http.Flusher | ||
86 | if f, ok := w.(http.Flusher); ok { | ||
87 | flusher = f | ||
88 | } else { | ||
89 | flusher = &NopFlusher{} | ||
90 | } | ||
91 | return &WriteFlusher{w: w, flusher: flusher} | ||
92 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go new file mode 100644 index 0000000..ccc7f9c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go | |||
@@ -0,0 +1,66 @@ | |||
1 | package ioutils | ||
2 | |||
3 | import "io" | ||
4 | |||
5 | // NopWriter represents a type whose Write operation is a no-op. | ||
6 | type NopWriter struct{} | ||
7 | |||
8 | func (*NopWriter) Write(buf []byte) (int, error) { | ||
9 | return len(buf), nil | ||
10 | } | ||
11 | |||
12 | type nopWriteCloser struct { | ||
13 | io.Writer | ||
14 | } | ||
15 | |||
16 | func (w *nopWriteCloser) Close() error { return nil } | ||
17 | |||
18 | // NopWriteCloser returns a nopWriteCloser. | ||
19 | func NopWriteCloser(w io.Writer) io.WriteCloser { | ||
20 | return &nopWriteCloser{w} | ||
21 | } | ||
22 | |||
23 | // NopFlusher represents a type whose Flush operation is a no-op. | ||
24 | type NopFlusher struct{} | ||
25 | |||
26 | // Flush is a nop operation. | ||
27 | func (f *NopFlusher) Flush() {} | ||
28 | |||
29 | type writeCloserWrapper struct { | ||
30 | io.Writer | ||
31 | closer func() error | ||
32 | } | ||
33 | |||
34 | func (r *writeCloserWrapper) Close() error { | ||
35 | return r.closer() | ||
36 | } | ||
37 | |||
38 | // NewWriteCloserWrapper returns a new io.WriteCloser. | ||
39 | func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { | ||
40 | return &writeCloserWrapper{ | ||
41 | Writer: r, | ||
42 | closer: closer, | ||
43 | } | ||
44 | } | ||
45 | |||
46 | // WriteCounter wraps a concrete io.Writer and hold a count of the number | ||
47 | // of bytes written to the writer during a "session". | ||
48 | // This can be convenient when write return is masked | ||
49 | // (e.g., json.Encoder.Encode()) | ||
50 | type WriteCounter struct { | ||
51 | Count int64 | ||
52 | Writer io.Writer | ||
53 | } | ||
54 | |||
55 | // NewWriteCounter returns a new WriteCounter. | ||
56 | func NewWriteCounter(w io.Writer) *WriteCounter { | ||
57 | return &WriteCounter{ | ||
58 | Writer: w, | ||
59 | } | ||
60 | } | ||
61 | |||
62 | func (wc *WriteCounter) Write(p []byte) (count int, err error) { | ||
63 | count, err = wc.Writer.Write(p) | ||
64 | wc.Count += int64(count) | ||
65 | return | ||
66 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go new file mode 100644 index 0000000..9b15bff --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath/longpath.go | |||
@@ -0,0 +1,26 @@ | |||
1 | // longpath introduces some constants and helper functions for handling long paths | ||
2 | // in Windows, which are expected to be prepended with `\\?\` and followed by either | ||
3 | // a drive letter, a UNC server\share, or a volume identifier. | ||
4 | |||
5 | package longpath | ||
6 | |||
7 | import ( | ||
8 | "strings" | ||
9 | ) | ||
10 | |||
11 | // Prefix is the longpath prefix for Windows file paths. | ||
12 | const Prefix = `\\?\` | ||
13 | |||
14 | // AddPrefix will add the Windows long path prefix to the path provided if | ||
15 | // it does not already have it. | ||
16 | func AddPrefix(path string) string { | ||
17 | if !strings.HasPrefix(path, Prefix) { | ||
18 | if strings.HasPrefix(path, `\\`) { | ||
19 | // This is a UNC path, so we need to add 'UNC' to the path as well. | ||
20 | path = Prefix + `UNC` + path[1:] | ||
21 | } else { | ||
22 | path = Prefix + path | ||
23 | } | ||
24 | } | ||
25 | return path | ||
26 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 0000000..515fb4d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go | |||
@@ -0,0 +1,119 @@ | |||
1 | // Package pools provides a collection of pools which provide various | ||
2 | // data types with buffers. These can be used to lower the number of | ||
3 | // memory allocations and reuse buffers. | ||
4 | // | ||
5 | // New pools should be added to this package to allow them to be | ||
6 | // shared across packages. | ||
7 | // | ||
8 | // Utility functions which operate on pools should be added to this | ||
9 | // package to allow them to be reused. | ||
10 | package pools | ||
11 | |||
12 | import ( | ||
13 | "bufio" | ||
14 | "io" | ||
15 | "sync" | ||
16 | |||
17 | "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils" | ||
18 | ) | ||
19 | |||
20 | var ( | ||
21 | // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. | ||
22 | BufioReader32KPool *BufioReaderPool | ||
23 | // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. | ||
24 | BufioWriter32KPool *BufioWriterPool | ||
25 | ) | ||
26 | |||
27 | const buffer32K = 32 * 1024 | ||
28 | |||
29 | // BufioReaderPool is a bufio reader that uses sync.Pool. | ||
30 | type BufioReaderPool struct { | ||
31 | pool sync.Pool | ||
32 | } | ||
33 | |||
34 | func init() { | ||
35 | BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) | ||
36 | BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) | ||
37 | } | ||
38 | |||
39 | // newBufioReaderPoolWithSize is unexported because new pools should be | ||
40 | // added here to be shared where required. | ||
41 | func newBufioReaderPoolWithSize(size int) *BufioReaderPool { | ||
42 | pool := sync.Pool{ | ||
43 | New: func() interface{} { return bufio.NewReaderSize(nil, size) }, | ||
44 | } | ||
45 | return &BufioReaderPool{pool: pool} | ||
46 | } | ||
47 | |||
48 | // Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. | ||
49 | func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { | ||
50 | buf := bufPool.pool.Get().(*bufio.Reader) | ||
51 | buf.Reset(r) | ||
52 | return buf | ||
53 | } | ||
54 | |||
55 | // Put puts the bufio.Reader back into the pool. | ||
56 | func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { | ||
57 | b.Reset(nil) | ||
58 | bufPool.pool.Put(b) | ||
59 | } | ||
60 | |||
61 | // Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. | ||
62 | func Copy(dst io.Writer, src io.Reader) (written int64, err error) { | ||
63 | buf := BufioReader32KPool.Get(src) | ||
64 | written, err = io.Copy(dst, buf) | ||
65 | BufioReader32KPool.Put(buf) | ||
66 | return | ||
67 | } | ||
68 | |||
69 | // NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back | ||
70 | // into the pool and closes the reader if it's an io.ReadCloser. | ||
71 | func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { | ||
72 | return ioutils.NewReadCloserWrapper(r, func() error { | ||
73 | if readCloser, ok := r.(io.ReadCloser); ok { | ||
74 | readCloser.Close() | ||
75 | } | ||
76 | bufPool.Put(buf) | ||
77 | return nil | ||
78 | }) | ||
79 | } | ||
80 | |||
81 | // BufioWriterPool is a bufio writer that uses sync.Pool. | ||
82 | type BufioWriterPool struct { | ||
83 | pool sync.Pool | ||
84 | } | ||
85 | |||
86 | // newBufioWriterPoolWithSize is unexported because new pools should be | ||
87 | // added here to be shared where required. | ||
88 | func newBufioWriterPoolWithSize(size int) *BufioWriterPool { | ||
89 | pool := sync.Pool{ | ||
90 | New: func() interface{} { return bufio.NewWriterSize(nil, size) }, | ||
91 | } | ||
92 | return &BufioWriterPool{pool: pool} | ||
93 | } | ||
94 | |||
95 | // Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. | ||
96 | func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { | ||
97 | buf := bufPool.pool.Get().(*bufio.Writer) | ||
98 | buf.Reset(w) | ||
99 | return buf | ||
100 | } | ||
101 | |||
102 | // Put puts the bufio.Writer back into the pool. | ||
103 | func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { | ||
104 | b.Reset(nil) | ||
105 | bufPool.pool.Put(b) | ||
106 | } | ||
107 | |||
108 | // NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back | ||
109 | // into the pool and closes the writer if it's an io.Writecloser. | ||
110 | func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { | ||
111 | return ioutils.NewWriteCloserWrapper(w, func() error { | ||
112 | buf.Flush() | ||
113 | if writeCloser, ok := w.(io.WriteCloser); ok { | ||
114 | writeCloser.Close() | ||
115 | } | ||
116 | bufPool.Put(buf) | ||
117 | return nil | ||
118 | }) | ||
119 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go new file mode 100644 index 0000000..dd52b90 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go | |||
@@ -0,0 +1,11 @@ | |||
1 | package promise | ||
2 | |||
3 | // Go is a basic promise implementation: it wraps calls to a function in a goroutine, | ||
4 | // and returns a channel which will later return the function's return value. | ||
5 | func Go(f func() error) chan error { | ||
6 | ch := make(chan error, 1) | ||
7 | go func() { | ||
8 | ch <- f() | ||
9 | }() | ||
10 | return ch | ||
11 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go new file mode 100644 index 0000000..b2c6004 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go | |||
@@ -0,0 +1,175 @@ | |||
1 | package stdcopy | ||
2 | |||
3 | import ( | ||
4 | "encoding/binary" | ||
5 | "errors" | ||
6 | "io" | ||
7 | |||
8 | "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" | ||
9 | ) | ||
10 | |||
11 | const ( | ||
12 | stdWriterPrefixLen = 8 | ||
13 | stdWriterFdIndex = 0 | ||
14 | stdWriterSizeIndex = 4 | ||
15 | |||
16 | startingBufLen = 32*1024 + stdWriterPrefixLen + 1 | ||
17 | ) | ||
18 | |||
19 | // StdType prefixes type and length to standard stream. | ||
20 | type StdType [stdWriterPrefixLen]byte | ||
21 | |||
22 | var ( | ||
23 | // Stdin represents standard input stream type. | ||
24 | Stdin = StdType{0: 0} | ||
25 | // Stdout represents standard output stream type. | ||
26 | Stdout = StdType{0: 1} | ||
27 | // Stderr represents standard error stream type. | ||
28 | Stderr = StdType{0: 2} | ||
29 | ) | ||
30 | |||
31 | // StdWriter is wrapper of io.Writer with extra customized info. | ||
32 | type StdWriter struct { | ||
33 | io.Writer | ||
34 | prefix StdType | ||
35 | sizeBuf []byte | ||
36 | } | ||
37 | |||
38 | func (w *StdWriter) Write(buf []byte) (n int, err error) { | ||
39 | var n1, n2 int | ||
40 | if w == nil || w.Writer == nil { | ||
41 | return 0, errors.New("Writer not instantiated") | ||
42 | } | ||
43 | binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf))) | ||
44 | n1, err = w.Writer.Write(w.prefix[:]) | ||
45 | if err != nil { | ||
46 | n = n1 - stdWriterPrefixLen | ||
47 | } else { | ||
48 | n2, err = w.Writer.Write(buf) | ||
49 | n = n1 + n2 - stdWriterPrefixLen | ||
50 | } | ||
51 | if n < 0 { | ||
52 | n = 0 | ||
53 | } | ||
54 | return | ||
55 | } | ||
56 | |||
57 | // NewStdWriter instantiates a new Writer. | ||
58 | // Everything written to it will be encapsulated using a custom format, | ||
59 | // and written to the underlying `w` stream. | ||
60 | // This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. | ||
61 | // `t` indicates the id of the stream to encapsulate. | ||
62 | // It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. | ||
63 | func NewStdWriter(w io.Writer, t StdType) *StdWriter { | ||
64 | return &StdWriter{ | ||
65 | Writer: w, | ||
66 | prefix: t, | ||
67 | sizeBuf: make([]byte, 4), | ||
68 | } | ||
69 | } | ||
70 | |||
71 | var errInvalidStdHeader = errors.New("Unrecognized input header") | ||
72 | |||
73 | // StdCopy is a modified version of io.Copy. | ||
74 | // | ||
75 | // StdCopy will demultiplex `src`, assuming that it contains two streams, | ||
76 | // previously multiplexed together using a StdWriter instance. | ||
77 | // As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. | ||
78 | // | ||
79 | // StdCopy will read until it hits EOF on `src`. It will then return a nil error. | ||
80 | // In other words: if `err` is non nil, it indicates a real underlying error. | ||
81 | // | ||
82 | // `written` will hold the total number of bytes written to `dstout` and `dsterr`. | ||
83 | func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { | ||
84 | var ( | ||
85 | buf = make([]byte, startingBufLen) | ||
86 | bufLen = len(buf) | ||
87 | nr, nw int | ||
88 | er, ew error | ||
89 | out io.Writer | ||
90 | frameSize int | ||
91 | ) | ||
92 | |||
93 | for { | ||
94 | // Make sure we have at least a full header | ||
95 | for nr < stdWriterPrefixLen { | ||
96 | var nr2 int | ||
97 | nr2, er = src.Read(buf[nr:]) | ||
98 | nr += nr2 | ||
99 | if er == io.EOF { | ||
100 | if nr < stdWriterPrefixLen { | ||
101 | logrus.Debugf("Corrupted prefix: %v", buf[:nr]) | ||
102 | return written, nil | ||
103 | } | ||
104 | break | ||
105 | } | ||
106 | if er != nil { | ||
107 | logrus.Debugf("Error reading header: %s", er) | ||
108 | return 0, er | ||
109 | } | ||
110 | } | ||
111 | |||
112 | // Check the first byte to know where to write | ||
113 | switch buf[stdWriterFdIndex] { | ||
114 | case 0: | ||
115 | fallthrough | ||
116 | case 1: | ||
117 | // Write on stdout | ||
118 | out = dstout | ||
119 | case 2: | ||
120 | // Write on stderr | ||
121 | out = dsterr | ||
122 | default: | ||
123 | logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex]) | ||
124 | return 0, errInvalidStdHeader | ||
125 | } | ||
126 | |||
127 | // Retrieve the size of the frame | ||
128 | frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) | ||
129 | logrus.Debugf("framesize: %d", frameSize) | ||
130 | |||
131 | // Check if the buffer is big enough to read the frame. | ||
132 | // Extend it if necessary. | ||
133 | if frameSize+stdWriterPrefixLen > bufLen { | ||
134 | logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf)) | ||
135 | buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) | ||
136 | bufLen = len(buf) | ||
137 | } | ||
138 | |||
139 | // While the amount of bytes read is less than the size of the frame + header, we keep reading | ||
140 | for nr < frameSize+stdWriterPrefixLen { | ||
141 | var nr2 int | ||
142 | nr2, er = src.Read(buf[nr:]) | ||
143 | nr += nr2 | ||
144 | if er == io.EOF { | ||
145 | if nr < frameSize+stdWriterPrefixLen { | ||
146 | logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr]) | ||
147 | return written, nil | ||
148 | } | ||
149 | break | ||
150 | } | ||
151 | if er != nil { | ||
152 | logrus.Debugf("Error reading frame: %s", er) | ||
153 | return 0, er | ||
154 | } | ||
155 | } | ||
156 | |||
157 | // Write the retrieved frame (without header) | ||
158 | nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) | ||
159 | if ew != nil { | ||
160 | logrus.Debugf("Error writing frame: %s", ew) | ||
161 | return 0, ew | ||
162 | } | ||
163 | // If the frame has not been fully written: error | ||
164 | if nw != frameSize { | ||
165 | logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize) | ||
166 | return 0, io.ErrShortWrite | ||
167 | } | ||
168 | written += int64(nw) | ||
169 | |||
170 | // Move the rest of the buffer to the beginning | ||
171 | copy(buf, buf[frameSize+stdWriterPrefixLen:]) | ||
172 | // Move the index | ||
173 | nr -= frameSize + stdWriterPrefixLen | ||
174 | } | ||
175 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go new file mode 100644 index 0000000..acf3f56 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go | |||
@@ -0,0 +1,47 @@ | |||
1 | package system | ||
2 | |||
3 | import ( | ||
4 | "os" | ||
5 | "syscall" | ||
6 | "time" | ||
7 | "unsafe" | ||
8 | ) | ||
9 | |||
10 | var ( | ||
11 | maxTime time.Time | ||
12 | ) | ||
13 | |||
14 | func init() { | ||
15 | if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { | ||
16 | // This is a 64 bit timespec | ||
17 | // os.Chtimes limits time to the following | ||
18 | maxTime = time.Unix(0, 1<<63-1) | ||
19 | } else { | ||
20 | // This is a 32 bit timespec | ||
21 | maxTime = time.Unix(1<<31-1, 0) | ||
22 | } | ||
23 | } | ||
24 | |||
25 | // Chtimes changes the access time and modified time of a file at the given path | ||
26 | func Chtimes(name string, atime time.Time, mtime time.Time) error { | ||
27 | unixMinTime := time.Unix(0, 0) | ||
28 | unixMaxTime := maxTime | ||
29 | |||
30 | // If the modified time is prior to the Unix Epoch, or after the | ||
31 | // end of Unix Time, os.Chtimes has undefined behavior | ||
32 | // default to Unix Epoch in this case, just in case | ||
33 | |||
34 | if atime.Before(unixMinTime) || atime.After(unixMaxTime) { | ||
35 | atime = unixMinTime | ||
36 | } | ||
37 | |||
38 | if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { | ||
39 | mtime = unixMinTime | ||
40 | } | ||
41 | |||
42 | if err := os.Chtimes(name, atime, mtime); err != nil { | ||
43 | return err | ||
44 | } | ||
45 | |||
46 | return nil | ||
47 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_unix.go new file mode 100644 index 0000000..09d58bc --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_unix.go | |||
@@ -0,0 +1,14 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | import ( | ||
6 | "time" | ||
7 | ) | ||
8 | |||
9 | //setCTime will set the create time on a file. On Unix, the create | ||
10 | //time is updated as a side effect of setting the modified time, so | ||
11 | //no action is required. | ||
12 | func setCTime(path string, ctime time.Time) error { | ||
13 | return nil | ||
14 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_windows.go new file mode 100644 index 0000000..2945868 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes_windows.go | |||
@@ -0,0 +1,27 @@ | |||
1 | // +build windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | import ( | ||
6 | "syscall" | ||
7 | "time" | ||
8 | ) | ||
9 | |||
10 | //setCTime will set the create time on a file. On Windows, this requires | ||
11 | //calling SetFileTime and explicitly including the create time. | ||
12 | func setCTime(path string, ctime time.Time) error { | ||
13 | ctimespec := syscall.NsecToTimespec(ctime.UnixNano()) | ||
14 | pathp, e := syscall.UTF16PtrFromString(path) | ||
15 | if e != nil { | ||
16 | return e | ||
17 | } | ||
18 | h, e := syscall.CreateFile(pathp, | ||
19 | syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil, | ||
20 | syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) | ||
21 | if e != nil { | ||
22 | return e | ||
23 | } | ||
24 | defer syscall.Close(h) | ||
25 | c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec)) | ||
26 | return syscall.SetFileTime(h, &c, nil, nil) | ||
27 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 0000000..2883189 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go | |||
@@ -0,0 +1,10 @@ | |||
1 | package system | ||
2 | |||
3 | import ( | ||
4 | "errors" | ||
5 | ) | ||
6 | |||
7 | var ( | ||
8 | // ErrNotSupportedPlatform means the platform is not supported. | ||
9 | ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") | ||
10 | ) | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go new file mode 100644 index 0000000..04e2de7 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go | |||
@@ -0,0 +1,83 @@ | |||
1 | package system | ||
2 | |||
3 | // This file implements syscalls for Win32 events which are not implemented | ||
4 | // in golang. | ||
5 | |||
6 | import ( | ||
7 | "syscall" | ||
8 | "unsafe" | ||
9 | ) | ||
10 | |||
11 | var ( | ||
12 | procCreateEvent = modkernel32.NewProc("CreateEventW") | ||
13 | procOpenEvent = modkernel32.NewProc("OpenEventW") | ||
14 | procSetEvent = modkernel32.NewProc("SetEvent") | ||
15 | procResetEvent = modkernel32.NewProc("ResetEvent") | ||
16 | procPulseEvent = modkernel32.NewProc("PulseEvent") | ||
17 | ) | ||
18 | |||
19 | // CreateEvent implements win32 CreateEventW func in golang. It will create an event object. | ||
20 | func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { | ||
21 | namep, _ := syscall.UTF16PtrFromString(name) | ||
22 | var _p1 uint32 | ||
23 | if manualReset { | ||
24 | _p1 = 1 | ||
25 | } | ||
26 | var _p2 uint32 | ||
27 | if initialState { | ||
28 | _p2 = 1 | ||
29 | } | ||
30 | r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) | ||
31 | use(unsafe.Pointer(namep)) | ||
32 | handle = syscall.Handle(r0) | ||
33 | if handle == syscall.InvalidHandle { | ||
34 | err = e1 | ||
35 | } | ||
36 | return | ||
37 | } | ||
38 | |||
39 | // OpenEvent implements win32 OpenEventW func in golang. It opens an event object. | ||
40 | func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { | ||
41 | namep, _ := syscall.UTF16PtrFromString(name) | ||
42 | var _p1 uint32 | ||
43 | if inheritHandle { | ||
44 | _p1 = 1 | ||
45 | } | ||
46 | r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) | ||
47 | use(unsafe.Pointer(namep)) | ||
48 | handle = syscall.Handle(r0) | ||
49 | if handle == syscall.InvalidHandle { | ||
50 | err = e1 | ||
51 | } | ||
52 | return | ||
53 | } | ||
54 | |||
55 | // SetEvent implements win32 SetEvent func in golang. | ||
56 | func SetEvent(handle syscall.Handle) (err error) { | ||
57 | return setResetPulse(handle, procSetEvent) | ||
58 | } | ||
59 | |||
60 | // ResetEvent implements win32 ResetEvent func in golang. | ||
61 | func ResetEvent(handle syscall.Handle) (err error) { | ||
62 | return setResetPulse(handle, procResetEvent) | ||
63 | } | ||
64 | |||
65 | // PulseEvent implements win32 PulseEvent func in golang. | ||
66 | func PulseEvent(handle syscall.Handle) (err error) { | ||
67 | return setResetPulse(handle, procPulseEvent) | ||
68 | } | ||
69 | |||
70 | func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { | ||
71 | r0, _, _ := proc.Call(uintptr(handle)) | ||
72 | if r0 != 0 { | ||
73 | err = syscall.Errno(r0) | ||
74 | } | ||
75 | return | ||
76 | } | ||
77 | |||
78 | var temp unsafe.Pointer | ||
79 | |||
80 | // use ensures a variable is kept alive without the GC freeing while still needed | ||
81 | func use(p unsafe.Pointer) { | ||
82 | temp = p | ||
83 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 0000000..c14feb8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go | |||
@@ -0,0 +1,19 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | import ( | ||
6 | "os" | ||
7 | "path/filepath" | ||
8 | ) | ||
9 | |||
10 | // MkdirAll creates a directory named path along with any necessary parents, | ||
11 | // with permission specified by attribute perm for all dir created. | ||
12 | func MkdirAll(path string, perm os.FileMode) error { | ||
13 | return os.MkdirAll(path, perm) | ||
14 | } | ||
15 | |||
16 | // IsAbs is a platform-specific wrapper for filepath.IsAbs. | ||
17 | func IsAbs(path string) bool { | ||
18 | return filepath.IsAbs(path) | ||
19 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go new file mode 100644 index 0000000..16823d5 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go | |||
@@ -0,0 +1,82 @@ | |||
1 | // +build windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | import ( | ||
6 | "os" | ||
7 | "path/filepath" | ||
8 | "regexp" | ||
9 | "strings" | ||
10 | "syscall" | ||
11 | ) | ||
12 | |||
13 | // MkdirAll implementation that is volume path aware for Windows. | ||
14 | func MkdirAll(path string, perm os.FileMode) error { | ||
15 | if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { | ||
16 | return nil | ||
17 | } | ||
18 | |||
19 | // The rest of this method is copied from os.MkdirAll and should be kept | ||
20 | // as-is to ensure compatibility. | ||
21 | |||
22 | // Fast path: if we can tell whether path is a directory or file, stop with success or error. | ||
23 | dir, err := os.Stat(path) | ||
24 | if err == nil { | ||
25 | if dir.IsDir() { | ||
26 | return nil | ||
27 | } | ||
28 | return &os.PathError{ | ||
29 | Op: "mkdir", | ||
30 | Path: path, | ||
31 | Err: syscall.ENOTDIR, | ||
32 | } | ||
33 | } | ||
34 | |||
35 | // Slow path: make sure parent exists and then call Mkdir for path. | ||
36 | i := len(path) | ||
37 | for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. | ||
38 | i-- | ||
39 | } | ||
40 | |||
41 | j := i | ||
42 | for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. | ||
43 | j-- | ||
44 | } | ||
45 | |||
46 | if j > 1 { | ||
47 | // Create parent | ||
48 | err = MkdirAll(path[0:j-1], perm) | ||
49 | if err != nil { | ||
50 | return err | ||
51 | } | ||
52 | } | ||
53 | |||
54 | // Parent now exists; invoke Mkdir and use its result. | ||
55 | err = os.Mkdir(path, perm) | ||
56 | if err != nil { | ||
57 | // Handle arguments like "foo/." by | ||
58 | // double-checking that directory doesn't exist. | ||
59 | dir, err1 := os.Lstat(path) | ||
60 | if err1 == nil && dir.IsDir() { | ||
61 | return nil | ||
62 | } | ||
63 | return err | ||
64 | } | ||
65 | return nil | ||
66 | } | ||
67 | |||
68 | // IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, | ||
69 | // golang filepath.IsAbs does not consider a path \windows\system32 as absolute | ||
70 | // as it doesn't start with a drive-letter/colon combination. However, in | ||
71 | // docker we need to verify things such as WORKDIR /windows/system32 in | ||
72 | // a Dockerfile (which gets translated to \windows\system32 when being processed | ||
73 | // by the daemon). This SHOULD be treated as absolute from a docker processing | ||
74 | // perspective. | ||
75 | func IsAbs(path string) bool { | ||
76 | if !filepath.IsAbs(path) { | ||
77 | if !strings.HasPrefix(path, string(os.PathSeparator)) { | ||
78 | return false | ||
79 | } | ||
80 | } | ||
81 | return true | ||
82 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go new file mode 100644 index 0000000..bd23c4d --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go | |||
@@ -0,0 +1,19 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | import ( | ||
6 | "syscall" | ||
7 | ) | ||
8 | |||
9 | // Lstat takes a path to a file and returns | ||
10 | // a system.StatT type pertaining to that file. | ||
11 | // | ||
12 | // Throws an error if the file does not exist | ||
13 | func Lstat(path string) (*StatT, error) { | ||
14 | s := &syscall.Stat_t{} | ||
15 | if err := syscall.Lstat(path, s); err != nil { | ||
16 | return nil, err | ||
17 | } | ||
18 | return fromStatT(s) | ||
19 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go new file mode 100644 index 0000000..49e87eb --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go | |||
@@ -0,0 +1,25 @@ | |||
1 | // +build windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | import ( | ||
6 | "os" | ||
7 | ) | ||
8 | |||
9 | // Lstat calls os.Lstat to get a fileinfo interface back. | ||
10 | // This is then copied into our own locally defined structure. | ||
11 | // Note the Linux version uses fromStatT to do the copy back, | ||
12 | // but that is not strictly necessary when already in an OS specific module. | ||
13 | func Lstat(path string) (*StatT, error) { | ||
14 | fi, err := os.Lstat(path) | ||
15 | if err != nil { | ||
16 | return nil, err | ||
17 | } | ||
18 | |||
19 | return &StatT{ | ||
20 | name: fi.Name(), | ||
21 | size: fi.Size(), | ||
22 | mode: fi.Mode(), | ||
23 | modTime: fi.ModTime(), | ||
24 | isDir: fi.IsDir()}, nil | ||
25 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go new file mode 100644 index 0000000..3b6e947 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go | |||
@@ -0,0 +1,17 @@ | |||
1 | package system | ||
2 | |||
3 | // MemInfo contains memory statistics of the host system. | ||
4 | type MemInfo struct { | ||
5 | // Total usable RAM (i.e. physical RAM minus a few reserved bits and the | ||
6 | // kernel binary code). | ||
7 | MemTotal int64 | ||
8 | |||
9 | // Amount of free memory. | ||
10 | MemFree int64 | ||
11 | |||
12 | // Total amount of swap space available. | ||
13 | SwapTotal int64 | ||
14 | |||
15 | // Amount of swap space that is currently unused. | ||
16 | SwapFree int64 | ||
17 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go new file mode 100644 index 0000000..c14dbf3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go | |||
@@ -0,0 +1,66 @@ | |||
1 | package system | ||
2 | |||
3 | import ( | ||
4 | "bufio" | ||
5 | "io" | ||
6 | "os" | ||
7 | "strconv" | ||
8 | "strings" | ||
9 | |||
10 | "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units" | ||
11 | ) | ||
12 | |||
13 | // ReadMemInfo retrieves memory statistics of the host system and returns a | ||
14 | // MemInfo type. | ||
15 | func ReadMemInfo() (*MemInfo, error) { | ||
16 | file, err := os.Open("/proc/meminfo") | ||
17 | if err != nil { | ||
18 | return nil, err | ||
19 | } | ||
20 | defer file.Close() | ||
21 | return parseMemInfo(file) | ||
22 | } | ||
23 | |||
24 | // parseMemInfo parses the /proc/meminfo file into | ||
25 | // a MemInfo object given a io.Reader to the file. | ||
26 | // | ||
27 | // Throws error if there are problems reading from the file | ||
28 | func parseMemInfo(reader io.Reader) (*MemInfo, error) { | ||
29 | meminfo := &MemInfo{} | ||
30 | scanner := bufio.NewScanner(reader) | ||
31 | for scanner.Scan() { | ||
32 | // Expected format: ["MemTotal:", "1234", "kB"] | ||
33 | parts := strings.Fields(scanner.Text()) | ||
34 | |||
35 | // Sanity checks: Skip malformed entries. | ||
36 | if len(parts) < 3 || parts[2] != "kB" { | ||
37 | continue | ||
38 | } | ||
39 | |||
40 | // Convert to bytes. | ||
41 | size, err := strconv.Atoi(parts[1]) | ||
42 | if err != nil { | ||
43 | continue | ||
44 | } | ||
45 | bytes := int64(size) * units.KiB | ||
46 | |||
47 | switch parts[0] { | ||
48 | case "MemTotal:": | ||
49 | meminfo.MemTotal = bytes | ||
50 | case "MemFree:": | ||
51 | meminfo.MemFree = bytes | ||
52 | case "SwapTotal:": | ||
53 | meminfo.SwapTotal = bytes | ||
54 | case "SwapFree:": | ||
55 | meminfo.SwapFree = bytes | ||
56 | } | ||
57 | |||
58 | } | ||
59 | |||
60 | // Handle errors that may have occurred during the reading of the file. | ||
61 | if err := scanner.Err(); err != nil { | ||
62 | return nil, err | ||
63 | } | ||
64 | |||
65 | return meminfo, nil | ||
66 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 0000000..82ddd30 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go | |||
@@ -0,0 +1,8 @@ | |||
1 | // +build !linux,!windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | // ReadMemInfo is not supported on platforms other than linux and windows. | ||
6 | func ReadMemInfo() (*MemInfo, error) { | ||
7 | return nil, ErrNotSupportedPlatform | ||
8 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 0000000..d466425 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go | |||
@@ -0,0 +1,44 @@ | |||
1 | package system | ||
2 | |||
3 | import ( | ||
4 | "syscall" | ||
5 | "unsafe" | ||
6 | ) | ||
7 | |||
8 | var ( | ||
9 | modkernel32 = syscall.NewLazyDLL("kernel32.dll") | ||
10 | |||
11 | procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") | ||
12 | ) | ||
13 | |||
14 | // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx | ||
15 | // https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx | ||
16 | type memorystatusex struct { | ||
17 | dwLength uint32 | ||
18 | dwMemoryLoad uint32 | ||
19 | ullTotalPhys uint64 | ||
20 | ullAvailPhys uint64 | ||
21 | ullTotalPageFile uint64 | ||
22 | ullAvailPageFile uint64 | ||
23 | ullTotalVirtual uint64 | ||
24 | ullAvailVirtual uint64 | ||
25 | ullAvailExtendedVirtual uint64 | ||
26 | } | ||
27 | |||
28 | // ReadMemInfo retrieves memory statistics of the host system and returns a | ||
29 | // MemInfo type. | ||
30 | func ReadMemInfo() (*MemInfo, error) { | ||
31 | msi := &memorystatusex{ | ||
32 | dwLength: 64, | ||
33 | } | ||
34 | r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) | ||
35 | if r1 == 0 { | ||
36 | return &MemInfo{}, nil | ||
37 | } | ||
38 | return &MemInfo{ | ||
39 | MemTotal: int64(msi.ullTotalPhys), | ||
40 | MemFree: int64(msi.ullAvailPhys), | ||
41 | SwapTotal: int64(msi.ullTotalPageFile), | ||
42 | SwapFree: int64(msi.ullAvailPageFile), | ||
43 | }, nil | ||
44 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 0000000..7395818 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go | |||
@@ -0,0 +1,22 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | import ( | ||
6 | "syscall" | ||
7 | ) | ||
8 | |||
9 | // Mknod creates a filesystem node (file, device special file or named pipe) named path | ||
10 | // with attributes specified by mode and dev. | ||
11 | func Mknod(path string, mode uint32, dev int) error { | ||
12 | return syscall.Mknod(path, mode, dev) | ||
13 | } | ||
14 | |||
15 | // Mkdev is used to build the value of linux devices (in /dev/) which specifies major | ||
16 | // and minor number of the newly created device special file. | ||
17 | // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. | ||
18 | // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, | ||
19 | // then the top 12 bits of the minor. | ||
20 | func Mkdev(major int64, minor int64) uint32 { | ||
21 | return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) | ||
22 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 0000000..2e863c0 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go | |||
@@ -0,0 +1,13 @@ | |||
1 | // +build windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | // Mknod is not implemented on Windows. | ||
6 | func Mknod(path string, mode uint32, dev int) error { | ||
7 | return ErrNotSupportedPlatform | ||
8 | } | ||
9 | |||
10 | // Mkdev is not implemented on Windows. | ||
11 | func Mkdev(major int64, minor int64) uint32 { | ||
12 | panic("Mkdev not implemented on Windows.") | ||
13 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 0000000..1b6cc9c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_unix.go | |||
@@ -0,0 +1,8 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | // DefaultPathEnv is unix style list of directories to search for | ||
6 | // executables. Each directory is separated from the next by a colon | ||
7 | // ':' character . | ||
8 | const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 0000000..09e7f89 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_windows.go | |||
@@ -0,0 +1,7 @@ | |||
1 | // +build windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | // DefaultPathEnv is deliberately empty on Windows as the default path will be set by | ||
6 | // the container. Docker has no context of what the default path should be. | ||
7 | const DefaultPathEnv = "" | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go new file mode 100644 index 0000000..087034c --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go | |||
@@ -0,0 +1,53 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | import ( | ||
6 | "syscall" | ||
7 | ) | ||
8 | |||
9 | // StatT type contains status of a file. It contains metadata | ||
10 | // like permission, owner, group, size, etc about a file. | ||
11 | type StatT struct { | ||
12 | mode uint32 | ||
13 | uid uint32 | ||
14 | gid uint32 | ||
15 | rdev uint64 | ||
16 | size int64 | ||
17 | mtim syscall.Timespec | ||
18 | } | ||
19 | |||
20 | // Mode returns file's permission mode. | ||
21 | func (s StatT) Mode() uint32 { | ||
22 | return s.mode | ||
23 | } | ||
24 | |||
25 | // UID returns file's user id of owner. | ||
26 | func (s StatT) UID() uint32 { | ||
27 | return s.uid | ||
28 | } | ||
29 | |||
30 | // GID returns file's group id of owner. | ||
31 | func (s StatT) GID() uint32 { | ||
32 | return s.gid | ||
33 | } | ||
34 | |||
35 | // Rdev returns file's device ID (if it's special file). | ||
36 | func (s StatT) Rdev() uint64 { | ||
37 | return s.rdev | ||
38 | } | ||
39 | |||
40 | // Size returns file's size. | ||
41 | func (s StatT) Size() int64 { | ||
42 | return s.size | ||
43 | } | ||
44 | |||
45 | // Mtim returns file's last modification time. | ||
46 | func (s StatT) Mtim() syscall.Timespec { | ||
47 | return s.mtim | ||
48 | } | ||
49 | |||
50 | // GetLastModification returns file's last modification time. | ||
51 | func (s StatT) GetLastModification() syscall.Timespec { | ||
52 | return s.Mtim() | ||
53 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go new file mode 100644 index 0000000..d0fb6f1 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go | |||
@@ -0,0 +1,27 @@ | |||
1 | package system | ||
2 | |||
3 | import ( | ||
4 | "syscall" | ||
5 | ) | ||
6 | |||
7 | // fromStatT converts a syscall.Stat_t type to a system.Stat_t type | ||
8 | func fromStatT(s *syscall.Stat_t) (*StatT, error) { | ||
9 | return &StatT{size: s.Size, | ||
10 | mode: uint32(s.Mode), | ||
11 | uid: s.Uid, | ||
12 | gid: s.Gid, | ||
13 | rdev: uint64(s.Rdev), | ||
14 | mtim: s.Mtimespec}, nil | ||
15 | } | ||
16 | |||
17 | // Stat takes a path to a file and returns | ||
18 | // a system.StatT type pertaining to that file. | ||
19 | // | ||
20 | // Throws an error if the file does not exist | ||
21 | func Stat(path string) (*StatT, error) { | ||
22 | s := &syscall.Stat_t{} | ||
23 | if err := syscall.Stat(path, s); err != nil { | ||
24 | return nil, err | ||
25 | } | ||
26 | return fromStatT(s) | ||
27 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go new file mode 100644 index 0000000..8b1eded --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go | |||
@@ -0,0 +1,33 @@ | |||
1 | package system | ||
2 | |||
3 | import ( | ||
4 | "syscall" | ||
5 | ) | ||
6 | |||
7 | // fromStatT converts a syscall.Stat_t type to a system.Stat_t type | ||
8 | func fromStatT(s *syscall.Stat_t) (*StatT, error) { | ||
9 | return &StatT{size: s.Size, | ||
10 | mode: s.Mode, | ||
11 | uid: s.Uid, | ||
12 | gid: s.Gid, | ||
13 | rdev: s.Rdev, | ||
14 | mtim: s.Mtim}, nil | ||
15 | } | ||
16 | |||
17 | // FromStatT exists only on linux, and loads a system.StatT from a | ||
18 | // syscall.Stat_t. | ||
19 | func FromStatT(s *syscall.Stat_t) (*StatT, error) { | ||
20 | return fromStatT(s) | ||
21 | } | ||
22 | |||
23 | // Stat takes a path to a file and returns | ||
24 | // a system.StatT type pertaining to that file. | ||
25 | // | ||
26 | // Throws an error if the file does not exist | ||
27 | func Stat(path string) (*StatT, error) { | ||
28 | s := &syscall.Stat_t{} | ||
29 | if err := syscall.Stat(path, s); err != nil { | ||
30 | return nil, err | ||
31 | } | ||
32 | return fromStatT(s) | ||
33 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_openbsd.go new file mode 100644 index 0000000..3c3b71f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_openbsd.go | |||
@@ -0,0 +1,15 @@ | |||
1 | package system | ||
2 | |||
3 | import ( | ||
4 | "syscall" | ||
5 | ) | ||
6 | |||
7 | // fromStatT creates a system.StatT type from a syscall.Stat_t type | ||
8 | func fromStatT(s *syscall.Stat_t) (*StatT, error) { | ||
9 | return &StatT{size: s.Size, | ||
10 | mode: uint32(s.Mode), | ||
11 | uid: s.Uid, | ||
12 | gid: s.Gid, | ||
13 | rdev: uint64(s.Rdev), | ||
14 | mtim: s.Mtim}, nil | ||
15 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 0000000..b01d08a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_solaris.go | |||
@@ -0,0 +1,17 @@ | |||
1 | // +build solaris | ||
2 | |||
3 | package system | ||
4 | |||
5 | import ( | ||
6 | "syscall" | ||
7 | ) | ||
8 | |||
9 | // fromStatT creates a system.StatT type from a syscall.Stat_t type | ||
10 | func fromStatT(s *syscall.Stat_t) (*StatT, error) { | ||
11 | return &StatT{size: s.Size, | ||
12 | mode: uint32(s.Mode), | ||
13 | uid: s.Uid, | ||
14 | gid: s.Gid, | ||
15 | rdev: uint64(s.Rdev), | ||
16 | mtim: s.Mtim}, nil | ||
17 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go new file mode 100644 index 0000000..f53e9de --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go | |||
@@ -0,0 +1,17 @@ | |||
1 | // +build !linux,!windows,!freebsd,!solaris,!openbsd | ||
2 | |||
3 | package system | ||
4 | |||
5 | import ( | ||
6 | "syscall" | ||
7 | ) | ||
8 | |||
9 | // fromStatT creates a system.StatT type from a syscall.Stat_t type | ||
10 | func fromStatT(s *syscall.Stat_t) (*StatT, error) { | ||
11 | return &StatT{size: s.Size, | ||
12 | mode: uint32(s.Mode), | ||
13 | uid: s.Uid, | ||
14 | gid: s.Gid, | ||
15 | rdev: uint64(s.Rdev), | ||
16 | mtim: s.Mtimespec}, nil | ||
17 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go new file mode 100644 index 0000000..39490c6 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go | |||
@@ -0,0 +1,43 @@ | |||
1 | // +build windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | import ( | ||
6 | "os" | ||
7 | "time" | ||
8 | ) | ||
9 | |||
10 | // StatT type contains status of a file. It contains metadata | ||
11 | // like name, permission, size, etc about a file. | ||
12 | type StatT struct { | ||
13 | name string | ||
14 | size int64 | ||
15 | mode os.FileMode | ||
16 | modTime time.Time | ||
17 | isDir bool | ||
18 | } | ||
19 | |||
20 | // Name returns file's name. | ||
21 | func (s StatT) Name() string { | ||
22 | return s.name | ||
23 | } | ||
24 | |||
25 | // Size returns file's size. | ||
26 | func (s StatT) Size() int64 { | ||
27 | return s.size | ||
28 | } | ||
29 | |||
30 | // Mode returns file's permission mode. | ||
31 | func (s StatT) Mode() os.FileMode { | ||
32 | return s.mode | ||
33 | } | ||
34 | |||
35 | // ModTime returns file's last modification time. | ||
36 | func (s StatT) ModTime() time.Time { | ||
37 | return s.modTime | ||
38 | } | ||
39 | |||
40 | // IsDir returns whether file is actually a directory. | ||
41 | func (s StatT) IsDir() bool { | ||
42 | return s.isDir | ||
43 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go new file mode 100644 index 0000000..f1497c5 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go | |||
@@ -0,0 +1,11 @@ | |||
1 | // +build linux freebsd | ||
2 | |||
3 | package system | ||
4 | |||
5 | import "syscall" | ||
6 | |||
7 | // Unmount is a platform-specific helper function to call | ||
8 | // the unmount syscall. | ||
9 | func Unmount(dest string) error { | ||
10 | return syscall.Unmount(dest, 0) | ||
11 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go new file mode 100644 index 0000000..273aa23 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go | |||
@@ -0,0 +1,36 @@ | |||
1 | package system | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "syscall" | ||
6 | ) | ||
7 | |||
8 | // OSVersion is a wrapper for Windows version information | ||
9 | // https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx | ||
10 | type OSVersion struct { | ||
11 | Version uint32 | ||
12 | MajorVersion uint8 | ||
13 | MinorVersion uint8 | ||
14 | Build uint16 | ||
15 | } | ||
16 | |||
17 | // GetOSVersion gets the operating system version on Windows. Note that | ||
18 | // docker.exe must be manifested to get the correct version information. | ||
19 | func GetOSVersion() (OSVersion, error) { | ||
20 | var err error | ||
21 | osv := OSVersion{} | ||
22 | osv.Version, err = syscall.GetVersion() | ||
23 | if err != nil { | ||
24 | return osv, fmt.Errorf("Failed to call GetVersion()") | ||
25 | } | ||
26 | osv.MajorVersion = uint8(osv.Version & 0xFF) | ||
27 | osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) | ||
28 | osv.Build = uint16(osv.Version >> 16) | ||
29 | return osv, nil | ||
30 | } | ||
31 | |||
32 | // Unmount is a platform-specific helper function to call | ||
33 | // the unmount syscall. Not supported on Windows | ||
34 | func Unmount(dest string) error { | ||
35 | return nil | ||
36 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go new file mode 100644 index 0000000..c670fcd --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go | |||
@@ -0,0 +1,13 @@ | |||
1 | // +build !windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | import ( | ||
6 | "syscall" | ||
7 | ) | ||
8 | |||
9 | // Umask sets current process's file mode creation mask to newmask | ||
10 | // and return oldmask. | ||
11 | func Umask(newmask int) (oldmask int, err error) { | ||
12 | return syscall.Umask(newmask), nil | ||
13 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go new file mode 100644 index 0000000..13f1de1 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go | |||
@@ -0,0 +1,9 @@ | |||
1 | // +build windows | ||
2 | |||
3 | package system | ||
4 | |||
5 | // Umask is not supported on the windows platform. | ||
6 | func Umask(newmask int) (oldmask int, err error) { | ||
7 | // should not be called on cli code path | ||
8 | return 0, ErrNotSupportedPlatform | ||
9 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go new file mode 100644 index 0000000..0a16197 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go | |||
@@ -0,0 +1,8 @@ | |||
1 | package system | ||
2 | |||
3 | import "syscall" | ||
4 | |||
5 | // LUtimesNano is not supported by darwin platform. | ||
6 | func LUtimesNano(path string, ts []syscall.Timespec) error { | ||
7 | return ErrNotSupportedPlatform | ||
8 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go new file mode 100644 index 0000000..e2eac3b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go | |||
@@ -0,0 +1,22 @@ | |||
1 | package system | ||
2 | |||
3 | import ( | ||
4 | "syscall" | ||
5 | "unsafe" | ||
6 | ) | ||
7 | |||
8 | // LUtimesNano is used to change access and modification time of the specified path. | ||
9 | // It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. | ||
10 | func LUtimesNano(path string, ts []syscall.Timespec) error { | ||
11 | var _path *byte | ||
12 | _path, err := syscall.BytePtrFromString(path) | ||
13 | if err != nil { | ||
14 | return err | ||
15 | } | ||
16 | |||
17 | if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { | ||
18 | return err | ||
19 | } | ||
20 | |||
21 | return nil | ||
22 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go new file mode 100644 index 0000000..fc8a1ab --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go | |||
@@ -0,0 +1,26 @@ | |||
1 | package system | ||
2 | |||
3 | import ( | ||
4 | "syscall" | ||
5 | "unsafe" | ||
6 | ) | ||
7 | |||
8 | // LUtimesNano is used to change access and modification time of the specified path. | ||
9 | // It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. | ||
10 | func LUtimesNano(path string, ts []syscall.Timespec) error { | ||
11 | // These are not currently available in syscall | ||
12 | atFdCwd := -100 | ||
13 | atSymLinkNoFollow := 0x100 | ||
14 | |||
15 | var _path *byte | ||
16 | _path, err := syscall.BytePtrFromString(path) | ||
17 | if err != nil { | ||
18 | return err | ||
19 | } | ||
20 | |||
21 | if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { | ||
22 | return err | ||
23 | } | ||
24 | |||
25 | return nil | ||
26 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go new file mode 100644 index 0000000..50c3a04 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go | |||
@@ -0,0 +1,10 @@ | |||
1 | // +build !linux,!freebsd,!darwin | ||
2 | |||
3 | package system | ||
4 | |||
5 | import "syscall" | ||
6 | |||
7 | // LUtimesNano is not supported on platforms other than linux, freebsd and darwin. | ||
8 | func LUtimesNano(path string, ts []syscall.Timespec) error { | ||
9 | return ErrNotSupportedPlatform | ||
10 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go new file mode 100644 index 0000000..d2e2c05 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go | |||
@@ -0,0 +1,63 @@ | |||
1 | package system | ||
2 | |||
3 | import ( | ||
4 | "syscall" | ||
5 | "unsafe" | ||
6 | ) | ||
7 | |||
8 | // Lgetxattr retrieves the value of the extended attribute identified by attr | ||
9 | // and associated with the given path in the file system. | ||
10 | // It will returns a nil slice and nil error if the xattr is not set. | ||
11 | func Lgetxattr(path string, attr string) ([]byte, error) { | ||
12 | pathBytes, err := syscall.BytePtrFromString(path) | ||
13 | if err != nil { | ||
14 | return nil, err | ||
15 | } | ||
16 | attrBytes, err := syscall.BytePtrFromString(attr) | ||
17 | if err != nil { | ||
18 | return nil, err | ||
19 | } | ||
20 | |||
21 | dest := make([]byte, 128) | ||
22 | destBytes := unsafe.Pointer(&dest[0]) | ||
23 | sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) | ||
24 | if errno == syscall.ENODATA { | ||
25 | return nil, nil | ||
26 | } | ||
27 | if errno == syscall.ERANGE { | ||
28 | dest = make([]byte, sz) | ||
29 | destBytes := unsafe.Pointer(&dest[0]) | ||
30 | sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) | ||
31 | } | ||
32 | if errno != 0 { | ||
33 | return nil, errno | ||
34 | } | ||
35 | |||
36 | return dest[:sz], nil | ||
37 | } | ||
38 | |||
39 | var _zero uintptr | ||
40 | |||
41 | // Lsetxattr sets the value of the extended attribute identified by attr | ||
42 | // and associated with the given path in the file system. | ||
43 | func Lsetxattr(path string, attr string, data []byte, flags int) error { | ||
44 | pathBytes, err := syscall.BytePtrFromString(path) | ||
45 | if err != nil { | ||
46 | return err | ||
47 | } | ||
48 | attrBytes, err := syscall.BytePtrFromString(attr) | ||
49 | if err != nil { | ||
50 | return err | ||
51 | } | ||
52 | var dataBytes unsafe.Pointer | ||
53 | if len(data) > 0 { | ||
54 | dataBytes = unsafe.Pointer(&data[0]) | ||
55 | } else { | ||
56 | dataBytes = unsafe.Pointer(&_zero) | ||
57 | } | ||
58 | _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) | ||
59 | if errno != 0 { | ||
60 | return errno | ||
61 | } | ||
62 | return nil | ||
63 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go new file mode 100644 index 0000000..0114f22 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go | |||
@@ -0,0 +1,13 @@ | |||
1 | // +build !linux | ||
2 | |||
3 | package system | ||
4 | |||
5 | // Lgetxattr is not supported on platforms other than linux. | ||
6 | func Lgetxattr(path string, attr string) ([]byte, error) { | ||
7 | return nil, ErrNotSupportedPlatform | ||
8 | } | ||
9 | |||
10 | // Lsetxattr is not supported on platforms other than linux. | ||
11 | func Lsetxattr(path string, attr string, data []byte, flags int) error { | ||
12 | return ErrNotSupportedPlatform | ||
13 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 0000000..9ea86d7 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md | |||
@@ -0,0 +1,67 @@ | |||
1 | # Contributing to go-units | ||
2 | |||
3 | Want to hack on go-units? Awesome! Here are instructions to get you started. | ||
4 | |||
5 | go-units is a part of the [Docker](https://www.docker.com) project, and follows | ||
6 | the same rules and principles. If you're already familiar with the way | ||
7 | Docker does things, you'll feel right at home. | ||
8 | |||
9 | Otherwise, go read Docker's | ||
10 | [contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), | ||
11 | [issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), | ||
12 | [review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and | ||
13 | [branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). | ||
14 | |||
15 | ### Sign your work | ||
16 | |||
17 | The sign-off is a simple line at the end of the explanation for the patch. Your | ||
18 | signature certifies that you wrote the patch or otherwise have the right to pass | ||
19 | it on as an open-source patch. The rules are pretty simple: if you can certify | ||
20 | the below (from [developercertificate.org](http://developercertificate.org/)): | ||
21 | |||
22 | ``` | ||
23 | Developer Certificate of Origin | ||
24 | Version 1.1 | ||
25 | |||
26 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. | ||
27 | 660 York Street, Suite 102, | ||
28 | San Francisco, CA 94110 USA | ||
29 | |||
30 | Everyone is permitted to copy and distribute verbatim copies of this | ||
31 | license document, but changing it is not allowed. | ||
32 | |||
33 | Developer's Certificate of Origin 1.1 | ||
34 | |||
35 | By making a contribution to this project, I certify that: | ||
36 | |||
37 | (a) The contribution was created in whole or in part by me and I | ||
38 | have the right to submit it under the open source license | ||
39 | indicated in the file; or | ||
40 | |||
41 | (b) The contribution is based upon previous work that, to the best | ||
42 | of my knowledge, is covered under an appropriate open source | ||
43 | license and I have the right under that license to submit that | ||
44 | work with modifications, whether created in whole or in part | ||
45 | by me, under the same open source license (unless I am | ||
46 | permitted to submit under a different license), as indicated | ||
47 | in the file; or | ||
48 | |||
49 | (c) The contribution was provided directly to me by some other | ||
50 | person who certified (a), (b) or (c) and I have not modified | ||
51 | it. | ||
52 | |||
53 | (d) I understand and agree that this project and the contribution | ||
54 | are public and that a record of the contribution (including all | ||
55 | personal information I submit with it, including my sign-off) is | ||
56 | maintained indefinitely and may be redistributed consistent with | ||
57 | this project or the open source license(s) involved. | ||
58 | ``` | ||
59 | |||
60 | Then you just add a line to every git commit message: | ||
61 | |||
62 | Signed-off-by: Joe Smith <joe.smith@email.com> | ||
63 | |||
64 | Use your real name (sorry, no pseudonyms or anonymous contributions.) | ||
65 | |||
66 | If you set your `user.name` and `user.email` git configs, you can sign your | ||
67 | commit automatically with `git commit -s`. | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.code b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.code new file mode 100644 index 0000000..b55b37b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.code | |||
@@ -0,0 +1,191 @@ | |||
1 | |||
2 | Apache License | ||
3 | Version 2.0, January 2004 | ||
4 | https://www.apache.org/licenses/ | ||
5 | |||
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||
7 | |||
8 | 1. Definitions. | ||
9 | |||
10 | "License" shall mean the terms and conditions for use, reproduction, | ||
11 | and distribution as defined by Sections 1 through 9 of this document. | ||
12 | |||
13 | "Licensor" shall mean the copyright owner or entity authorized by | ||
14 | the copyright owner that is granting the License. | ||
15 | |||
16 | "Legal Entity" shall mean the union of the acting entity and all | ||
17 | other entities that control, are controlled by, or are under common | ||
18 | control with that entity. For the purposes of this definition, | ||
19 | "control" means (i) the power, direct or indirect, to cause the | ||
20 | direction or management of such entity, whether by contract or | ||
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||
22 | outstanding shares, or (iii) beneficial ownership of such entity. | ||
23 | |||
24 | "You" (or "Your") shall mean an individual or Legal Entity | ||
25 | exercising permissions granted by this License. | ||
26 | |||
27 | "Source" form shall mean the preferred form for making modifications, | ||
28 | including but not limited to software source code, documentation | ||
29 | source, and configuration files. | ||
30 | |||
31 | "Object" form shall mean any form resulting from mechanical | ||
32 | transformation or translation of a Source form, including but | ||
33 | not limited to compiled object code, generated documentation, | ||
34 | and conversions to other media types. | ||
35 | |||
36 | "Work" shall mean the work of authorship, whether in Source or | ||
37 | Object form, made available under the License, as indicated by a | ||
38 | copyright notice that is included in or attached to the work | ||
39 | (an example is provided in the Appendix below). | ||
40 | |||
41 | "Derivative Works" shall mean any work, whether in Source or Object | ||
42 | form, that is based on (or derived from) the Work and for which the | ||
43 | editorial revisions, annotations, elaborations, or other modifications | ||
44 | represent, as a whole, an original work of authorship. For the purposes | ||
45 | of this License, Derivative Works shall not include works that remain | ||
46 | separable from, or merely link (or bind by name) to the interfaces of, | ||
47 | the Work and Derivative Works thereof. | ||
48 | |||
49 | "Contribution" shall mean any work of authorship, including | ||
50 | the original version of the Work and any modifications or additions | ||
51 | to that Work or Derivative Works thereof, that is intentionally | ||
52 | submitted to Licensor for inclusion in the Work by the copyright owner | ||
53 | or by an individual or Legal Entity authorized to submit on behalf of | ||
54 | the copyright owner. For the purposes of this definition, "submitted" | ||
55 | means any form of electronic, verbal, or written communication sent | ||
56 | to the Licensor or its representatives, including but not limited to | ||
57 | communication on electronic mailing lists, source code control systems, | ||
58 | and issue tracking systems that are managed by, or on behalf of, the | ||
59 | Licensor for the purpose of discussing and improving the Work, but | ||
60 | excluding communication that is conspicuously marked or otherwise | ||
61 | designated in writing by the copyright owner as "Not a Contribution." | ||
62 | |||
63 | "Contributor" shall mean Licensor and any individual or Legal Entity | ||
64 | on behalf of whom a Contribution has been received by Licensor and | ||
65 | subsequently incorporated within the Work. | ||
66 | |||
67 | 2. Grant of Copyright License. Subject to the terms and conditions of | ||
68 | this License, each Contributor hereby grants to You a perpetual, | ||
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||
70 | copyright license to reproduce, prepare Derivative Works of, | ||
71 | publicly display, publicly perform, sublicense, and distribute the | ||
72 | Work and such Derivative Works in Source or Object form. | ||
73 | |||
74 | 3. Grant of Patent License. Subject to the terms and conditions of | ||
75 | this License, each Contributor hereby grants to You a perpetual, | ||
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||
77 | (except as stated in this section) patent license to make, have made, | ||
78 | use, offer to sell, sell, import, and otherwise transfer the Work, | ||
79 | where such license applies only to those patent claims licensable | ||
80 | by such Contributor that are necessarily infringed by their | ||
81 | Contribution(s) alone or by combination of their Contribution(s) | ||
82 | with the Work to which such Contribution(s) was submitted. If You | ||
83 | institute patent litigation against any entity (including a | ||
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work | ||
85 | or a Contribution incorporated within the Work constitutes direct | ||
86 | or contributory patent infringement, then any patent licenses | ||
87 | granted to You under this License for that Work shall terminate | ||
88 | as of the date such litigation is filed. | ||
89 | |||
90 | 4. Redistribution. You may reproduce and distribute copies of the | ||
91 | Work or Derivative Works thereof in any medium, with or without | ||
92 | modifications, and in Source or Object form, provided that You | ||
93 | meet the following conditions: | ||
94 | |||
95 | (a) You must give any other recipients of the Work or | ||
96 | Derivative Works a copy of this License; and | ||
97 | |||
98 | (b) You must cause any modified files to carry prominent notices | ||
99 | stating that You changed the files; and | ||
100 | |||
101 | (c) You must retain, in the Source form of any Derivative Works | ||
102 | that You distribute, all copyright, patent, trademark, and | ||
103 | attribution notices from the Source form of the Work, | ||
104 | excluding those notices that do not pertain to any part of | ||
105 | the Derivative Works; and | ||
106 | |||
107 | (d) If the Work includes a "NOTICE" text file as part of its | ||
108 | distribution, then any Derivative Works that You distribute must | ||
109 | include a readable copy of the attribution notices contained | ||
110 | within such NOTICE file, excluding those notices that do not | ||
111 | pertain to any part of the Derivative Works, in at least one | ||
112 | of the following places: within a NOTICE text file distributed | ||
113 | as part of the Derivative Works; within the Source form or | ||
114 | documentation, if provided along with the Derivative Works; or, | ||
115 | within a display generated by the Derivative Works, if and | ||
116 | wherever such third-party notices normally appear. The contents | ||
117 | of the NOTICE file are for informational purposes only and | ||
118 | do not modify the License. You may add Your own attribution | ||
119 | notices within Derivative Works that You distribute, alongside | ||
120 | or as an addendum to the NOTICE text from the Work, provided | ||
121 | that such additional attribution notices cannot be construed | ||
122 | as modifying the License. | ||
123 | |||
124 | You may add Your own copyright statement to Your modifications and | ||
125 | may provide additional or different license terms and conditions | ||
126 | for use, reproduction, or distribution of Your modifications, or | ||
127 | for any such Derivative Works as a whole, provided Your use, | ||
128 | reproduction, and distribution of the Work otherwise complies with | ||
129 | the conditions stated in this License. | ||
130 | |||
131 | 5. Submission of Contributions. Unless You explicitly state otherwise, | ||
132 | any Contribution intentionally submitted for inclusion in the Work | ||
133 | by You to the Licensor shall be under the terms and conditions of | ||
134 | this License, without any additional terms or conditions. | ||
135 | Notwithstanding the above, nothing herein shall supersede or modify | ||
136 | the terms of any separate license agreement you may have executed | ||
137 | with Licensor regarding such Contributions. | ||
138 | |||
139 | 6. Trademarks. This License does not grant permission to use the trade | ||
140 | names, trademarks, service marks, or product names of the Licensor, | ||
141 | except as required for reasonable and customary use in describing the | ||
142 | origin of the Work and reproducing the content of the NOTICE file. | ||
143 | |||
144 | 7. Disclaimer of Warranty. Unless required by applicable law or | ||
145 | agreed to in writing, Licensor provides the Work (and each | ||
146 | Contributor provides its Contributions) on an "AS IS" BASIS, | ||
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||
148 | implied, including, without limitation, any warranties or conditions | ||
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||
150 | PARTICULAR PURPOSE. You are solely responsible for determining the | ||
151 | appropriateness of using or redistributing the Work and assume any | ||
152 | risks associated with Your exercise of permissions under this License. | ||
153 | |||
154 | 8. Limitation of Liability. In no event and under no legal theory, | ||
155 | whether in tort (including negligence), contract, or otherwise, | ||
156 | unless required by applicable law (such as deliberate and grossly | ||
157 | negligent acts) or agreed to in writing, shall any Contributor be | ||
158 | liable to You for damages, including any direct, indirect, special, | ||
159 | incidental, or consequential damages of any character arising as a | ||
160 | result of this License or out of the use or inability to use the | ||
161 | Work (including but not limited to damages for loss of goodwill, | ||
162 | work stoppage, computer failure or malfunction, or any and all | ||
163 | other commercial damages or losses), even if such Contributor | ||
164 | has been advised of the possibility of such damages. | ||
165 | |||
166 | 9. Accepting Warranty or Additional Liability. While redistributing | ||
167 | the Work or Derivative Works thereof, You may choose to offer, | ||
168 | and charge a fee for, acceptance of support, warranty, indemnity, | ||
169 | or other liability obligations and/or rights consistent with this | ||
170 | License. However, in accepting such obligations, You may act only | ||
171 | on Your own behalf and on Your sole responsibility, not on behalf | ||
172 | of any other Contributor, and only if You agree to indemnify, | ||
173 | defend, and hold each Contributor harmless for any liability | ||
174 | incurred by, or claims asserted against, such Contributor by reason | ||
175 | of your accepting any such warranty or additional liability. | ||
176 | |||
177 | END OF TERMS AND CONDITIONS | ||
178 | |||
179 | Copyright 2015 Docker, Inc. | ||
180 | |||
181 | Licensed under the Apache License, Version 2.0 (the "License"); | ||
182 | you may not use this file except in compliance with the License. | ||
183 | You may obtain a copy of the License at | ||
184 | |||
185 | https://www.apache.org/licenses/LICENSE-2.0 | ||
186 | |||
187 | Unless required by applicable law or agreed to in writing, software | ||
188 | distributed under the License is distributed on an "AS IS" BASIS, | ||
189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
190 | See the License for the specific language governing permissions and | ||
191 | limitations under the License. | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.docs b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.docs new file mode 100644 index 0000000..e26cd4f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.docs | |||
@@ -0,0 +1,425 @@ | |||
1 | Attribution-ShareAlike 4.0 International | ||
2 | |||
3 | ======================================================================= | ||
4 | |||
5 | Creative Commons Corporation ("Creative Commons") is not a law firm and | ||
6 | does not provide legal services or legal advice. Distribution of | ||
7 | Creative Commons public licenses does not create a lawyer-client or | ||
8 | other relationship. Creative Commons makes its licenses and related | ||
9 | information available on an "as-is" basis. Creative Commons gives no | ||
10 | warranties regarding its licenses, any material licensed under their | ||
11 | terms and conditions, or any related information. Creative Commons | ||
12 | disclaims all liability for damages resulting from their use to the | ||
13 | fullest extent possible. | ||
14 | |||
15 | Using Creative Commons Public Licenses | ||
16 | |||
17 | Creative Commons public licenses provide a standard set of terms and | ||
18 | conditions that creators and other rights holders may use to share | ||
19 | original works of authorship and other material subject to copyright | ||
20 | and certain other rights specified in the public license below. The | ||
21 | following considerations are for informational purposes only, are not | ||
22 | exhaustive, and do not form part of our licenses. | ||
23 | |||
24 | Considerations for licensors: Our public licenses are | ||
25 | intended for use by those authorized to give the public | ||
26 | permission to use material in ways otherwise restricted by | ||
27 | copyright and certain other rights. Our licenses are | ||
28 | irrevocable. Licensors should read and understand the terms | ||
29 | and conditions of the license they choose before applying it. | ||
30 | Licensors should also secure all rights necessary before | ||
31 | applying our licenses so that the public can reuse the | ||
32 | material as expected. Licensors should clearly mark any | ||
33 | material not subject to the license. This includes other CC- | ||
34 | licensed material, or material used under an exception or | ||
35 | limitation to copyright. More considerations for licensors: | ||
36 | wiki.creativecommons.org/Considerations_for_licensors | ||
37 | |||
38 | Considerations for the public: By using one of our public | ||
39 | licenses, a licensor grants the public permission to use the | ||
40 | licensed material under specified terms and conditions. If | ||
41 | the licensor's permission is not necessary for any reason--for | ||
42 | example, because of any applicable exception or limitation to | ||
43 | copyright--then that use is not regulated by the license. Our | ||
44 | licenses grant only permissions under copyright and certain | ||
45 | other rights that a licensor has authority to grant. Use of | ||
46 | the licensed material may still be restricted for other | ||
47 | reasons, including because others have copyright or other | ||
48 | rights in the material. A licensor may make special requests, | ||
49 | such as asking that all changes be marked or described. | ||
50 | Although not required by our licenses, you are encouraged to | ||
51 | respect those requests where reasonable. More_considerations | ||
52 | for the public: | ||
53 | wiki.creativecommons.org/Considerations_for_licensees | ||
54 | |||
55 | ======================================================================= | ||
56 | |||
57 | Creative Commons Attribution-ShareAlike 4.0 International Public | ||
58 | License | ||
59 | |||
60 | By exercising the Licensed Rights (defined below), You accept and agree | ||
61 | to be bound by the terms and conditions of this Creative Commons | ||
62 | Attribution-ShareAlike 4.0 International Public License ("Public | ||
63 | License"). To the extent this Public License may be interpreted as a | ||
64 | contract, You are granted the Licensed Rights in consideration of Your | ||
65 | acceptance of these terms and conditions, and the Licensor grants You | ||
66 | such rights in consideration of benefits the Licensor receives from | ||
67 | making the Licensed Material available under these terms and | ||
68 | conditions. | ||
69 | |||
70 | |||
71 | Section 1 -- Definitions. | ||
72 | |||
73 | a. Adapted Material means material subject to Copyright and Similar | ||
74 | Rights that is derived from or based upon the Licensed Material | ||
75 | and in which the Licensed Material is translated, altered, | ||
76 | arranged, transformed, or otherwise modified in a manner requiring | ||
77 | permission under the Copyright and Similar Rights held by the | ||
78 | Licensor. For purposes of this Public License, where the Licensed | ||
79 | Material is a musical work, performance, or sound recording, | ||
80 | Adapted Material is always produced where the Licensed Material is | ||
81 | synched in timed relation with a moving image. | ||
82 | |||
83 | b. Adapter's License means the license You apply to Your Copyright | ||
84 | and Similar Rights in Your contributions to Adapted Material in | ||
85 | accordance with the terms and conditions of this Public License. | ||
86 | |||
87 | c. BY-SA Compatible License means a license listed at | ||
88 | creativecommons.org/compatiblelicenses, approved by Creative | ||
89 | Commons as essentially the equivalent of this Public License. | ||
90 | |||
91 | d. Copyright and Similar Rights means copyright and/or similar rights | ||
92 | closely related to copyright including, without limitation, | ||
93 | performance, broadcast, sound recording, and Sui Generis Database | ||
94 | Rights, without regard to how the rights are labeled or | ||
95 | categorized. For purposes of this Public License, the rights | ||
96 | specified in Section 2(b)(1)-(2) are not Copyright and Similar | ||
97 | Rights. | ||
98 | |||
99 | e. Effective Technological Measures means those measures that, in the | ||
100 | absence of proper authority, may not be circumvented under laws | ||
101 | fulfilling obligations under Article 11 of the WIPO Copyright | ||
102 | Treaty adopted on December 20, 1996, and/or similar international | ||
103 | agreements. | ||
104 | |||
105 | f. Exceptions and Limitations means fair use, fair dealing, and/or | ||
106 | any other exception or limitation to Copyright and Similar Rights | ||
107 | that applies to Your use of the Licensed Material. | ||
108 | |||
109 | g. License Elements means the license attributes listed in the name | ||
110 | of a Creative Commons Public License. The License Elements of this | ||
111 | Public License are Attribution and ShareAlike. | ||
112 | |||
113 | h. Licensed Material means the artistic or literary work, database, | ||
114 | or other material to which the Licensor applied this Public | ||
115 | License. | ||
116 | |||
117 | i. Licensed Rights means the rights granted to You subject to the | ||
118 | terms and conditions of this Public License, which are limited to | ||
119 | all Copyright and Similar Rights that apply to Your use of the | ||
120 | Licensed Material and that the Licensor has authority to license. | ||
121 | |||
122 | j. Licensor means the individual(s) or entity(ies) granting rights | ||
123 | under this Public License. | ||
124 | |||
125 | k. Share means to provide material to the public by any means or | ||
126 | process that requires permission under the Licensed Rights, such | ||
127 | as reproduction, public display, public performance, distribution, | ||
128 | dissemination, communication, or importation, and to make material | ||
129 | available to the public including in ways that members of the | ||
130 | public may access the material from a place and at a time | ||
131 | individually chosen by them. | ||
132 | |||
133 | l. Sui Generis Database Rights means rights other than copyright | ||
134 | resulting from Directive 96/9/EC of the European Parliament and of | ||
135 | the Council of 11 March 1996 on the legal protection of databases, | ||
136 | as amended and/or succeeded, as well as other essentially | ||
137 | equivalent rights anywhere in the world. | ||
138 | |||
139 | m. You means the individual or entity exercising the Licensed Rights | ||
140 | under this Public License. Your has a corresponding meaning. | ||
141 | |||
142 | |||
143 | Section 2 -- Scope. | ||
144 | |||
145 | a. License grant. | ||
146 | |||
147 | 1. Subject to the terms and conditions of this Public License, | ||
148 | the Licensor hereby grants You a worldwide, royalty-free, | ||
149 | non-sublicensable, non-exclusive, irrevocable license to | ||
150 | exercise the Licensed Rights in the Licensed Material to: | ||
151 | |||
152 | a. reproduce and Share the Licensed Material, in whole or | ||
153 | in part; and | ||
154 | |||
155 | b. produce, reproduce, and Share Adapted Material. | ||
156 | |||
157 | 2. Exceptions and Limitations. For the avoidance of doubt, where | ||
158 | Exceptions and Limitations apply to Your use, this Public | ||
159 | License does not apply, and You do not need to comply with | ||
160 | its terms and conditions. | ||
161 | |||
162 | 3. Term. The term of this Public License is specified in Section | ||
163 | 6(a). | ||
164 | |||
165 | 4. Media and formats; technical modifications allowed. The | ||
166 | Licensor authorizes You to exercise the Licensed Rights in | ||
167 | all media and formats whether now known or hereafter created, | ||
168 | and to make technical modifications necessary to do so. The | ||
169 | Licensor waives and/or agrees not to assert any right or | ||
170 | authority to forbid You from making technical modifications | ||
171 | necessary to exercise the Licensed Rights, including | ||
172 | technical modifications necessary to circumvent Effective | ||
173 | Technological Measures. For purposes of this Public License, | ||
174 | simply making modifications authorized by this Section 2(a) | ||
175 | (4) never produces Adapted Material. | ||
176 | |||
177 | 5. Downstream recipients. | ||
178 | |||
179 | a. Offer from the Licensor -- Licensed Material. Every | ||
180 | recipient of the Licensed Material automatically | ||
181 | receives an offer from the Licensor to exercise the | ||
182 | Licensed Rights under the terms and conditions of this | ||
183 | Public License. | ||
184 | |||
185 | b. Additional offer from the Licensor -- Adapted Material. | ||
186 | Every recipient of Adapted Material from You | ||
187 | automatically receives an offer from the Licensor to | ||
188 | exercise the Licensed Rights in the Adapted Material | ||
189 | under the conditions of the Adapter's License You apply. | ||
190 | |||
191 | c. No downstream restrictions. You may not offer or impose | ||
192 | any additional or different terms or conditions on, or | ||
193 | apply any Effective Technological Measures to, the | ||
194 | Licensed Material if doing so restricts exercise of the | ||
195 | Licensed Rights by any recipient of the Licensed | ||
196 | Material. | ||
197 | |||
198 | 6. No endorsement. Nothing in this Public License constitutes or | ||
199 | may be construed as permission to assert or imply that You | ||
200 | are, or that Your use of the Licensed Material is, connected | ||
201 | with, or sponsored, endorsed, or granted official status by, | ||
202 | the Licensor or others designated to receive attribution as | ||
203 | provided in Section 3(a)(1)(A)(i). | ||
204 | |||
205 | b. Other rights. | ||
206 | |||
207 | 1. Moral rights, such as the right of integrity, are not | ||
208 | licensed under this Public License, nor are publicity, | ||
209 | privacy, and/or other similar personality rights; however, to | ||
210 | the extent possible, the Licensor waives and/or agrees not to | ||
211 | assert any such rights held by the Licensor to the limited | ||
212 | extent necessary to allow You to exercise the Licensed | ||
213 | Rights, but not otherwise. | ||
214 | |||
215 | 2. Patent and trademark rights are not licensed under this | ||
216 | Public License. | ||
217 | |||
218 | 3. To the extent possible, the Licensor waives any right to | ||
219 | collect royalties from You for the exercise of the Licensed | ||
220 | Rights, whether directly or through a collecting society | ||
221 | under any voluntary or waivable statutory or compulsory | ||
222 | licensing scheme. In all other cases the Licensor expressly | ||
223 | reserves any right to collect such royalties. | ||
224 | |||
225 | |||
226 | Section 3 -- License Conditions. | ||
227 | |||
228 | Your exercise of the Licensed Rights is expressly made subject to the | ||
229 | following conditions. | ||
230 | |||
231 | a. Attribution. | ||
232 | |||
233 | 1. If You Share the Licensed Material (including in modified | ||
234 | form), You must: | ||
235 | |||
236 | a. retain the following if it is supplied by the Licensor | ||
237 | with the Licensed Material: | ||
238 | |||
239 | i. identification of the creator(s) of the Licensed | ||
240 | Material and any others designated to receive | ||
241 | attribution, in any reasonable manner requested by | ||
242 | the Licensor (including by pseudonym if | ||
243 | designated); | ||
244 | |||
245 | ii. a copyright notice; | ||
246 | |||
247 | iii. a notice that refers to this Public License; | ||
248 | |||
249 | iv. a notice that refers to the disclaimer of | ||
250 | warranties; | ||
251 | |||
252 | v. a URI or hyperlink to the Licensed Material to the | ||
253 | extent reasonably practicable; | ||
254 | |||
255 | b. indicate if You modified the Licensed Material and | ||
256 | retain an indication of any previous modifications; and | ||
257 | |||
258 | c. indicate the Licensed Material is licensed under this | ||
259 | Public License, and include the text of, or the URI or | ||
260 | hyperlink to, this Public License. | ||
261 | |||
262 | 2. You may satisfy the conditions in Section 3(a)(1) in any | ||
263 | reasonable manner based on the medium, means, and context in | ||
264 | which You Share the Licensed Material. For example, it may be | ||
265 | reasonable to satisfy the conditions by providing a URI or | ||
266 | hyperlink to a resource that includes the required | ||
267 | information. | ||
268 | |||
269 | 3. If requested by the Licensor, You must remove any of the | ||
270 | information required by Section 3(a)(1)(A) to the extent | ||
271 | reasonably practicable. | ||
272 | |||
273 | b. ShareAlike. | ||
274 | |||
275 | In addition to the conditions in Section 3(a), if You Share | ||
276 | Adapted Material You produce, the following conditions also apply. | ||
277 | |||
278 | 1. The Adapter's License You apply must be a Creative Commons | ||
279 | license with the same License Elements, this version or | ||
280 | later, or a BY-SA Compatible License. | ||
281 | |||
282 | 2. You must include the text of, or the URI or hyperlink to, the | ||
283 | Adapter's License You apply. You may satisfy this condition | ||
284 | in any reasonable manner based on the medium, means, and | ||
285 | context in which You Share Adapted Material. | ||
286 | |||
287 | 3. You may not offer or impose any additional or different terms | ||
288 | or conditions on, or apply any Effective Technological | ||
289 | Measures to, Adapted Material that restrict exercise of the | ||
290 | rights granted under the Adapter's License You apply. | ||
291 | |||
292 | |||
293 | Section 4 -- Sui Generis Database Rights. | ||
294 | |||
295 | Where the Licensed Rights include Sui Generis Database Rights that | ||
296 | apply to Your use of the Licensed Material: | ||
297 | |||
298 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right | ||
299 | to extract, reuse, reproduce, and Share all or a substantial | ||
300 | portion of the contents of the database; | ||
301 | |||
302 | b. if You include all or a substantial portion of the database | ||
303 | contents in a database in which You have Sui Generis Database | ||
304 | Rights, then the database in which You have Sui Generis Database | ||
305 | Rights (but not its individual contents) is Adapted Material, | ||
306 | |||
307 | including for purposes of Section 3(b); and | ||
308 | c. You must comply with the conditions in Section 3(a) if You Share | ||
309 | all or a substantial portion of the contents of the database. | ||
310 | |||
311 | For the avoidance of doubt, this Section 4 supplements and does not | ||
312 | replace Your obligations under this Public License where the Licensed | ||
313 | Rights include other Copyright and Similar Rights. | ||
314 | |||
315 | |||
316 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. | ||
317 | |||
318 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE | ||
319 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS | ||
320 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF | ||
321 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, | ||
322 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, | ||
323 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR | ||
324 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, | ||
325 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT | ||
326 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT | ||
327 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. | ||
328 | |||
329 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE | ||
330 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, | ||
331 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, | ||
332 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, | ||
333 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR | ||
334 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN | ||
335 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR | ||
336 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR | ||
337 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. | ||
338 | |||
339 | c. The disclaimer of warranties and limitation of liability provided | ||
340 | above shall be interpreted in a manner that, to the extent | ||
341 | possible, most closely approximates an absolute disclaimer and | ||
342 | waiver of all liability. | ||
343 | |||
344 | |||
345 | Section 6 -- Term and Termination. | ||
346 | |||
347 | a. This Public License applies for the term of the Copyright and | ||
348 | Similar Rights licensed here. However, if You fail to comply with | ||
349 | this Public License, then Your rights under this Public License | ||
350 | terminate automatically. | ||
351 | |||
352 | b. Where Your right to use the Licensed Material has terminated under | ||
353 | Section 6(a), it reinstates: | ||
354 | |||
355 | 1. automatically as of the date the violation is cured, provided | ||
356 | it is cured within 30 days of Your discovery of the | ||
357 | violation; or | ||
358 | |||
359 | 2. upon express reinstatement by the Licensor. | ||
360 | |||
361 | For the avoidance of doubt, this Section 6(b) does not affect any | ||
362 | right the Licensor may have to seek remedies for Your violations | ||
363 | of this Public License. | ||
364 | |||
365 | c. For the avoidance of doubt, the Licensor may also offer the | ||
366 | Licensed Material under separate terms or conditions or stop | ||
367 | distributing the Licensed Material at any time; however, doing so | ||
368 | will not terminate this Public License. | ||
369 | |||
370 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public | ||
371 | License. | ||
372 | |||
373 | |||
374 | Section 7 -- Other Terms and Conditions. | ||
375 | |||
376 | a. The Licensor shall not be bound by any additional or different | ||
377 | terms or conditions communicated by You unless expressly agreed. | ||
378 | |||
379 | b. Any arrangements, understandings, or agreements regarding the | ||
380 | Licensed Material not stated herein are separate from and | ||
381 | independent of the terms and conditions of this Public License. | ||
382 | |||
383 | |||
384 | Section 8 -- Interpretation. | ||
385 | |||
386 | a. For the avoidance of doubt, this Public License does not, and | ||
387 | shall not be interpreted to, reduce, limit, restrict, or impose | ||
388 | conditions on any use of the Licensed Material that could lawfully | ||
389 | be made without permission under this Public License. | ||
390 | |||
391 | b. To the extent possible, if any provision of this Public License is | ||
392 | deemed unenforceable, it shall be automatically reformed to the | ||
393 | minimum extent necessary to make it enforceable. If the provision | ||
394 | cannot be reformed, it shall be severed from this Public License | ||
395 | without affecting the enforceability of the remaining terms and | ||
396 | conditions. | ||
397 | |||
398 | c. No term or condition of this Public License will be waived and no | ||
399 | failure to comply consented to unless expressly agreed to by the | ||
400 | Licensor. | ||
401 | |||
402 | d. Nothing in this Public License constitutes or may be interpreted | ||
403 | as a limitation upon, or waiver of, any privileges and immunities | ||
404 | that apply to the Licensor or You, including from the legal | ||
405 | processes of any jurisdiction or authority. | ||
406 | |||
407 | |||
408 | ======================================================================= | ||
409 | |||
410 | Creative Commons is not a party to its public licenses. | ||
411 | Notwithstanding, Creative Commons may elect to apply one of its public | ||
412 | licenses to material it publishes and in those instances will be | ||
413 | considered the "Licensor." Except for the limited purpose of indicating | ||
414 | that material is shared under a Creative Commons public license or as | ||
415 | otherwise permitted by the Creative Commons policies published at | ||
416 | creativecommons.org/policies, Creative Commons does not authorize the | ||
417 | use of the trademark "Creative Commons" or any other trademark or logo | ||
418 | of Creative Commons without its prior written consent including, | ||
419 | without limitation, in connection with any unauthorized modifications | ||
420 | to any of its public licenses or any other arrangements, | ||
421 | understandings, or agreements concerning use of licensed material. For | ||
422 | the avoidance of doubt, this paragraph does not form part of the public | ||
423 | licenses. | ||
424 | |||
425 | Creative Commons may be contacted at creativecommons.org. | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 0000000..477be8b --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS | |||
@@ -0,0 +1,27 @@ | |||
# go-units maintainers file
#
# This file describes who runs the docker/go-units project and how.
4 | # This is a living document - if you see something out of date or missing, speak up! | ||
5 | # | ||
6 | # It is structured to be consumable by both humans and programs. | ||
7 | # To extract its contents programmatically, use any TOML-compliant parser. | ||
8 | # | ||
9 | # This file is compiled into the MAINTAINERS file in docker/opensource. | ||
10 | # | ||
11 | [Org] | ||
12 | [Org."Core maintainers"] | ||
13 | people = [ | ||
14 | "calavera", | ||
15 | ] | ||
16 | |||
17 | [people] | ||
18 | |||
19 | # A reference list of all people associated with the project. | ||
20 | # All other sections should refer to people by their canonical key | ||
21 | # in the people section. | ||
22 | |||
23 | # ADD YOURSELF HERE IN ALPHABETICAL ORDER | ||
24 | [people.calavera] | ||
25 | Name = "David Calavera" | ||
26 | Email = "david.calavera@gmail.com" | ||
27 | GitHub = "calavera" | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md new file mode 100644 index 0000000..3ce4d79 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md | |||
@@ -0,0 +1,18 @@ | |||
1 | [![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) | ||
2 | |||
3 | # Introduction | ||
4 | |||
5 | go-units is a library to transform human friendly measurements into machine friendly values. | ||
6 | |||
7 | ## Usage | ||
8 | |||
9 | See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. | ||
10 | |||
11 | ## Copyright and license | ||
12 | |||
Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code
is released under the Apache 2.0 license. The README.md file, and files in the
"docs" folder are licensed under the Creative Commons Attribution-ShareAlike
4.0 International License under the terms and conditions set forth in the file
"LICENSE.docs". You may obtain a duplicate copy of the same license, titled
CC-BY-SA-4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml new file mode 100644 index 0000000..9043b35 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml | |||
@@ -0,0 +1,11 @@ | |||
1 | dependencies: | ||
2 | post: | ||
3 | # install golint | ||
4 | - go get github.com/golang/lint/golint | ||
5 | |||
6 | test: | ||
7 | pre: | ||
8 | # run analysis before tests | ||
9 | - go vet ./... | ||
10 | - test -z "$(golint ./... | tee /dev/stderr)" | ||
11 | - test -z "$(gofmt -s -l . | tee /dev/stderr)" | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration.go new file mode 100644 index 0000000..c219a8a --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration.go | |||
@@ -0,0 +1,33 @@ | |||
1 | // Package units provides helper function to parse and print size and time units | ||
2 | // in human-readable format. | ||
3 | package units | ||
4 | |||
5 | import ( | ||
6 | "fmt" | ||
7 | "time" | ||
8 | ) | ||
9 | |||
10 | // HumanDuration returns a human-readable approximation of a duration | ||
11 | // (eg. "About a minute", "4 hours ago", etc.). | ||
12 | func HumanDuration(d time.Duration) string { | ||
13 | if seconds := int(d.Seconds()); seconds < 1 { | ||
14 | return "Less than a second" | ||
15 | } else if seconds < 60 { | ||
16 | return fmt.Sprintf("%d seconds", seconds) | ||
17 | } else if minutes := int(d.Minutes()); minutes == 1 { | ||
18 | return "About a minute" | ||
19 | } else if minutes < 60 { | ||
20 | return fmt.Sprintf("%d minutes", minutes) | ||
21 | } else if hours := int(d.Hours()); hours == 1 { | ||
22 | return "About an hour" | ||
23 | } else if hours < 48 { | ||
24 | return fmt.Sprintf("%d hours", hours) | ||
25 | } else if hours < 24*7*2 { | ||
26 | return fmt.Sprintf("%d days", hours/24) | ||
27 | } else if hours < 24*30*3 { | ||
28 | return fmt.Sprintf("%d weeks", hours/24/7) | ||
29 | } else if hours < 24*365*2 { | ||
30 | return fmt.Sprintf("%d months", hours/24/30) | ||
31 | } | ||
32 | return fmt.Sprintf("%d years", int(d.Hours())/24/365) | ||
33 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size.go new file mode 100644 index 0000000..3b59daf --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size.go | |||
@@ -0,0 +1,95 @@ | |||
1 | package units | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "regexp" | ||
6 | "strconv" | ||
7 | "strings" | ||
8 | ) | ||
9 | |||
10 | // See: http://en.wikipedia.org/wiki/Binary_prefix | ||
11 | const ( | ||
12 | // Decimal | ||
13 | |||
14 | KB = 1000 | ||
15 | MB = 1000 * KB | ||
16 | GB = 1000 * MB | ||
17 | TB = 1000 * GB | ||
18 | PB = 1000 * TB | ||
19 | |||
20 | // Binary | ||
21 | |||
22 | KiB = 1024 | ||
23 | MiB = 1024 * KiB | ||
24 | GiB = 1024 * MiB | ||
25 | TiB = 1024 * GiB | ||
26 | PiB = 1024 * TiB | ||
27 | ) | ||
28 | |||
29 | type unitMap map[string]int64 | ||
30 | |||
31 | var ( | ||
32 | decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} | ||
33 | binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} | ||
34 | sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) | ||
35 | ) | ||
36 | |||
37 | var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} | ||
38 | var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} | ||
39 | |||
40 | // CustomSize returns a human-readable approximation of a size | ||
41 | // using custom format. | ||
42 | func CustomSize(format string, size float64, base float64, _map []string) string { | ||
43 | i := 0 | ||
44 | for size >= base { | ||
45 | size = size / base | ||
46 | i++ | ||
47 | } | ||
48 | return fmt.Sprintf(format, size, _map[i]) | ||
49 | } | ||
50 | |||
51 | // HumanSize returns a human-readable approximation of a size | ||
52 | // capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). | ||
53 | func HumanSize(size float64) string { | ||
54 | return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) | ||
55 | } | ||
56 | |||
57 | // BytesSize returns a human-readable size in bytes, kibibytes, | ||
58 | // mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). | ||
59 | func BytesSize(size float64) string { | ||
60 | return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) | ||
61 | } | ||
62 | |||
63 | // FromHumanSize returns an integer from a human-readable specification of a | ||
64 | // size using SI standard (eg. "44kB", "17MB"). | ||
65 | func FromHumanSize(size string) (int64, error) { | ||
66 | return parseSize(size, decimalMap) | ||
67 | } | ||
68 | |||
69 | // RAMInBytes parses a human-readable string representing an amount of RAM | ||
70 | // in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and | ||
71 | // returns the number of bytes, or -1 if the string is unparseable. | ||
72 | // Units are case-insensitive, and the 'b' suffix is optional. | ||
73 | func RAMInBytes(size string) (int64, error) { | ||
74 | return parseSize(size, binaryMap) | ||
75 | } | ||
76 | |||
77 | // Parses the human-readable size string into the amount it represents. | ||
78 | func parseSize(sizeStr string, uMap unitMap) (int64, error) { | ||
79 | matches := sizeRegex.FindStringSubmatch(sizeStr) | ||
80 | if len(matches) != 3 { | ||
81 | return -1, fmt.Errorf("invalid size: '%s'", sizeStr) | ||
82 | } | ||
83 | |||
84 | size, err := strconv.ParseInt(matches[1], 10, 0) | ||
85 | if err != nil { | ||
86 | return -1, err | ||
87 | } | ||
88 | |||
89 | unitPrefix := strings.ToLower(matches[2]) | ||
90 | if mul, ok := uMap[unitPrefix]; ok { | ||
91 | size *= mul | ||
92 | } | ||
93 | |||
94 | return size, nil | ||
95 | } | ||
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit.go new file mode 100644 index 0000000..5ac7fd8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit.go | |||
@@ -0,0 +1,118 @@ | |||
1 | package units | ||
2 | |||
3 | import ( | ||
4 | "fmt" | ||
5 | "strconv" | ||
6 | "strings" | ||
7 | ) | ||
8 | |||
// Ulimit is a human friendly version of Rlimit.
type Ulimit struct {
	Name string // resource name, e.g. "nofile"; must be a key of ulimitNameMapping
	Hard int64  // hard (ceiling) limit
	Soft int64  // soft (effective) limit
}
15 | |||
// Rlimit specifies the resource limits, such as max open files.
type Rlimit struct {
	Type int    `json:"type,omitempty"` // resource number for the syscall (see the rlimit* constants below)
	Hard uint64 `json:"hard,omitempty"` // hard (ceiling) limit
	Soft uint64 `json:"soft,omitempty"` // soft (effective) limit
}
22 | |||
const (
	// magic numbers for making the syscall
	// some of these are defined in the syscall package, but not all.
	// Also since Windows client doesn't get access to the syscall package, need to
	// define these here
	// NOTE(review): values appear to mirror the Linux RLIMIT_* numbering —
	// confirm against the syscall package before reusing on other platforms.
	rlimitAs         = 9
	rlimitCore       = 4
	rlimitCPU        = 0
	rlimitData       = 2
	rlimitFsize      = 1
	rlimitLocks      = 10
	rlimitMemlock    = 8
	rlimitMsgqueue   = 12
	rlimitNice       = 13
	rlimitNofile     = 7
	rlimitNproc      = 6
	rlimitRss        = 5
	rlimitRtprio     = 14
	rlimitRttime     = 15
	rlimitSigpending = 11
	rlimitStack      = 3
)
45 | |||
// ulimitNameMapping maps the human-readable resource name accepted by
// ParseUlimit (e.g. "nofile") to its rlimit resource number.
var ulimitNameMapping = map[string]int{
	//"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
	"core":       rlimitCore,
	"cpu":        rlimitCPU,
	"data":       rlimitData,
	"fsize":      rlimitFsize,
	"locks":      rlimitLocks,
	"memlock":    rlimitMemlock,
	"msgqueue":   rlimitMsgqueue,
	"nice":       rlimitNice,
	"nofile":     rlimitNofile,
	"nproc":      rlimitNproc,
	"rss":        rlimitRss,
	"rtprio":     rlimitRtprio,
	"rttime":     rlimitRttime,
	"sigpending": rlimitSigpending,
	"stack":      rlimitStack,
}
64 | |||
65 | // ParseUlimit parses and returns a Ulimit from the specified string. | ||
66 | func ParseUlimit(val string) (*Ulimit, error) { | ||
67 | parts := strings.SplitN(val, "=", 2) | ||
68 | if len(parts) != 2 { | ||
69 | return nil, fmt.Errorf("invalid ulimit argument: %s", val) | ||
70 | } | ||
71 | |||
72 | if _, exists := ulimitNameMapping[parts[0]]; !exists { | ||
73 | return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) | ||
74 | } | ||
75 | |||
76 | var ( | ||
77 | soft int64 | ||
78 | hard = &soft // default to soft in case no hard was set | ||
79 | temp int64 | ||
80 | err error | ||
81 | ) | ||
82 | switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { | ||
83 | case 2: | ||
84 | temp, err = strconv.ParseInt(limitVals[1], 10, 64) | ||
85 | if err != nil { | ||
86 | return nil, err | ||
87 | } | ||
88 | hard = &temp | ||
89 | fallthrough | ||
90 | case 1: | ||
91 | soft, err = strconv.ParseInt(limitVals[0], 10, 64) | ||
92 | if err != nil { | ||
93 | return nil, err | ||
94 | } | ||
95 | default: | ||
96 | return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) | ||
97 | } | ||
98 | |||
99 | if soft > *hard { | ||
100 | return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) | ||
101 | } | ||
102 | |||
103 | return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil | ||
104 | } | ||
105 | |||
106 | // GetRlimit returns the RLimit corresponding to Ulimit. | ||
107 | func (u *Ulimit) GetRlimit() (*Rlimit, error) { | ||
108 | t, exists := ulimitNameMapping[u.Name] | ||
109 | if !exists { | ||
110 | return nil, fmt.Errorf("invalid ulimit name %s", u.Name) | ||
111 | } | ||
112 | |||
113 | return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil | ||
114 | } | ||
115 | |||
116 | func (u *Ulimit) String() string { | ||
117 | return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) | ||
118 | } | ||