aboutsummaryrefslogtreecommitdiffhomepage
path: root/vendor/github.com/ulikunitz
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/ulikunitz')
-rw-r--r--vendor/github.com/ulikunitz/xz/.gitignore25
-rw-r--r--vendor/github.com/ulikunitz/xz/LICENSE26
-rw-r--r--vendor/github.com/ulikunitz/xz/README.md71
-rw-r--r--vendor/github.com/ulikunitz/xz/TODO.md315
-rw-r--r--vendor/github.com/ulikunitz/xz/bits.go74
-rw-r--r--vendor/github.com/ulikunitz/xz/crc.go54
-rw-r--r--vendor/github.com/ulikunitz/xz/example.go40
-rw-r--r--vendor/github.com/ulikunitz/xz/format.go728
-rw-r--r--vendor/github.com/ulikunitz/xz/fox.xzbin0 -> 104 bytes
-rw-r--r--vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go181
-rw-r--r--vendor/github.com/ulikunitz/xz/internal/hash/doc.go14
-rw-r--r--vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go66
-rw-r--r--vendor/github.com/ulikunitz/xz/internal/hash/roller.go29
-rw-r--r--vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go457
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/bintree.go523
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/bitops.go45
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/breader.go39
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/buffer.go171
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/bytewriter.go37
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/decoder.go277
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/decoderdict.go135
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/directcodec.go49
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/distcodec.go156
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/encoder.go268
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/encoderdict.go149
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/fox.lzmabin0 -> 67 bytes
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/hashtable.go309
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/header.go167
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/header2.go398
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go129
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/literalcodec.go132
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go52
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/operation.go80
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/prob.go53
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/properties.go69
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/rangecodec.go248
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/reader.go100
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/reader2.go232
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/state.go151
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/treecodecs.go133
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/writer.go209
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/writer2.go305
-rw-r--r--vendor/github.com/ulikunitz/xz/lzmafilter.go117
-rw-r--r--vendor/github.com/ulikunitz/xz/make-docs5
-rw-r--r--vendor/github.com/ulikunitz/xz/reader.go373
-rw-r--r--vendor/github.com/ulikunitz/xz/writer.go386
46 files changed, 7577 insertions, 0 deletions
diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore
new file mode 100644
index 0000000..e3c2fc2
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/.gitignore
@@ -0,0 +1,25 @@
1# .gitignore
2
3TODO.html
4README.html
5
6lzma/writer.txt
7lzma/reader.txt
8
9cmd/gxz/gxz
10cmd/xb/xb
11
12# test executables
13*.test
14
15# profile files
16*.out
17
18# vim swap file
19.*.swp
20
21# executables on windows
22*.exe
23
24# default compression test file
25enwik8*
diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE
new file mode 100644
index 0000000..58ebdc1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/LICENSE
@@ -0,0 +1,26 @@
1Copyright (c) 2014-2016 Ulrich Kunitz
2All rights reserved.
3
4Redistribution and use in source and binary forms, with or without
5modification, are permitted provided that the following conditions are met:
6
7* Redistributions of source code must retain the above copyright notice, this
8 list of conditions and the following disclaimer.
9
10* Redistributions in binary form must reproduce the above copyright notice,
11 this list of conditions and the following disclaimer in the documentation
12 and/or other materials provided with the distribution.
13
14* My name, Ulrich Kunitz, may not be used to endorse or promote products
15 derived from this software without specific prior written permission.
16
17THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
21FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md
new file mode 100644
index 0000000..969ae7a
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/README.md
@@ -0,0 +1,71 @@
1# Package xz
2
3This Go language package supports the reading and writing of xz
4compressed streams. It includes also a gxz command for compressing and
5decompressing data. The package is completely written in Go and doesn't
6have any dependency on any C code.
7
8The package is currently under development. There might be bugs and APIs
9are not considered stable. At this time the package cannot compete with
10the xz tool regarding compression speed and size. The algorithms there
11have been developed over a long time and are highly optimized. However
12there are a number of improvements planned and I'm very optimistic about
13parallel compression and decompression. Stay tuned!
14
15# Using the API
16
17The following example program shows how to use the API.
18
19 package main
20
21 import (
22 "bytes"
23 "io"
24 "log"
25 "os"
26
27 "github.com/ulikunitz/xz"
28 )
29
30 func main() {
31 const text = "The quick brown fox jumps over the lazy dog.\n"
32 var buf bytes.Buffer
33 // compress text
34 w, err := xz.NewWriter(&buf)
35 if err != nil {
36 log.Fatalf("xz.NewWriter error %s", err)
37 }
38 if _, err := io.WriteString(w, text); err != nil {
39 log.Fatalf("WriteString error %s", err)
40 }
41 if err := w.Close(); err != nil {
42 log.Fatalf("w.Close error %s", err)
43 }
44 // decompress buffer and write output to stdout
45 r, err := xz.NewReader(&buf)
46 if err != nil {
47 log.Fatalf("NewReader error %s", err)
48 }
49 if _, err = io.Copy(os.Stdout, r); err != nil {
50 log.Fatalf("io.Copy error %s", err)
51 }
52 }
53
54# Using the gxz compression tool
55
56The package includes a gxz command line utility for compression and
57decompression.
58
59Use following command for installation:
60
61 $ go get github.com/ulikunitz/xz/cmd/gxz
62
63To test it call the following command.
64
65 $ gxz bigfile
66
67After some time a much smaller file bigfile.xz will replace bigfile.
68To decompress it use the following command.
69
70 $ gxz -d bigfile.xz
71
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md
new file mode 100644
index 0000000..7b34c0c
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/TODO.md
@@ -0,0 +1,315 @@
1# TODO list
2
3## Release v0.6
4
51. Review encoder and check for lzma improvements under xz.
62. Fix binary tree matcher.
73. Compare compression ratio with xz tool using comparable parameters
8 and optimize parameters
94. Do some optimizations
10 - rename operation action and make it a simple type of size 8
11 - make maxMatches, wordSize parameters
12 - stop searching after a certain length is found (parameter sweetLen)
13
14## Release v0.7
15
161. Optimize code
172. Do statistical analysis to get linear presets.
183. Test sync.Pool compatability for xz and lzma Writer and Reader
193. Fuzz optimized code.
20
21## Release v0.8
22
231. Support parallel go routines for writing and reading xz files.
242. Support a ReaderAt interface for xz files with small block sizes.
253. Improve compatibility between gxz and xz
264. Provide manual page for gxz
27
28## Release v0.9
29
301. Improve documentation
312. Fuzz again
32
33## Release v1.0
34
351. Full functioning gxz
362. Add godoc URL to README.md (godoc.org)
373. Resolve all issues.
384. Define release candidates.
395. Public announcement.
40
41## Package lzma
42
43### Release v0.6
44
45- Rewrite Encoder into a simple greedy one-op-at-a-time encoder
46 including
47 + simple scan at the dictionary head for the same byte
48 + use the killer byte (requiring matches to get longer, the first
49 test should be the byte that would make the match longer)
50
51
52## Optimizations
53
54- There may be a lot of false sharing in lzma.State; check whether this
55 can be improved by reorganizing the internal structure of it.
56- Check whether batching encoding and decoding improves speed.
57
58### DAG optimizations
59
60- Use full buffer to create minimal bit-length above range encoder.
61- Might be too slow (see v0.4)
62
63### Different match finders
64
65- hashes with 2, 3 characters additional to 4 characters
66- binary trees with 2-7 characters (uint64 as key, use uint32 as
67 pointers into a an array)
68- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
69 into an array with bit-steeling for the colors)
70
71## Release Procedure
72
73- execute goch -l for all packages; probably with lower param like 0.5.
74- check orthography with gospell
75- Write release notes in doc/relnotes.
76- Update README.md
77- xb copyright . in xz directory to ensure all new files have Copyright
78 header
79- VERSION=<version> go generate github.com/ulikunitz/xz/... to update
80 version files
81- Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
82- Update TODO.md - write short log entry
83- git checkout master && git merge dev
84- git tag -a <version>
85- git push
86
87## Log
88
89### 2017-06-05
90
91Release v0.5.4 fixes issues #15 of another problem with the padding size
92check for the xz block header. I removed the check completely.
93
94### 2017-02-15
95
96Release v0.5.3 fixes issue #12 regarding the decompression of an empty
97XZ stream. Many thanks to Tomasz KÅ‚ak, who reported the issue.
98
99### 2016-12-02
100
101Release v0.5.2 became necessary to allow the decoding of xz files with
1024-byte padding in the block header. Many thanks to Greg, who reported
103the issue.
104
105### 2016-07-23
106
107Release v0.5.1 became necessary to fix problems with 32-bit platforms.
108Many thanks to Bruno Brigas, who reported the issue.
109
110### 2016-07-04
111
112Release v0.5 provides improvements to the compressor and provides support for
113the decompression of xz files with multiple xz streams.
114
115### 2016-01-31
116
117Another compression rate increase by checking the byte at length of the
118best match first, before checking the whole prefix. This makes the
119compressor even faster. We have now a large time budget to beat the
120compression ratio of the xz tool. For enwik8 we have now over 40 seconds
121to reduce the compressed file size for another 7 MiB.
122
123### 2016-01-30
124
125I simplified the encoder. Speed and compression rate increased
126dramatically. A high compression rate affects also the decompression
127speed. The approach with the buffer and optimizing for operation
128compression rate has not been successful. Going for the maximum length
129appears to be the best approach.
130
131### 2016-01-28
132
133The release v0.4 is ready. It provides a working xz implementation,
134which is rather slow, but works and is interoperable with the xz tool.
135It is an important milestone.
136
137### 2016-01-10
138
139I have the first working implementation of an xz reader and writer. I'm
140happy about reaching this milestone.
141
142### 2015-12-02
143
144I'm now ready to implement xz because, I have a working LZMA2
145implementation. I decided today that v0.4 will use the slow encoder
146using the operations buffer to be able to go back, if I intend to do so.
147
148### 2015-10-21
149
150I have restarted the work on the library. While trying to implement
151LZMA2, I discovered that I need to resimplify the encoder and decoder
152functions. The option approach is too complicated. Using a limited byte
153writer and not caring for written bytes at all and not to try to handle
154uncompressed data simplifies the LZMA encoder and decoder much.
155Processing uncompressed data and handling limits is a feature of the
156LZMA2 format not of LZMA.
157
158I learned an interesting method from the LZO format. If the last copy is
159too far away they are moving the head one 2 bytes and not 1 byte to
160reduce processing times.
161
162### 2015-08-26
163
164I have now reimplemented the lzma package. The code is reasonably fast,
165but can still be optimized. The next step is to implement LZMA2 and then
166xz.
167
168### 2015-07-05
169
170Created release v0.3. The version is the foundation for a full xz
171implementation that is the target of v0.4.
172
173### 2015-06-11
174
175The gflag package has been developed because I couldn't use flag and
176pflag for a fully compatible support of gzip's and lzma's options. It
177seems to work now quite nicely.
178
179### 2015-06-05
180
181The overflow issue was interesting to research, however Henry S. Warren
182Jr. Hacker's Delight book was very helpful as usual and had the issue
183explained perfectly. Fefe's information on his website was based on the
184C FAQ and quite bad, because it didn't address the issue of -MININT ==
185MININT.
186
187### 2015-06-04
188
189It has been a productive day. I improved the interface of lzma.Reader
190and lzma.Writer and fixed the error handling.
191
192### 2015-06-01
193
194By computing the bit length of the LZMA operations I was able to
195improve the greedy algorithm implementation. By using an 8 MByte buffer
196the compression rate was not as good as for xz but already better then
197gzip default.
198
199Compression is currently slow, but this is something we will be able to
200improve over time.
201
202### 2015-05-26
203
204Checked the license of ogier/pflag. The binary lzmago binary should
205include the license terms for the pflag library.
206
207I added the endorsement clause as used by Google for the Go sources the
208LICENSE file.
209
210### 2015-05-22
211
212The package lzb contains now the basic implementation for creating or
213reading LZMA byte streams. It allows the support for the implementation
214of the DAG-shortest-path algorithm for the compression function.
215
216### 2015-04-23
217
218Completed yesterday the lzbase classes. I'm a little bit concerned that
219using the components may require too much code, but on the other hand
220there is a lot of flexibility.
221
222### 2015-04-22
223
224Implemented Reader and Writer during the Bayern game against Porto. The
225second half gave me enough time.
226
227### 2015-04-21
228
229While showering today morning I discovered that the design for OpEncoder
230and OpDecoder doesn't work, because encoding/decoding might depend on
231the current status of the dictionary. This is not exactly the right way
232to start the day.
233
234Therefore we need to keep the Reader and Writer design. This time around
235we simplify it by ignoring size limits. These can be added by wrappers
236around the Reader and Writer interfaces. The Parameters type isn't
237needed anymore.
238
239However I will implement a ReaderState and WriterState type to use
240static typing to ensure the right State object is combined with the
241right lzbase.Reader and lzbase.Writer.
242
243As a start I have implemented ReaderState and WriterState to ensure
244that the state for reading is only used by readers and WriterState only
245used by Writers.
246
247### 2015-04-20
248
249Today I implemented the OpDecoder and tested OpEncoder and OpDecoder.
250
251### 2015-04-08
252
253Came up with a new simplified design for lzbase. I implemented already
254the type State that replaces OpCodec.
255
256### 2015-04-06
257
258The new lzma package is now fully usable and lzmago is using it now. The
259old lzma package has been completely removed.
260
261### 2015-04-05
262
263Implemented lzma.Reader and tested it.
264
265### 2015-04-04
266
267Implemented baseReader by adapting code form lzma.Reader.
268
269### 2015-04-03
270
271The opCodec has been copied yesterday to lzma2. opCodec has a high
272number of dependencies on other files in lzma2. Therefore I had to copy
273almost all files from lzma.
274
275### 2015-03-31
276
277Removed only a TODO item.
278
279However in Francesco Campoy's presentation "Go for Javaneros
280(Javaïstes?)" is the the idea that using an embedded field E, all the
281methods of E will be defined on T. If E is an interface T satisfies E.
282
283https://talks.golang.org/2014/go4java.slide#51
284
285I have never used this, but it seems to be a cool idea.
286
287### 2015-03-30
288
289Finished the type writerDict and wrote a simple test.
290
291### 2015-03-25
292
293I started to implement the writerDict.
294
295### 2015-03-24
296
297After thinking long about the LZMA2 code and several false starts, I
298have now a plan to create a self-sufficient lzma2 package that supports
299the classic LZMA format as well as LZMA2. The core idea is to support a
300baseReader and baseWriter type that support the basic LZMA stream
301without any headers. Both types must support the reuse of dictionaries
302and the opCodec.
303
304### 2015-01-10
305
3061. Implemented simple lzmago tool
3072. Tested tool against large 4.4G file
308 - compression worked correctly; tested decompression with lzma
309 - decompression hits a full buffer condition
3103. Fixed a bug in the compressor and wrote a test for it
3114. Executed full cycle for 4.4 GB file; performance can be improved ;-)
312
313### 2015-01-11
314
315- Release v0.2 because of the working LZMA encoder and decoder
diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go
new file mode 100644
index 0000000..fadc1a5
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/bits.go
@@ -0,0 +1,74 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package xz
6
7import (
8 "errors"
9 "io"
10)
11
12// putUint32LE puts the little-endian representation of x into the first
13// four bytes of p.
14func putUint32LE(p []byte, x uint32) {
15 p[0] = byte(x)
16 p[1] = byte(x >> 8)
17 p[2] = byte(x >> 16)
18 p[3] = byte(x >> 24)
19}
20
21// putUint64LE puts the little-endian representation of x into the first
22// eight bytes of p.
23func putUint64LE(p []byte, x uint64) {
24 p[0] = byte(x)
25 p[1] = byte(x >> 8)
26 p[2] = byte(x >> 16)
27 p[3] = byte(x >> 24)
28 p[4] = byte(x >> 32)
29 p[5] = byte(x >> 40)
30 p[6] = byte(x >> 48)
31 p[7] = byte(x >> 56)
32}
33
34// uint32LE converts a little endian representation to an uint32 value.
35func uint32LE(p []byte) uint32 {
36 return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
37 uint32(p[3])<<24
38}
39
40// putUvarint puts a uvarint representation of x into the byte slice.
41func putUvarint(p []byte, x uint64) int {
42 i := 0
43 for x >= 0x80 {
44 p[i] = byte(x) | 0x80
45 x >>= 7
46 i++
47 }
48 p[i] = byte(x)
49 return i + 1
50}
51
52// errOverflow indicates an overflow of the 64-bit unsigned integer.
53var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")
54
55// readUvarint reads a uvarint from the given byte reader.
56func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
57 var s uint
58 i := 0
59 for {
60 b, err := r.ReadByte()
61 if err != nil {
62 return x, i, err
63 }
64 i++
65 if b < 0x80 {
66 if i > 10 || i == 10 && b > 1 {
67 return x, i, errOverflowU64
68 }
69 return x | uint64(b)<<s, i, nil
70 }
71 x |= uint64(b&0x7f) << s
72 s += 7
73 }
74}
diff --git a/vendor/github.com/ulikunitz/xz/crc.go b/vendor/github.com/ulikunitz/xz/crc.go
new file mode 100644
index 0000000..b44dca9
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/crc.go
@@ -0,0 +1,54 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package xz
6
7import (
8 "hash"
9 "hash/crc32"
10 "hash/crc64"
11)
12
13// crc32Hash implements the hash.Hash32 interface with Sum returning the
14// crc32 value in little-endian encoding.
15type crc32Hash struct {
16 hash.Hash32
17}
18
19// Sum returns the crc32 value as little endian.
20func (h crc32Hash) Sum(b []byte) []byte {
21 p := make([]byte, 4)
22 putUint32LE(p, h.Hash32.Sum32())
23 b = append(b, p...)
24 return b
25}
26
27// newCRC32 returns a CRC-32 hash that returns the 64-bit value in
28// little-endian encoding using the IEEE polynomial.
29func newCRC32() hash.Hash {
30 return crc32Hash{Hash32: crc32.NewIEEE()}
31}
32
33// crc64Hash implements the Hash64 interface with Sum returning the
34// CRC-64 value in little-endian encoding.
35type crc64Hash struct {
36 hash.Hash64
37}
38
39// Sum returns the CRC-64 value in little-endian encoding.
40func (h crc64Hash) Sum(b []byte) []byte {
41 p := make([]byte, 8)
42 putUint64LE(p, h.Hash64.Sum64())
43 b = append(b, p...)
44 return b
45}
46
47// crc64Table is used to create a CRC-64 hash.
48var crc64Table = crc64.MakeTable(crc64.ECMA)
49
50// newCRC64 returns a CRC-64 hash that returns the 64-bit value in
51// little-endian encoding using the ECMA polynomial.
52func newCRC64() hash.Hash {
53 return crc64Hash{Hash64: crc64.New(crc64Table)}
54}
diff --git a/vendor/github.com/ulikunitz/xz/example.go b/vendor/github.com/ulikunitz/xz/example.go
new file mode 100644
index 0000000..855e60a
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/example.go
@@ -0,0 +1,40 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// +build ignore
6
7package main
8
9import (
10 "bytes"
11 "io"
12 "log"
13 "os"
14
15 "github.com/ulikunitz/xz"
16)
17
18func main() {
19 const text = "The quick brown fox jumps over the lazy dog.\n"
20 var buf bytes.Buffer
21 // compress text
22 w, err := xz.NewWriter(&buf)
23 if err != nil {
24 log.Fatalf("xz.NewWriter error %s", err)
25 }
26 if _, err := io.WriteString(w, text); err != nil {
27 log.Fatalf("WriteString error %s", err)
28 }
29 if err := w.Close(); err != nil {
30 log.Fatalf("w.Close error %s", err)
31 }
32 // decompress buffer and write output to stdout
33 r, err := xz.NewReader(&buf)
34 if err != nil {
35 log.Fatalf("NewReader error %s", err)
36 }
37 if _, err = io.Copy(os.Stdout, r); err != nil {
38 log.Fatalf("io.Copy error %s", err)
39 }
40}
diff --git a/vendor/github.com/ulikunitz/xz/format.go b/vendor/github.com/ulikunitz/xz/format.go
new file mode 100644
index 0000000..798159c
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/format.go
@@ -0,0 +1,728 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package xz
6
7import (
8 "bytes"
9 "crypto/sha256"
10 "errors"
11 "fmt"
12 "hash"
13 "hash/crc32"
14 "io"
15
16 "github.com/ulikunitz/xz/lzma"
17)
18
19// allZeros checks whether a given byte slice has only zeros.
20func allZeros(p []byte) bool {
21 for _, c := range p {
22 if c != 0 {
23 return false
24 }
25 }
26 return true
27}
28
29// padLen returns the length of the padding required for the given
30// argument.
31func padLen(n int64) int {
32 k := int(n % 4)
33 if k > 0 {
34 k = 4 - k
35 }
36 return k
37}
38
39/*** Header ***/
40
41// headerMagic stores the magic bytes for the header
42var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}
43
44// HeaderLen provides the length of the xz file header.
45const HeaderLen = 12
46
47// Constants for the checksum methods supported by xz.
48const (
49 CRC32 byte = 0x1
50 CRC64 = 0x4
51 SHA256 = 0xa
52)
53
54// errInvalidFlags indicates that flags are invalid.
55var errInvalidFlags = errors.New("xz: invalid flags")
56
57// verifyFlags returns the error errInvalidFlags if the value is
58// invalid.
59func verifyFlags(flags byte) error {
60 switch flags {
61 case CRC32, CRC64, SHA256:
62 return nil
63 default:
64 return errInvalidFlags
65 }
66}
67
68// flagstrings maps flag values to strings.
69var flagstrings = map[byte]string{
70 CRC32: "CRC-32",
71 CRC64: "CRC-64",
72 SHA256: "SHA-256",
73}
74
75// flagString returns the string representation for the given flags.
76func flagString(flags byte) string {
77 s, ok := flagstrings[flags]
78 if !ok {
79 return "invalid"
80 }
81 return s
82}
83
84// newHashFunc returns a function that creates hash instances for the
85// hash method encoded in flags.
86func newHashFunc(flags byte) (newHash func() hash.Hash, err error) {
87 switch flags {
88 case CRC32:
89 newHash = newCRC32
90 case CRC64:
91 newHash = newCRC64
92 case SHA256:
93 newHash = sha256.New
94 default:
95 err = errInvalidFlags
96 }
97 return
98}
99
100// header provides the actual content of the xz file header: the flags.
101type header struct {
102 flags byte
103}
104
105// Errors returned by readHeader.
106var errHeaderMagic = errors.New("xz: invalid header magic bytes")
107
108// ValidHeader checks whether data is a correct xz file header. The
109// length of data must be HeaderLen.
110func ValidHeader(data []byte) bool {
111 var h header
112 err := h.UnmarshalBinary(data)
113 return err == nil
114}
115
116// String returns a string representation of the flags.
117func (h header) String() string {
118 return flagString(h.flags)
119}
120
121// UnmarshalBinary reads header from the provided data slice.
122func (h *header) UnmarshalBinary(data []byte) error {
123 // header length
124 if len(data) != HeaderLen {
125 return errors.New("xz: wrong file header length")
126 }
127
128 // magic header
129 if !bytes.Equal(headerMagic, data[:6]) {
130 return errHeaderMagic
131 }
132
133 // checksum
134 crc := crc32.NewIEEE()
135 crc.Write(data[6:8])
136 if uint32LE(data[8:]) != crc.Sum32() {
137 return errors.New("xz: invalid checksum for file header")
138 }
139
140 // stream flags
141 if data[6] != 0 {
142 return errInvalidFlags
143 }
144 flags := data[7]
145 if err := verifyFlags(flags); err != nil {
146 return err
147 }
148
149 h.flags = flags
150 return nil
151}
152
153// MarshalBinary generates the xz file header.
154func (h *header) MarshalBinary() (data []byte, err error) {
155 if err = verifyFlags(h.flags); err != nil {
156 return nil, err
157 }
158
159 data = make([]byte, 12)
160 copy(data, headerMagic)
161 data[7] = h.flags
162
163 crc := crc32.NewIEEE()
164 crc.Write(data[6:8])
165 putUint32LE(data[8:], crc.Sum32())
166
167 return data, nil
168}
169
170/*** Footer ***/
171
172// footerLen defines the length of the footer.
173const footerLen = 12
174
175// footerMagic contains the footer magic bytes.
176var footerMagic = []byte{'Y', 'Z'}
177
178// footer represents the content of the xz file footer.
179type footer struct {
180 indexSize int64
181 flags byte
182}
183
184// String prints a string representation of the footer structure.
185func (f footer) String() string {
186 return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize)
187}
188
189// Minimum and maximum for the size of the index (backward size).
190const (
191 minIndexSize = 4
192 maxIndexSize = (1 << 32) * 4
193)
194
195// MarshalBinary converts footer values into an xz file footer. Note
196// that the footer value is checked for correctness.
197func (f *footer) MarshalBinary() (data []byte, err error) {
198 if err = verifyFlags(f.flags); err != nil {
199 return nil, err
200 }
201 if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) {
202 return nil, errors.New("xz: index size out of range")
203 }
204 if f.indexSize%4 != 0 {
205 return nil, errors.New(
206 "xz: index size not aligned to four bytes")
207 }
208
209 data = make([]byte, footerLen)
210
211 // backward size (index size)
212 s := (f.indexSize / 4) - 1
213 putUint32LE(data[4:], uint32(s))
214 // flags
215 data[9] = f.flags
216 // footer magic
217 copy(data[10:], footerMagic)
218
219 // CRC-32
220 crc := crc32.NewIEEE()
221 crc.Write(data[4:10])
222 putUint32LE(data, crc.Sum32())
223
224 return data, nil
225}
226
227// UnmarshalBinary sets the footer value by unmarshalling an xz file
228// footer.
229func (f *footer) UnmarshalBinary(data []byte) error {
230 if len(data) != footerLen {
231 return errors.New("xz: wrong footer length")
232 }
233
234 // magic bytes
235 if !bytes.Equal(data[10:], footerMagic) {
236 return errors.New("xz: footer magic invalid")
237 }
238
239 // CRC-32
240 crc := crc32.NewIEEE()
241 crc.Write(data[4:10])
242 if uint32LE(data) != crc.Sum32() {
243 return errors.New("xz: footer checksum error")
244 }
245
246 var g footer
247 // backward size (index size)
248 g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4
249
250 // flags
251 if data[8] != 0 {
252 return errInvalidFlags
253 }
254 g.flags = data[9]
255 if err := verifyFlags(g.flags); err != nil {
256 return err
257 }
258
259 *f = g
260 return nil
261}
262
263/*** Block Header ***/
264
265// blockHeader represents the content of an xz block header.
266type blockHeader struct {
267 compressedSize int64
268 uncompressedSize int64
269 filters []filter
270}
271
272// String converts the block header into a string.
273func (h blockHeader) String() string {
274 var buf bytes.Buffer
275 first := true
276 if h.compressedSize >= 0 {
277 fmt.Fprintf(&buf, "compressed size %d", h.compressedSize)
278 first = false
279 }
280 if h.uncompressedSize >= 0 {
281 if !first {
282 buf.WriteString(" ")
283 }
284 fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize)
285 first = false
286 }
287 for _, f := range h.filters {
288 if !first {
289 buf.WriteString(" ")
290 }
291 fmt.Fprintf(&buf, "filter %s", f)
292 first = false
293 }
294 return buf.String()
295}
296
297// Masks for the block flags.
298const (
299 filterCountMask = 0x03
300 compressedSizePresent = 0x40
301 uncompressedSizePresent = 0x80
302 reservedBlockFlags = 0x3C
303)
304
305// errIndexIndicator signals that an index indicator (0x00) has been found
306// instead of an expected block header indicator.
307var errIndexIndicator = errors.New("xz: found index indicator")
308
309// readBlockHeader reads the block header.
310func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) {
311 var buf bytes.Buffer
312 buf.Grow(20)
313
314 // block header size
315 z, err := io.CopyN(&buf, r, 1)
316 n = int(z)
317 if err != nil {
318 return nil, n, err
319 }
320 s := buf.Bytes()[0]
321 if s == 0 {
322 return nil, n, errIndexIndicator
323 }
324
325 // read complete header
326 headerLen := (int(s) + 1) * 4
327 buf.Grow(headerLen - 1)
328 z, err = io.CopyN(&buf, r, int64(headerLen-1))
329 n += int(z)
330 if err != nil {
331 return nil, n, err
332 }
333
334 // unmarshal block header
335 h = new(blockHeader)
336 if err = h.UnmarshalBinary(buf.Bytes()); err != nil {
337 return nil, n, err
338 }
339
340 return h, n, nil
341}
342
343// readSizeInBlockHeader reads the uncompressed or compressed size
344// fields in the block header. The present value informs the function
345// whether the respective field is actually present in the header.
346func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) {
347 if !present {
348 return -1, nil
349 }
350 x, _, err := readUvarint(r)
351 if err != nil {
352 return 0, err
353 }
354 if x >= 1<<63 {
355 return 0, errors.New("xz: size overflow in block header")
356 }
357 return int64(x), nil
358}
359
// UnmarshalBinary unmarshals the block header. data must hold the
// complete header including the trailing CRC32 field. Assumes
// len(data) >= 1 — TODO confirm that all callers guarantee a non-empty
// slice.
func (h *blockHeader) UnmarshalBinary(data []byte) error {
	// Check header length
	// The first byte encodes the header size; zero would be the
	// index indicator, not a block header.
	s := data[0]
	if data[0] == 0 {
		return errIndexIndicator
	}
	headerLen := (int(s) + 1) * 4
	if len(data) != headerLen {
		return fmt.Errorf("xz: data length %d; want %d", len(data),
			headerLen)
	}
	// n is the offset of the trailing 4-byte CRC32 field.
	n := headerLen - 4

	// Check CRC-32
	// The checksum covers everything before the CRC field itself.
	crc := crc32.NewIEEE()
	crc.Write(data[:n])
	if crc.Sum32() != uint32LE(data[n:]) {
		return errors.New("xz: checksum error for block header")
	}

	// Block header flags
	flags := data[1]
	if flags&reservedBlockFlags != 0 {
		return errors.New("xz: reserved block header flags set")
	}

	// The variable-length fields start after size byte and flags.
	r := bytes.NewReader(data[2:n])

	// Compressed size
	var err error
	h.compressedSize, err = readSizeInBlockHeader(
		r, flags&compressedSizePresent != 0)
	if err != nil {
		return err
	}

	// Uncompressed size
	h.uncompressedSize, err = readSizeInBlockHeader(
		r, flags&uncompressedSizePresent != 0)
	if err != nil {
		return err
	}

	// The low two flag bits encode the filter count minus one.
	h.filters, err = readFilters(r, int(flags&filterCountMask)+1)
	if err != nil {
		return err
	}

	// Check padding
	// Since headerLen is a multiple of 4 we don't need to check
	// alignment.
	// k is the number of unread bytes; they are the padding.
	k := r.Len()
	// The standard spec says that the padding should have not more
	// than 3 bytes. However we found paddings of 4 or 5 in the
	// wild. See https://github.com/ulikunitz/xz/pull/11 and
	// https://github.com/ulikunitz/xz/issues/15
	//
	// The only reasonable approach seems to be to ignore the
	// padding size. We still check that all padding bytes are zero.
	if !allZeros(data[n-k : n]) {
		return errPadding
	}
	return nil
}
425
// MarshalBinary marshals the binary header. It validates the filter
// chain (the LZMA2 filter must be last and may not appear earlier),
// writes flags and optional size fields, pads the header to a multiple
// of 4 bytes and fills in the size byte and the CRC32 at the end.
func (h *blockHeader) MarshalBinary() (data []byte, err error) {
	if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) {
		return nil, errors.New("xz: filter count wrong")
	}
	for i, f := range h.filters {
		if i < len(h.filters)-1 {
			if f.id() == lzmaFilterID {
				return nil, errors.New(
					"xz: LZMA2 filter is not the last")
			}
		} else {
			// last filter
			if f.id() != lzmaFilterID {
				return nil, errors.New("xz: " +
					"last filter must be the LZMA2 filter")
			}
		}
	}

	var buf bytes.Buffer
	// placeholder; the header size must be set at the end, once the
	// total length is known
	buf.WriteByte(0)

	// flags
	// Low bits carry the filter count minus one; the size-present
	// bits are set for non-negative sizes (negative means absent).
	flags := byte(len(h.filters) - 1)
	if h.compressedSize >= 0 {
		flags |= compressedSizePresent
	}
	if h.uncompressedSize >= 0 {
		flags |= uncompressedSizePresent
	}
	buf.WriteByte(flags)

	// Scratch buffer; a uvarint needs at most 10 bytes.
	p := make([]byte, 10)
	if h.compressedSize >= 0 {
		k := putUvarint(p, uint64(h.compressedSize))
		buf.Write(p[:k])
	}
	if h.uncompressedSize >= 0 {
		k := putUvarint(p, uint64(h.uncompressedSize))
		buf.Write(p[:k])
	}

	for _, f := range h.filters {
		fp, err := f.MarshalBinary()
		if err != nil {
			return nil, err
		}
		buf.Write(fp)
	}

	// padding
	// Zero bytes bring the length (plus the 4 CRC bytes below) to a
	// multiple of 4.
	for i := padLen(int64(buf.Len())); i > 0; i-- {
		buf.WriteByte(0)
	}

	// crc place holder
	buf.Write(p[:4])

	data = buf.Bytes()
	if len(data)%4 != 0 {
		panic("data length not aligned")
	}
	// The size byte stores len/4 - 1.
	s := len(data)/4 - 1
	if !(1 < s && s <= 255) {
		panic("wrong block header size")
	}
	data[0] = byte(s)

	// The checksum covers everything except the CRC field itself.
	crc := crc32.NewIEEE()
	crc.Write(data[:len(data)-4])
	putUint32LE(data[len(data)-4:], crc.Sum32())

	return data, nil
}
502
// Constants used for marshalling and unmarshalling filters in the xz
// block header.
const (
	// minFilters and maxFilters bound the number of filters allowed
	// in a block header.
	minFilters = 1
	maxFilters = 4
	// minReservedID is the smallest filter id treated as reserved
	// (see readFilter).
	minReservedID = 1 << 62
)
510
// filter represents a filter in the block header.
type filter interface {
	// id returns the filter id as encoded in the block header.
	id() uint64
	UnmarshalBinary(data []byte) error
	MarshalBinary() (data []byte, err error)
	// reader returns a reader that applies the filter when reading
	// from r.
	reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error)
	// writeCloser returns a write closer that applies the filter
	// when writing to w.
	writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error)
	// filter must be last filter
	last() bool
}
521
522// readFilter reads a block filter from the block header. At this point
523// in time only the LZMA2 filter is supported.
524func readFilter(r io.Reader) (f filter, err error) {
525 br := lzma.ByteReader(r)
526
527 // index
528 id, _, err := readUvarint(br)
529 if err != nil {
530 return nil, err
531 }
532
533 var data []byte
534 switch id {
535 case lzmaFilterID:
536 data = make([]byte, lzmaFilterLen)
537 data[0] = lzmaFilterID
538 if _, err = io.ReadFull(r, data[1:]); err != nil {
539 return nil, err
540 }
541 f = new(lzmaFilter)
542 default:
543 if id >= minReservedID {
544 return nil, errors.New(
545 "xz: reserved filter id in block stream header")
546 }
547 return nil, errors.New("xz: invalid filter id")
548 }
549 if err = f.UnmarshalBinary(data); err != nil {
550 return nil, err
551 }
552 return f, err
553}
554
555// readFilters reads count filters. At this point in time only the count
556// 1 is supported.
557func readFilters(r io.Reader, count int) (filters []filter, err error) {
558 if count != 1 {
559 return nil, errors.New("xz: unsupported filter count")
560 }
561 f, err := readFilter(r)
562 if err != nil {
563 return nil, err
564 }
565 return []filter{f}, err
566}
567
568// writeFilters writes the filters.
569func writeFilters(w io.Writer, filters []filter) (n int, err error) {
570 for _, f := range filters {
571 p, err := f.MarshalBinary()
572 if err != nil {
573 return n, err
574 }
575 k, err := w.Write(p)
576 n += k
577 if err != nil {
578 return n, err
579 }
580 }
581 return n, nil
582}
583
584/*** Index ***/
585
// record describes a block in the xz file index.
type record struct {
	// unpaddedSize is the unpadded size of the block.
	unpaddedSize int64
	// uncompressedSize is the uncompressed size of the block.
	uncompressedSize int64
}
591
592// readRecord reads an index record.
593func readRecord(r io.ByteReader) (rec record, n int, err error) {
594 u, k, err := readUvarint(r)
595 n += k
596 if err != nil {
597 return rec, n, err
598 }
599 rec.unpaddedSize = int64(u)
600 if rec.unpaddedSize < 0 {
601 return rec, n, errors.New("xz: unpadded size negative")
602 }
603
604 u, k, err = readUvarint(r)
605 n += k
606 if err != nil {
607 return rec, n, err
608 }
609 rec.uncompressedSize = int64(u)
610 if rec.uncompressedSize < 0 {
611 return rec, n, errors.New("xz: uncompressed size negative")
612 }
613
614 return rec, n, nil
615}
616
617// MarshalBinary converts an index record in its binary encoding.
618func (rec *record) MarshalBinary() (data []byte, err error) {
619 // maximum length of a uvarint is 10
620 p := make([]byte, 20)
621 n := putUvarint(p, uint64(rec.unpaddedSize))
622 n += putUvarint(p[n:], uint64(rec.uncompressedSize))
623 return p[:n], nil
624}
625
// writeIndex writes the index, a sequence of records. It returns the
// number of bytes written and an error.
func writeIndex(w io.Writer, index []record) (n int64, err error) {
	// Everything written through mw is also fed into the CRC32; the
	// final checksum is written to w directly so it doesn't cover
	// itself.
	crc := crc32.NewIEEE()
	mw := io.MultiWriter(w, crc)

	// index indicator
	k, err := mw.Write([]byte{0})
	n += int64(k)
	if err != nil {
		return n, err
	}

	// number of records
	// p is reused below for the checksum; a uvarint needs at most
	// 10 bytes.
	p := make([]byte, 10)
	k = putUvarint(p, uint64(len(index)))
	k, err = mw.Write(p[:k])
	n += int64(k)
	if err != nil {
		return n, err
	}

	// list of records
	for _, rec := range index {
		p, err := rec.MarshalBinary()
		if err != nil {
			return n, err
		}
		k, err = mw.Write(p)
		n += int64(k)
		if err != nil {
			return n, err
		}
	}

	// index padding
	// Zero bytes computed by padLen; also covered by the checksum.
	k, err = mw.Write(make([]byte, padLen(int64(n))))
	n += int64(k)
	if err != nil {
		return n, err
	}

	// crc32 checksum
	putUint32LE(p, crc.Sum32())
	k, err = w.Write(p[:4])
	n += int64(k)

	return n, err
}
674
// readIndexBody reads the index from the reader. It assumes that the
// index indicator has already been read. n is the number of bytes read
// by this function; the indicator byte is not included.
func readIndexBody(r io.Reader) (records []record, n int64, err error) {
	crc := crc32.NewIEEE()
	// index indicator
	// The indicator byte was consumed by the caller but is part of
	// the checksummed data, so it is fed into the CRC manually.
	crc.Write([]byte{0})

	// All bytes read through br are mirrored into the CRC by the tee.
	br := lzma.ByteReader(io.TeeReader(r, crc))

	// number of records
	u, k, err := readUvarint(br)
	n += int64(k)
	if err != nil {
		return nil, n, err
	}
	recLen := int(u)
	// Guard against overflow when converting to int.
	if recLen < 0 || uint64(recLen) != u {
		return nil, n, errors.New("xz: record number overflow")
	}

	// list of records
	records = make([]record, recLen)
	for i := range records {
		records[i], k, err = readRecord(br)
		n += int64(k)
		if err != nil {
			return nil, n, err
		}
	}

	// Padding: n+1 accounts for the indicator byte read by the
	// caller. The capacity of 4 lets the slice be reused for the
	// checksum read below.
	p := make([]byte, padLen(int64(n+1)), 4)
	k, err = io.ReadFull(br.(io.Reader), p)
	n += int64(k)
	if err != nil {
		return nil, n, err
	}
	if !allZeros(p) {
		return nil, n, errors.New("xz: non-zero byte in index padding")
	}

	// crc32
	// Snapshot the sum before reading the stored checksum; the tee
	// would otherwise feed the checksum bytes into the CRC as well.
	s := crc.Sum32()
	p = p[:4]
	k, err = io.ReadFull(br.(io.Reader), p)
	n += int64(k)
	if err != nil {
		return records, n, err
	}
	if uint32LE(p) != s {
		return nil, n, errors.New("xz: wrong checksum for index")
	}

	return records, n, nil
}
diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz
new file mode 100644
index 0000000..4b820bd
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/fox.xz
Binary files differ
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go
new file mode 100644
index 0000000..a328878
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go
@@ -0,0 +1,181 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package hash
6
// CyclicPoly provides a cyclic polynomial rolling hash.
type CyclicPoly struct {
	// h is the current hash value.
	h uint64
	// p is the ring buffer of the per-byte hash values currently in
	// the window; its capacity is the window length.
	p []uint64
	// i indexes the oldest entry in the ring buffer.
	i int
}
13
// ror rotates the unsigned 64-bit integer to right. The argument s must be
// less than 64.
func ror(x uint64, s uint) uint64 {
	right := x >> s
	wrapped := x << (64 - s)
	return right | wrapped
}
19
20// NewCyclicPoly creates a new instance of the CyclicPoly structure. The
21// argument n gives the number of bytes for which a hash will be executed.
22// This number must be positive; the method panics if this isn't the case.
23func NewCyclicPoly(n int) *CyclicPoly {
24 if n < 1 {
25 panic("argument n must be positive")
26 }
27 return &CyclicPoly{p: make([]uint64, 0, n)}
28}
29
// Len returns the length of the byte sequence for which a hash is generated.
// The length is fixed at construction time as the ring-buffer capacity.
func (r *CyclicPoly) Len() int {
	return cap(r.p)
}
34
35// RollByte hashes the next byte and returns a hash value. The complete becomes
36// available after at least Len() bytes have been hashed.
37func (r *CyclicPoly) RollByte(x byte) uint64 {
38 y := hash[x]
39 if len(r.p) < cap(r.p) {
40 r.h = ror(r.h, 1) ^ y
41 r.p = append(r.p, y)
42 } else {
43 r.h ^= ror(r.p[r.i], uint(cap(r.p)-1))
44 r.h = ror(r.h, 1) ^ y
45 r.p[r.i] = y
46 r.i = (r.i + 1) % cap(r.p)
47 }
48 return r.h
49}
50
// hash stores the fixed 64-bit hash value for each possible byte value.
// RollByte mixes these table entries, never the raw bytes, into the
// rolling hash.
var hash = [256]uint64{
	0x2e4fc3f904065142, 0xc790984cfbc99527,
	0x879f95eb8c62f187, 0x3b61be86b5021ef2,
	0x65a896a04196f0a5, 0xc5b307b80470b59e,
	0xd3bff376a70df14b, 0xc332f04f0b3f1701,
	0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53,
	0x1906a10c2c1c0208, 0xfb0c712a03421c0d,
	0x38be311a65c9552b, 0xfee7ee4ca6445c7e,
	0x71aadeded184f21e, 0xd73426fccda23b2d,
	0x29773fb5fb9600b5, 0xce410261cd32981a,
	0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c,
	0xc13e35fc9c73a887, 0xf30ed5c201e76dbc,
	0xa5f10b3910482cea, 0x2945d59be02dfaad,
	0x06ee334ff70571b5, 0xbabf9d8070f44380,
	0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7,
	0x26183cb9f7b1664c, 0xea71dac7da068f21,
	0xea92eca5bd1d0bb7, 0x415595862defcd75,
	0x248a386023c60648, 0x9cf021ab284b3c8a,
	0xfc9372df02870f6c, 0x2b92d693eeb3b3fc,
	0x73e799d139dc6975, 0x7b15ae312486363c,
	0xb70e5454a2239c80, 0x208e3fb31d3b2263,
	0x01f563cabb930f44, 0x2ac4533d2a3240d8,
	0x84231ed1064f6f7c, 0xa9f020977c2a6d19,
	0x213c227271c20122, 0x09fe8a9a0a03d07a,
	0x4236dc75bcaf910c, 0x460a8b2bead8f17e,
	0xd9b27be1aa07055f, 0xd202d5dc4b11c33e,
	0x70adb010543bea12, 0xcdae938f7ea6f579,
	0x3f3d870208672f4d, 0x8e6ccbce9d349536,
	0xe4c0871a389095ae, 0xf5f2a49152bca080,
	0x9a43f9b97269934e, 0xc17b3753cb6f475c,
	0xd56d941e8e206bd4, 0xac0a4f3e525eda00,
	0xa06d5a011912a550, 0x5537ed19537ad1df,
	0xa32fe713d611449d, 0x2a1d05b47c3b579f,
	0x991d02dbd30a2a52, 0x39e91e7e28f93eb0,
	0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97,
	0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44,
	0x0b63d5d801708420, 0x8f227ca8f37ffaec,
	0x0256278670887c24, 0x107e14877dbf540b,
	0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61,
	0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001,
	0x31f601d5d31c48c4, 0x72ff3c0928bcaec7,
	0xd99264421147eb03, 0x535a2d6d38aefcfe,
	0x6ba8b4454a916237, 0xfa39366eaae4719c,
	0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4,
	0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8,
	0xd61c2503fe639144, 0x30ce625441eb92d3,
	0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5,
	0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf,
	0xc7ea4872c96b83ae, 0x6dd5d376f4392382,
	0x1be88681aaa9792f, 0xfef465ee1b6c10d9,
	0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9,
	0x7808e902b3857d0b, 0x171c9c4ea4607972,
	0x58d66274850146df, 0x42b311c10d3981d1,
	0x647fa8c621c41a4c, 0xf472771c66ddfedc,
	0x338d27e3f847b46b, 0x6402ce3da97545ce,
	0x5162db616fc38638, 0x9c83be97bc22a50e,
	0x2d3d7478a78d5e72, 0xe621a9b938fd5397,
	0x9454614eb0f81c45, 0x395fb6e742ed39b6,
	0x77dd9179d06037bf, 0xc478d0fee4d2656d,
	0x35d9d6cb772007af, 0x83a56e92c883f0f6,
	0x27937453250c00a1, 0x27bd6ebc3a46a97d,
	0x9f543bf784342d51, 0xd158f38c48b0ed52,
	0x8dd8537c045f66b4, 0x846a57230226f6d5,
	0x6b13939e0c4e7cdf, 0xfca25425d8176758,
	0x92e5fc6cd52788e6, 0x9992e13d7a739170,
	0x518246f7a199e8ea, 0xf104c2a71b9979c7,
	0x86b3ffaabea4768f, 0x6388061cf3e351ad,
	0x09d9b5295de5bbb5, 0x38bf1638c2599e92,
	0x1d759846499e148d, 0x4c0ff015e5f96ef4,
	0xa41a94cfa270f565, 0x42d76f9cb2326c0b,
	0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a,
	0x337523aabbe6cf8d, 0x646bb14001d42b12,
	0xc178729d138adc74, 0xf900ef4491f24086,
	0xee1a90d334bb5ac4, 0x9755c92247301a50,
	0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9,
	0x0fa8084cf91ac6ff, 0x10d226cf136e6189,
	0xd302057a07d4fb21, 0x5f03800e20a0fcc3,
	0x80118d4ae46bd210, 0x58ab61a522843733,
	0x51edd575c5432a4b, 0x94ee6ff67f9197f7,
	0x765669e0e5e8157b, 0xa5347830737132f0,
	0x3ba485a69f01510c, 0x0b247d7b957a01c3,
	0x1b3d63449fd807dc, 0x0fdc4721c30ad743,
	0x8b535ed3829b2b14, 0xee41d0cad65d232c,
	0xe6a99ed97a6a982f, 0x65ac6194c202003d,
	0x692accf3a70573eb, 0xcc3c02c3e200d5af,
	0x0d419e8b325914a3, 0x320f160f42c25e40,
	0x00710d647a51fe7a, 0x3c947692330aed60,
	0x9288aa280d355a7a, 0xa1806a9b791d1696,
	0x5d60e38496763da1, 0x6c69e22e613fd0f4,
	0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba,
	0x460c17992cbaece1, 0xf7822c5444d3297f,
	0x344a9790c69b74aa, 0xb80a42e6cae09dce,
	0x1b1361eaf2b1e757, 0xd84c1e758e236f01,
	0x88e0b7be347627cc, 0x45246009b7a99490,
	0x8011c6dd3fe50472, 0xc341d682bffb99d7,
	0x2511be93808e2d15, 0xd5bc13d7fd739840,
	0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157,
	0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0,
	0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc,
	0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e,
	0xa559cce0d9199aac, 0xde39d47ef3723380,
	0xe5b69d848ce42e35, 0xefa24296f8e79f52,
	0x70190b59db9a5afc, 0x26f166cdb211e7bf,
	0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017,
	0xb9059b05e9420d90, 0x2f0da855c9388754,
	0x611d5e9ab77949cc, 0x2912038ac01163f4,
	0x0231df50402b2fba, 0x45660fc4f3245f58,
	0xb91cc97c7c8dac50, 0xb72d2aafe4953427,
	0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2,
	0x1310e1c1a48d21c3, 0xad48a7810cdd8544,
	0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de,
	0xe70cfc8fe1ee9626, 0xef4711b0d8dda442,
	0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93,
	0x9b37db9d0335a39c, 0x494b6f870f5cfebc,
	0x6d1b3c1149dda943, 0x372c943a518c1093,
	0xad27af45e77c09c4, 0x3b6f92b646044604,
	0xac2917909f5fcf4f, 0x2069a60e977e5557,
	0x353a469e71014de5, 0x24be356281f55c15,
	0x2b6d710ba8e9adea, 0x404ad1751c749c29,
	0xed7311bf23d7f185, 0xba4f6976b4acc43e,
	0x32d7198d2bc39000, 0xee667019014d6e01,
	0x494ef3e128d14c83, 0x1f95a152baecd6be,
	0x201648dff1f483a5, 0x68c28550c8384af6,
	0x5fc834a6824a7f48, 0x7cd06cb7365eaf28,
	0xd82bbd95e9b30909, 0x234f0d1694c53f6d,
	0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e,
	0xf8f6b97f5585080a, 0x74236084be57b95b,
	0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b,
	0x4378ffe93e1528c5, 0x94ca92a17118e2d2,
}
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go
new file mode 100644
index 0000000..f99ec22
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go
@@ -0,0 +1,14 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5/*
6Package hash provides rolling hashes.
7
8Rolling hashes have to be used for maintaining the positions of n-byte
9sequences in the dictionary buffer.
10
The package currently provides the Rabin-Karp rolling hash and a Cyclic
Polynomial hash. Both support the Hashes method to be used with an interface.
13*/
14package hash
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
new file mode 100644
index 0000000..58635b1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
@@ -0,0 +1,66 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package hash
6
// A is the default constant for the Rabin-Karp rolling hash. This is a
// random prime.
const A = 0x97b548add41d5da1
10
// RabinKarp supports the computation of a rolling hash.
type RabinKarp struct {
	// A is the multiplier constant.
	A uint64
	// a^n, the weight of the oldest byte in the window
	aOldest uint64
	// h is the current hash value.
	h uint64
	// p is the ring buffer holding the window bytes; its capacity is
	// the window length.
	p []byte
	// i indexes the oldest byte in the ring buffer.
	i int
}
20
// NewRabinKarp creates a new RabinKarp value. The argument n defines the
// length of the byte sequence to be hashed. The default constant A will
// be used.
func NewRabinKarp(n int) *RabinKarp {
	return NewRabinKarpConst(n, A)
}
27
28// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the
29// length of the byte sequence to be hashed. The argument a provides the
30// constant used to compute the hash.
31func NewRabinKarpConst(n int, a uint64) *RabinKarp {
32 if n <= 0 {
33 panic("number of bytes n must be positive")
34 }
35 aOldest := uint64(1)
36 // There are faster methods. For the small n required by the LZMA
37 // compressor O(n) is sufficient.
38 for i := 0; i < n; i++ {
39 aOldest *= a
40 }
41 return &RabinKarp{
42 A: a, aOldest: aOldest,
43 p: make([]byte, 0, n),
44 }
45}
46
// Len returns the length of the byte sequence. It is fixed at
// construction time as the ring-buffer capacity.
func (r *RabinKarp) Len() int {
	return cap(r.p)
}
51
52// RollByte computes the hash after x has been added.
53func (r *RabinKarp) RollByte(x byte) uint64 {
54 if len(r.p) < cap(r.p) {
55 r.h += uint64(x)
56 r.h *= r.A
57 r.p = append(r.p, x)
58 } else {
59 r.h -= uint64(r.p[r.i]) * r.aOldest
60 r.h += uint64(x)
61 r.h *= r.A
62 r.p[r.i] = x
63 r.i = (r.i + 1) % cap(r.p)
64 }
65 return r.h
66}
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
new file mode 100644
index 0000000..ab6a19c
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
@@ -0,0 +1,29 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package hash
6
// Roller provides an interface for rolling hashes. The hash value will become
// valid after hash has been called Len times.
type Roller interface {
	// Len returns the window length in bytes.
	Len() int
	// RollByte hashes the next byte and returns the updated value.
	RollByte(x byte) uint64
}
13
14// Hashes computes all hash values for the array p. Note that the state of the
15// roller is changed.
16func Hashes(r Roller, p []byte) []uint64 {
17 n := r.Len()
18 if len(p) < n {
19 return nil
20 }
21 h := make([]uint64, len(p)-n+1)
22 for i := 0; i < n-1; i++ {
23 r.RollByte(p[i])
24 }
25 for i := range h {
26 h[i] = r.RollByte(p[i+n-1])
27 }
28 return h
29}
diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
new file mode 100644
index 0000000..0ba45e8
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
@@ -0,0 +1,457 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
// Package xlog provides a simple logging package that allows to disable
// certain message categories. It defines a type, Logger, with multiple
// methods for formatting output. The package has also a predefined
// 'standard' Logger accessible through the helper functions
// Print[f|ln], Fatal[f|ln], Panic[f|ln], Warn[f|ln] and Debug[f|ln]
// that are easier to use than creating a Logger manually. That logger
// writes to standard error and prints the date and time of each logged
// message, which can be configured using the function SetFlags.
//
// The Fatal functions call os.Exit(1) after the message is output
// unless suppressed by the flags. The Panic functions call panic
// after writing the log message unless suppressed.
17package xlog
18
19import (
20 "fmt"
21 "io"
22 "os"
23 "runtime"
24 "sync"
25 "time"
26)
27
// The flags define what information is prefixed to each log entry
// generated by the Logger. The Lno* versions allow the suppression of
// specific output. The bits are or'ed together to control what will be
// printed. There is no control over the order of the items printed and
// the format. The full format is:
//
//	2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message
//
const (
	Ldate = 1 << iota // the date: 2009-01-23
	Ltime             // the time: 01:23:23
	Lmicroseconds     // microsecond resolution: 01:23:23.123123
	Llongfile         // full file name and line number: /a/b/c/d.go:23
	Lshortfile        // final file name element and line number: d.go:23
	Lnopanic          // suppresses output from Panic[f|ln] but not the panic call
	Lnofatal          // suppresses output from Fatal[f|ln] but not the exit
	Lnowarn           // suppresses output from Warn[f|ln]
	Lnoprint          // suppresses output from Print[f|ln]
	Lnodebug          // suppresses output from Debug[f|ln]
	// initial values for the standard logger
	Lstdflags = Ldate | Ltime | Lnodebug
)
50
// A Logger represents an active logging object that generates lines of
// output to an io.Writer. Each logging operation if not suppressed
// makes a single call to the Writer's Write method. A Logger can be
// used simultaneously from multiple goroutines; it guarantees to
// serialize access to the Writer.
type Logger struct {
	mu sync.Mutex // ensures atomic writes; and protects the following
	// fields
	prefix string    // prefix to write at beginning of each line
	flag   int       // properties
	out    io.Writer // destination for output
	buf    []byte    // for accumulating text to write
}
64
// New creates a new Logger. The out argument sets the destination to
// which the log output will be written. The prefix appears at the
// beginning of each log line. The flag argument defines the logging
// properties (see the L* constants).
func New(out io.Writer, prefix string, flag int) *Logger {
	return &Logger{out: out, prefix: prefix, flag: flag}
}
72
// std is the standard logger used by the package scope functions. It
// writes to standard error with the default flags.
var std = New(os.Stderr, "", Lstdflags)
75
// itoa converts the integer to ASCII and appends it to *buf. The field
// is zero-padded to width wid; a negative or zero width avoids the
// padding. The function supports only non-negative integers.
func itoa(buf *[]byte, i int, wid int) {
	u := uint(i)
	if u == 0 && wid <= 1 {
		*buf = append(*buf, '0')
		return
	}
	// Render digits backwards into a scratch array.
	var tmp [32]byte
	pos := len(tmp)
	for u > 0 || wid > 0 {
		pos--
		wid--
		tmp[pos] = '0' + byte(u%10)
		u /= 10
	}
	*buf = append(*buf, tmp[pos:]...)
}
93
// formatHeader puts the header into the buf field of the buffer: the
// prefix, then the optional date/time fields and the optional file:line
// information as selected by l.flag. The caller must hold l.mu because
// l.buf is shared state.
func (l *Logger) formatHeader(t time.Time, file string, line int) {
	l.buf = append(l.buf, l.prefix...)
	if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {
		if l.flag&Ldate != 0 {
			year, month, day := t.Date()
			itoa(&l.buf, year, 4)
			l.buf = append(l.buf, '-')
			itoa(&l.buf, int(month), 2)
			l.buf = append(l.buf, '-')
			itoa(&l.buf, day, 2)
			l.buf = append(l.buf, ' ')
		}
		if l.flag&(Ltime|Lmicroseconds) != 0 {
			hour, min, sec := t.Clock()
			itoa(&l.buf, hour, 2)
			l.buf = append(l.buf, ':')
			itoa(&l.buf, min, 2)
			l.buf = append(l.buf, ':')
			itoa(&l.buf, sec, 2)
			if l.flag&Lmicroseconds != 0 {
				l.buf = append(l.buf, '.')
				itoa(&l.buf, t.Nanosecond()/1e3, 6)
			}
			l.buf = append(l.buf, ' ')
		}
	}
	if l.flag&(Lshortfile|Llongfile) != 0 {
		if l.flag&Lshortfile != 0 {
			// Reduce the file name to its final path element.
			short := file
			for i := len(file) - 1; i > 0; i-- {
				if file[i] == '/' {
					short = file[i+1:]
					break
				}
			}
			file = short
		}
		l.buf = append(l.buf, file...)
		l.buf = append(l.buf, ':')
		itoa(&l.buf, line, -1)
		l.buf = append(l.buf, ": "...)
	}
}
138
// output writes the header and the already formatted message s to
// l.out, appending a newline if s doesn't end in one. It must be called
// with l.mu held. If file/line information is requested, the mutex is
// released while runtime.Caller runs (it is expensive) and reacquired
// afterwards.
func (l *Logger) output(calldepth int, now time.Time, s string) error {
	var file string
	var line int
	if l.flag&(Lshortfile|Llongfile) != 0 {
		l.mu.Unlock()
		var ok bool
		_, file, line, ok = runtime.Caller(calldepth)
		if !ok {
			file = "???"
			line = 0
		}
		l.mu.Lock()
	}
	// Reuse the accumulated buffer's storage for this line.
	l.buf = l.buf[:0]
	l.formatHeader(now, file, line)
	l.buf = append(l.buf, s...)
	if len(s) == 0 || s[len(s)-1] != '\n' {
		l.buf = append(l.buf, '\n')
	}
	_, err := l.out.Write(l.buf)
	return err
}
161
// Output writes the string s with the header controlled by the flags to
// the l.out writer. A newline will be appended if s doesn't end in a
// newline. Calldepth is used to recover the PC, although all current
// calls of Output use the call depth 2. Output is a no-op when any of
// the noflag bits is set in l.flag. Access to the function is
// serialized.
func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error {
	now := time.Now()
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.flag&noflag != 0 {
		// output suppressed by flags
		return nil
	}
	s := fmt.Sprint(v...)
	return l.output(calldepth+1, now, s)
}
176
// Outputf works like Output but formats the message like Printf.
func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error {
	now := time.Now()
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.flag&noflag != 0 {
		// output suppressed by flags
		return nil
	}
	s := fmt.Sprintf(format, v...)
	return l.output(calldepth+1, now, s)
}
188
// Outputln works like Output but formats the message like Println.
func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error {
	now := time.Now()
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.flag&noflag != 0 {
		// output suppressed by flags
		return nil
	}
	s := fmt.Sprintln(v...)
	return l.output(calldepth+1, now, s)
}
200
// Panic prints the message like Print and calls panic. The printing
// might be suppressed by the flag Lnopanic; the panic itself always
// fires.
func (l *Logger) Panic(v ...interface{}) {
	l.Output(2, Lnopanic, v...)
	s := fmt.Sprint(v...)
	panic(s)
}

// Panic prints the message like Print to the standard logger and calls
// panic. The printing might be suppressed by the flag Lnopanic.
func Panic(v ...interface{}) {
	std.Output(2, Lnopanic, v...)
	s := fmt.Sprint(v...)
	panic(s)
}

// Panicf prints the message like Printf and calls panic. The printing
// might be suppressed by the flag Lnopanic; the panic itself always
// fires.
func (l *Logger) Panicf(format string, v ...interface{}) {
	l.Outputf(2, Lnopanic, format, v...)
	s := fmt.Sprintf(format, v...)
	panic(s)
}

// Panicf prints the message like Printf to the standard logger and
// calls panic. The printing might be suppressed by the flag Lnopanic.
func Panicf(format string, v ...interface{}) {
	std.Outputf(2, Lnopanic, format, v...)
	s := fmt.Sprintf(format, v...)
	panic(s)
}

// Panicln prints the message like Println and calls panic. The printing
// might be suppressed by the flag Lnopanic; the panic itself always
// fires.
func (l *Logger) Panicln(v ...interface{}) {
	l.Outputln(2, Lnopanic, v...)
	s := fmt.Sprintln(v...)
	panic(s)
}

// Panicln prints the message like Println to the standard logger and
// calls panic. The printing might be suppressed by the flag Lnopanic.
func Panicln(v ...interface{}) {
	std.Outputln(2, Lnopanic, v...)
	s := fmt.Sprintln(v...)
	panic(s)
}
248
// Fatal prints the message like Print and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal; the exit always
// happens.
func (l *Logger) Fatal(v ...interface{}) {
	l.Output(2, Lnofatal, v...)
	os.Exit(1)
}

// Fatal prints the message like Print to the standard logger and calls
// os.Exit(1). The printing might be suppressed by the flag Lnofatal.
func Fatal(v ...interface{}) {
	std.Output(2, Lnofatal, v...)
	os.Exit(1)
}

// Fatalf prints the message like Printf and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func (l *Logger) Fatalf(format string, v ...interface{}) {
	l.Outputf(2, Lnofatal, format, v...)
	os.Exit(1)
}

// Fatalf prints the message like Printf to the standard logger and
// calls os.Exit(1). The printing might be suppressed by the flag
// Lnofatal.
func Fatalf(format string, v ...interface{}) {
	std.Outputf(2, Lnofatal, format, v...)
	os.Exit(1)
}

// Fatalln prints the message like Println and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
//
// NOTE(review): the format parameter is never used — Outputln receives
// only v. The signature presumably should match Println's
// (v ...interface{}); changing it would break the exported API, so it
// is only flagged here.
func (l *Logger) Fatalln(format string, v ...interface{}) {
	l.Outputln(2, Lnofatal, v...)
	os.Exit(1)
}

// Fatalln prints the message like Println to the standard logger and
// calls os.Exit(1). The printing might be suppressed by the flag
// Lnofatal.
//
// NOTE(review): the format parameter is never used — see the method
// above; kept for API compatibility.
func Fatalln(format string, v ...interface{}) {
	std.Outputln(2, Lnofatal, v...)
	os.Exit(1)
}
290
// Warn prints the message like Print. The printing might be suppressed
// by the flag Lnowarn.
func (l *Logger) Warn(v ...interface{}) {
	l.Output(2, Lnowarn, v...)
}

// Warn prints the message like Print to the standard logger. The
// printing might be suppressed by the flag Lnowarn.
func Warn(v ...interface{}) {
	std.Output(2, Lnowarn, v...)
}

// Warnf prints the message like Printf. The printing might be suppressed
// by the flag Lnowarn.
func (l *Logger) Warnf(format string, v ...interface{}) {
	l.Outputf(2, Lnowarn, format, v...)
}

// Warnf prints the message like Printf to the standard logger. The
// printing might be suppressed by the flag Lnowarn.
func Warnf(format string, v ...interface{}) {
	std.Outputf(2, Lnowarn, format, v...)
}

// Warnln prints the message like Println. The printing might be suppressed
// by the flag Lnowarn.
func (l *Logger) Warnln(v ...interface{}) {
	l.Outputln(2, Lnowarn, v...)
}

// Warnln prints the message like Println to the standard logger. The
// printing might be suppressed by the flag Lnowarn.
func Warnln(v ...interface{}) {
	std.Outputln(2, Lnowarn, v...)
}
326
// Print prints the message like fmt.Print. The printing might be suppressed
// by the flag Lnoprint.
func (l *Logger) Print(v ...interface{}) {
	l.Output(2, Lnoprint, v...)
}

// Print prints the message like fmt.Print to the standard logger. The
// printing might be suppressed by the flag Lnoprint.
func Print(v ...interface{}) {
	std.Output(2, Lnoprint, v...)
}

// Printf prints the message like fmt.Printf. The printing might be suppressed
// by the flag Lnoprint.
func (l *Logger) Printf(format string, v ...interface{}) {
	l.Outputf(2, Lnoprint, format, v...)
}

// Printf prints the message like fmt.Printf to the standard logger. The
// printing might be suppressed by the flag Lnoprint.
func Printf(format string, v ...interface{}) {
	std.Outputf(2, Lnoprint, format, v...)
}

// Println prints the message like fmt.Println. The printing might be
// suppressed by the flag Lnoprint.
func (l *Logger) Println(v ...interface{}) {
	l.Outputln(2, Lnoprint, v...)
}

// Println prints the message like fmt.Println to the standard logger.
// The printing might be suppressed by the flag Lnoprint.
func Println(v ...interface{}) {
	std.Outputln(2, Lnoprint, v...)
}
362
// Debug prints the message like Print. The printing might be suppressed
// by the flag Lnodebug (set in Lstdflags, so the standard logger drops
// debug output by default).
func (l *Logger) Debug(v ...interface{}) {
	l.Output(2, Lnodebug, v...)
}

// Debug prints the message like Print to the standard logger. The
// printing might be suppressed by the flag Lnodebug.
func Debug(v ...interface{}) {
	std.Output(2, Lnodebug, v...)
}

// Debugf prints the message like Printf. The printing might be suppressed
// by the flag Lnodebug.
func (l *Logger) Debugf(format string, v ...interface{}) {
	l.Outputf(2, Lnodebug, format, v...)
}

// Debugf prints the message like Printf to the standard logger. The
// printing might be suppressed by the flag Lnodebug.
func Debugf(format string, v ...interface{}) {
	std.Outputf(2, Lnodebug, format, v...)
}

// Debugln prints the message like Println. The printing might be suppressed
// by the flag Lnodebug.
func (l *Logger) Debugln(v ...interface{}) {
	l.Outputln(2, Lnodebug, v...)
}

// Debugln prints the message like Println to the standard logger. The
// printing might be suppressed by the flag Lnodebug.
func Debugln(v ...interface{}) {
	std.Outputln(2, Lnodebug, v...)
}
398
// Flags returns the current flags used by the logger. Access is
// serialized by l.mu.
func (l *Logger) Flags() int {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.flag
}

// Flags returns the current flags used by the standard logger.
func Flags() int {
	return std.Flags()
}

// SetFlags sets the flags of the logger. Access is serialized by l.mu.
func (l *Logger) SetFlags(flag int) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.flag = flag
}

// SetFlags sets the flags for the standard logger.
func SetFlags(flag int) {
	std.SetFlags(flag)
}
422
// Prefix returns the prefix used by the logger. Access is serialized by
// l.mu.
func (l *Logger) Prefix() string {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.prefix
}

// Prefix returns the prefix used by the standard logger of the package.
func Prefix() string {
	return std.Prefix()
}

// SetPrefix sets the prefix for the logger. Access is serialized by
// l.mu.
func (l *Logger) SetPrefix(prefix string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.prefix = prefix
}

// SetPrefix sets the prefix of the standard logger of the package.
func SetPrefix(prefix string) {
	std.SetPrefix(prefix)
}
446
// SetOutput sets the output of the logger. Access is serialized by
// l.mu.
func (l *Logger) SetOutput(w io.Writer) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.out = w
}

// SetOutput sets the output for the standard logger of the package.
func SetOutput(w io.Writer) {
	std.SetOutput(w)
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go
new file mode 100644
index 0000000..a781bd1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go
@@ -0,0 +1,523 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "bufio"
9 "errors"
10 "fmt"
11 "io"
12 "unicode"
13)
14
// node represents a node in the binary tree. Child and parent links
// are indices into the node ring buffer; null marks a missing link.
type node struct {
	// x is the search value
	x uint32
	// p parent node
	p uint32
	// l left child
	l uint32
	// r right child
	r uint32
}

// wordLen is the number of bytes represented by the x field of a node.
const wordLen = 4
29
// binTree supports the identification of the next operation based on a
// binary tree.
//
// Nodes will be identified by their index into the ring buffer.
type binTree struct {
	dict *encoderDict
	// ring buffer of nodes
	node []node
	// absolute offset of the entry for the next node; the number of
	// bytes written is wordLen larger (hoff starts at -wordLen)
	hoff int64
	// front position in the node ring buffer
	front uint32
	// index of the root node
	root uint32
	// current x value, built from the last wordLen bytes written
	x uint32
	// preallocated array used as scratch space by NextOp
	data []byte
}

// null represents the nonexistent index. We can't use zero because it
// would always exist or we would need to decrease the index for each
// reference.
const null uint32 = 1<<32 - 1
55
56// newBinTree initializes the binTree structure. The capacity defines
57// the size of the buffer and defines the maximum distance for which
58// matches will be found.
59func newBinTree(capacity int) (t *binTree, err error) {
60 if capacity < 1 {
61 return nil, errors.New(
62 "newBinTree: capacity must be larger than zero")
63 }
64 if int64(capacity) >= int64(null) {
65 return nil, errors.New(
66 "newBinTree: capacity must less 2^{32}-1")
67 }
68 t = &binTree{
69 node: make([]node, capacity),
70 hoff: -int64(wordLen),
71 root: null,
72 data: make([]byte, maxMatchLen),
73 }
74 return t, nil
75}
76
// SetDict sets the encoder dictionary the tree reads match data from.
func (t *binTree) SetDict(d *encoderDict) { t.dict = d }
78
// WriteByte writes a single byte into the binary tree. A node is only
// entered once a complete wordLen-byte word has been accumulated.
func (t *binTree) WriteByte(c byte) error {
	// Shift the new byte into the current search value.
	t.x = (t.x << 8) | uint32(c)
	t.hoff++
	if t.hoff < 0 {
		// Fewer than wordLen bytes written so far; no complete
		// word available yet.
		return nil
	}
	v := t.front
	if int64(v) < t.hoff {
		// We are overwriting old nodes stored in the tree.
		t.remove(v)
	}
	t.node[v].x = t.x
	t.add(v)
	// Advance the front position, wrapping around the ring buffer.
	t.front++
	if int64(t.front) >= int64(len(t.node)) {
		t.front = 0
	}
	return nil
}
99
// Write writes a sequence of bytes into the binTree structure. It
// never fails.
func (t *binTree) Write(p []byte) (n int, err error) {
	for _, c := range p {
		// WriteByte always returns nil, so the result is ignored.
		t.WriteByte(c)
	}
	return len(p), nil
}
107
// add puts the node v into the tree. The node must not be part of the
// tree before.
func (t *binTree) add(v uint32) {
	vn := &t.node[v]
	// Set left and right to null indices.
	vn.l, vn.r = null, null
	// If the binary tree is empty make v the root.
	if t.root == null {
		t.root = v
		vn.p = null
		return
	}
	x := vn.x
	p := t.root
	// Search for the right leaf link and add the new node. Equal
	// values descend into the left subtree.
	for {
		pn := &t.node[p]
		if x <= pn.x {
			if pn.l == null {
				pn.l = v
				vn.p = p
				return
			}
			p = pn.l
		} else {
			if pn.r == null {
				pn.r = v
				vn.p = p
				return
			}
			p = pn.r
		}
	}
}
142
// parent returns the parent node index of v and the pointer to the
// link referencing v in the parent. For the root node the null index
// and a pointer to t.root are returned.
func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) {
	if t.root == v {
		return null, &t.root
	}
	p = t.node[v].p
	if t.node[p].l == v {
		ptr = &t.node[p].l
	} else {
		ptr = &t.node[p].r
	}
	return
}
157
// remove removes node v from the tree, relinking children and parent.
func (t *binTree) remove(v uint32) {
	vn := &t.node[v]
	p, ptr := t.parent(v)
	l, r := vn.l, vn.r
	if l == null {
		// Move the right child up.
		*ptr = r
		if r != null {
			t.node[r].p = p
		}
		return
	}
	if r == null {
		// Move the left child up.
		*ptr = l
		t.node[l].p = p
		return
	}

	// Both children exist: search the in-order predecessor u.
	un := &t.node[l]
	ur := un.r
	if ur == null {
		// In-order predecessor is l. Move it up.
		un.r = r
		t.node[r].p = l
		un.p = p
		*ptr = l
		return
	}
	var u uint32
	for {
		// Look for the max value in the tree where l is root.
		u = ur
		ur = t.node[u].r
		if ur == null {
			break
		}
	}
	// replace u with its left child ul
	un = &t.node[u]
	ul := un.l
	up := un.p
	t.node[up].r = ul
	if ul != null {
		t.node[ul].p = up
	}

	// replace v by u
	un.l, un.r = l, r
	t.node[l].p = u
	t.node[r].p = u
	*ptr = u
	un.p = p
}
214
// search looks for the node that has the value x or for the nodes that
// brace it. The node highest in the tree with the value x will be
// returned. All other nodes with the same value live in the left
// subtree of the returned node.
//
// If x is not found, a references the largest value less than x and b
// the smallest value greater than x; either may be null.
func (t *binTree) search(v uint32, x uint32) (a, b uint32) {
	a, b = null, null
	if v == null {
		return
	}
	for {
		vn := &t.node[v]
		if x <= vn.x {
			if x == vn.x {
				return v, v
			}
			b = v
			if vn.l == null {
				return
			}
			v = vn.l
		} else {
			a = v
			if vn.r == null {
				return
			}
			v = vn.r
		}
	}
}
244
245// max returns the node with maximum value in the subtree with v as
246// root.
247func (t *binTree) max(v uint32) uint32 {
248 if v == null {
249 return null
250 }
251 for {
252 r := t.node[v].r
253 if r == null {
254 return v
255 }
256 v = r
257 }
258}
259
260// min returns the node with the minimum value in the subtree with v as
261// root.
262func (t *binTree) min(v uint32) uint32 {
263 if v == null {
264 return null
265 }
266 for {
267 l := t.node[v].l
268 if l == null {
269 return v
270 }
271 v = l
272 }
273}
274
// pred returns the in-order predecessor of node v, or null if none
// exists.
func (t *binTree) pred(v uint32) uint32 {
	if v == null {
		return null
	}
	// The predecessor is the maximum of the left subtree, if any.
	u := t.max(t.node[v].l)
	if u != null {
		return u
	}
	// Otherwise climb until v is the right child of its parent.
	for {
		p := t.node[v].p
		if p == null {
			return null
		}
		if t.node[p].r == v {
			return p
		}
		v = p
	}
}
295
// succ returns the in-order successor of node v, or null if none
// exists.
func (t *binTree) succ(v uint32) uint32 {
	if v == null {
		return null
	}
	// The successor is the minimum of the right subtree, if any.
	u := t.min(t.node[v].r)
	if u != null {
		return u
	}
	// Otherwise climb until v is the left child of its parent.
	for {
		p := t.node[v].p
		if p == null {
			return null
		}
		if t.node[p].l == v {
			return p
		}
		v = p
	}
}
316
// xval converts up to the first four bytes of a into a 32-bit unsigned
// integer in big-endian order. Missing bytes count as zero.
func xval(a []byte) uint32 {
	n := len(a)
	if n > 4 {
		n = 4
	}
	var x uint32
	for i := 0; i < n; i++ {
		x |= uint32(a[i]) << uint(24-8*i)
	}
	return x
}
337
// dumpX converts value x into a four-letter string. Bytes that are not
// graphic characters are rendered as '.'.
func dumpX(x uint32) string {
	var a [4]byte
	for i := range a {
		c := byte(x >> uint(24-8*i))
		if !unicode.IsGraphic(rune(c)) {
			c = '.'
		}
		a[i] = c
	}
	return string(a[:])
}
351
// dumpNode writes a representation of the node v and its subtree into
// the io.Writer. The right subtree is printed first with increased
// indentation, producing a sideways rendering of the tree.
func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) {
	if v == null {
		return
	}

	vn := &t.node[v]

	t.dumpNode(w, vn.r, indent+2)

	for i := 0; i < indent; i++ {
		fmt.Fprint(w, " ")
	}
	if vn.p == null {
		fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x))
	} else {
		fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p)
	}

	t.dumpNode(w, vn.l, indent+2)
}
373
// dump prints a representation of the binary tree into the writer. The
// output is buffered and flushed before returning.
func (t *binTree) dump(w io.Writer) error {
	bw := bufio.NewWriter(w)
	t.dumpNode(bw, t.root, 0)
	return bw.Flush()
}
380
// distance computes the match distance of node v from the current
// front position, accounting for the wrap-around of the ring buffer.
func (t *binTree) distance(v uint32) int {
	dist := int(t.front) - int(v)
	if dist <= 0 {
		dist += len(t.node)
	}
	return dist
}
388
// matchParams bundles the parameters controlling the match method.
type matchParams struct {
	// repeat distances from the encoder state
	rep [4]uint32
	// length at which a match will be accepted immediately
	nAccept int
	// maximum number of nodes to check
	check int
	// finish if the match length gets shorter
	stopShorter bool
}
398
// match scans the distances produced by distIter for a match better
// than m. It stops after p.check distances have been examined, or as
// soon as a match of at least p.nAccept bytes is found, in which case
// accepted is true.
func (t *binTree) match(m match, distIter func() (int, bool), p matchParams,
) (r match, checked int, accepted bool) {
	buf := &t.dict.buf
	for {
		if checked >= p.check {
			return m, checked, true
		}
		dist, ok := distIter()
		if !ok {
			return m, checked, false
		}
		checked++
		if m.n > 0 {
			// Quick rejection: a candidate can only beat the
			// current best if its byte at offset m.n-1 matches.
			i := buf.rear - dist + m.n - 1
			if i < 0 {
				i += len(buf.data)
			} else if i >= len(buf.data) {
				i -= len(buf.data)
			}
			if buf.data[i] != t.data[m.n-1] {
				if p.stopShorter {
					return m, checked, false
				}
				continue
			}
		}
		n := buf.matchLen(dist, t.data)
		switch n {
		case 0:
			if p.stopShorter {
				return m, checked, false
			}
			continue
		case 1:
			// A one-byte match is only kept if it repeats the
			// most recent distance.
			if uint32(dist-minDistance) != p.rep[0] {
				continue
			}
		}
		if n < m.n || (n == m.n && int64(dist) >= m.distance) {
			// Not an improvement over the current best match.
			continue
		}
		m = match{int64(dist), n}
		if n >= p.nAccept {
			return m, checked, true
		}
	}
}
446
// NextOp determines the next operation for the data at the head of the
// dictionary. It returns the best match found, or a literal for the
// first byte if no usable match exists.
func (t *binTree) NextOp(rep [4]uint32) operation {
	// retrieve maxMatchLen data
	n, _ := t.dict.buf.Peek(t.data[:maxMatchLen])
	if n == 0 {
		panic("no data in buffer")
	}
	t.data = t.data[:n]

	var (
		m                  match
		x, u, v            uint32
		iterPred, iterSucc func() (int, bool)
	)
	p := matchParams{
		rep:     rep,
		nAccept: maxMatchLen,
		check:   32,
	}
	// First check the small distances 3, 2 and 1, which the tree of
	// 4-byte words cannot represent.
	i := 4
	iterSmall := func() (dist int, ok bool) {
		i--
		if i <= 0 {
			return 0, false
		}
		return i, true
	}
	m, checked, accepted := t.match(m, iterSmall, p)
	if accepted {
		goto end
	}
	p.check -= checked
	x = xval(t.data)
	u, v = t.search(t.root, x)
	if u == v && len(t.data) == 4 {
		// Exact word hit: walk further equal-valued nodes, which
		// live in the left subtree of the returned node.
		iter := func() (dist int, ok bool) {
			if u == null {
				return 0, false
			}
			dist = t.distance(u)
			u, v = t.search(t.node[u].l, x)
			if u != v {
				u = null
			}
			return dist, true
		}
		m, _, _ = t.match(m, iter, p)
		goto end
	}
	p.stopShorter = true
	// Scan the bracing nodes: successors of v upward, then
	// predecessors of u downward.
	iterSucc = func() (dist int, ok bool) {
		if v == null {
			return 0, false
		}
		dist = t.distance(v)
		v = t.succ(v)
		return dist, true
	}
	m, checked, accepted = t.match(m, iterSucc, p)
	if accepted {
		goto end
	}
	p.check -= checked
	iterPred = func() (dist int, ok bool) {
		if u == null {
			return 0, false
		}
		dist = t.distance(u)
		u = t.pred(u)
		return dist, true
	}
	m, _, _ = t.match(m, iterPred, p)
end:
	if m.n == 0 {
		return lit{t.data[0]}
	}
	return m
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go
new file mode 100644
index 0000000..e9bab01
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go
@@ -0,0 +1,45 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7/* Naming conventions follows the CodeReviewComments in the Go Wiki. */
8
// ntz32Const is the de Bruijn multiplier used by ntz32 and nlz32.
const ntz32Const = 0x04d7651f

// ntz32Table is a helper table for the de Bruijn algorithm by Danny
// Dubé. See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure
// 5-26.
var ntz32Table = [32]int8{
	0, 1, 2, 24, 3, 19, 6, 25,
	22, 4, 20, 10, 16, 7, 12, 26,
	31, 23, 18, 5, 21, 9, 15, 11,
	30, 17, 8, 14, 29, 13, 28, 27,
}

// ntz32 computes the number of trailing zeros for an unsigned 32-bit
// integer. It returns 32 for x == 0.
func ntz32(x uint32) int {
	if x == 0 {
		return 32
	}
	// Isolate the lowest set bit and index the de Bruijn table.
	return int(ntz32Table[((x&-x)*ntz32Const)>>27])
}

// nlz32 computes the number of leading zeros for an unsigned 32-bit
// integer. It returns 32 for x == 0.
func nlz32(x uint32) int {
	// Smear the left-most set bit to the right.
	x |= x >> 1
	x |= x >> 2
	x |= x >> 4
	x |= x >> 8
	x |= x >> 16
	// Reuse the ntz mechanism to compute nlz.
	x++
	if x == 0 {
		return 0
	}
	return 32 - int(ntz32Table[(x*ntz32Const)>>27])
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go
new file mode 100644
index 0000000..5350d81
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go
@@ -0,0 +1,39 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "io"
10)
11
12// breader provides the ReadByte function for a Reader. It doesn't read
13// more data from the reader than absolutely necessary.
14type breader struct {
15 io.Reader
16 // helper slice to save allocations
17 p []byte
18}
19
20// ByteReader converts an io.Reader into an io.ByteReader.
21func ByteReader(r io.Reader) io.ByteReader {
22 br, ok := r.(io.ByteReader)
23 if !ok {
24 return &breader{r, make([]byte, 1)}
25 }
26 return br
27}
28
29// ReadByte read byte function.
30func (r *breader) ReadByte() (c byte, err error) {
31 n, err := r.Reader.Read(r.p)
32 if n < 1 {
33 if err == nil {
34 err = errors.New("breader.ReadByte: no data")
35 }
36 return 0, err
37 }
38 return r.p[0], nil
39}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go
new file mode 100644
index 0000000..50e0b6d
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go
@@ -0,0 +1,171 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9)
10
// buffer provides a circular buffer of bytes. If the front index equals
// the rear index the buffer is empty. As a consequence front cannot be
// equal rear for a full buffer. So a full buffer has a length that is
// one byte less than the length of the data slice.
type buffer struct {
	data  []byte
	front int
	rear  int
}
20
// newBuffer creates a buffer with the given size. One extra byte is
// allocated because front == rear marks an empty buffer.
func newBuffer(size int) *buffer {
	return &buffer{data: make([]byte, size+1)}
}

// Cap returns the capacity of the buffer.
func (b *buffer) Cap() int {
	return len(b.data) - 1
}
30
// Reset resets the buffer. The front and rear index are set to zero.
func (b *buffer) Reset() {
	b.front = 0
	b.rear = 0
}
36
37// Buffered returns the number of bytes buffered.
38func (b *buffer) Buffered() int {
39 delta := b.front - b.rear
40 if delta < 0 {
41 delta += len(b.data)
42 }
43 return delta
44}
45
46// Available returns the number of bytes available for writing.
47func (b *buffer) Available() int {
48 delta := b.rear - 1 - b.front
49 if delta < 0 {
50 delta += len(b.data)
51 }
52 return delta
53}
54
// addIndex adds a non-negative integer to the index i and returns the
// resulting index. The function takes care of wrapping the index as
// well as potential overflow situations. Callers pass n values not
// exceeding the buffer length.
func (b *buffer) addIndex(i int, n int) int {
	// subtraction of len(b.data) prevents overflow
	i += n - len(b.data)
	if i < 0 {
		i += len(b.data)
	}
	return i
}
66
// Read reads bytes from the buffer into p and returns the number of
// bytes read. The function never returns an error but might return less
// data than requested.
func (b *buffer) Read(p []byte) (n int, err error) {
	n, err = b.Peek(p)
	// Peek never fails; advance the rear index past the bytes read.
	b.rear = b.addIndex(b.rear, n)
	return n, err
}
75
// Peek reads bytes from the buffer into p without changing the buffer.
// Peek will never return an error but might return less data than
// requested.
func (b *buffer) Peek(p []byte) (n int, err error) {
	m := b.Buffered()
	n = len(p)
	if m < n {
		n = m
		p = p[:n]
	}
	// Copy from rear to the end of the data slice first, then wrap
	// around to its start if more bytes are needed.
	k := copy(p, b.data[b.rear:])
	if k < n {
		copy(p[k:], b.data)
	}
	return n, nil
}
92
93// Discard skips the n next bytes to read from the buffer, returning the
94// bytes discarded.
95//
96// If Discards skips fewer than n bytes, it returns an error.
97func (b *buffer) Discard(n int) (discarded int, err error) {
98 if n < 0 {
99 return 0, errors.New("buffer.Discard: negative argument")
100 }
101 m := b.Buffered()
102 if m < n {
103 n = m
104 err = errors.New(
105 "buffer.Discard: discarded less bytes then requested")
106 }
107 b.rear = b.addIndex(b.rear, n)
108 return n, err
109}
110
// ErrNoSpace indicates that there is insufficient space for the Write
// operation.
var ErrNoSpace = errors.New("insufficient space")
114
// Write puts data into the buffer. If fewer bytes are written than
// requested ErrNoSpace is returned.
func (b *buffer) Write(p []byte) (n int, err error) {
	m := b.Available()
	n = len(p)
	if m < n {
		// Not enough space; write only what fits.
		n = m
		p = p[:m]
		err = ErrNoSpace
	}
	k := copy(b.data[b.front:], p)
	if k < n {
		// Wrap around to the start of the ring buffer.
		copy(b.data, p[k:])
	}
	b.front = b.addIndex(b.front, n)
	return n, err
}
132
// WriteByte writes a single byte into the buffer. The error ErrNoSpace
// is returned if no single byte is available in the buffer for writing.
func (b *buffer) WriteByte(c byte) error {
	if b.Available() < 1 {
		return ErrNoSpace
	}
	b.data[b.front] = c
	b.front = b.addIndex(b.front, 1)
	return nil
}
143
// prefixLen returns the length of the common prefix of a and b.
func prefixLen(a, b []byte) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		if a[i] != b[i] {
			return i
		}
	}
	return n
}
156
// matchLen returns the length of the common prefix for the given
// distance from the rear and the byte slice p.
func (b *buffer) matchLen(distance int, p []byte) int {
	var n int
	i := b.rear - distance
	if i < 0 {
		// The match source starts in the wrapped-around tail of
		// the ring buffer; compare that part first.
		if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
			return n
		}
		p = p[n:]
		i = 0
	}
	n += prefixLen(p, b.data[i:])
	return n
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
new file mode 100644
index 0000000..a3696ba
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
@@ -0,0 +1,37 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "io"
10)
11
12// ErrLimit indicates that the limit of the LimitedByteWriter has been
13// reached.
14var ErrLimit = errors.New("limit reached")
15
16// LimitedByteWriter provides a byte writer that can be written until a
17// limit is reached. The field N provides the number of remaining
18// bytes.
19type LimitedByteWriter struct {
20 BW io.ByteWriter
21 N int64
22}
23
24// WriteByte writes a single byte to the limited byte writer. It returns
25// ErrLimit if the limit has been reached. If the byte is successfully
26// written the field N of the LimitedByteWriter will be decremented by
27// one.
28func (l *LimitedByteWriter) WriteByte(c byte) error {
29 if l.N <= 0 {
30 return ErrLimit
31 }
32 if err := l.BW.WriteByte(c); err != nil {
33 return err
34 }
35 l.N--
36 return nil
37}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go
new file mode 100644
index 0000000..16e14db
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go
@@ -0,0 +1,277 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10 "io"
11)
12
// decoder decodes a raw LZMA stream without any header.
type decoder struct {
	// dictionary; the rear pointer of the buffer will be used for
	// reading the data.
	Dict *decoderDict
	// decoder state
	State *state
	// range decoder
	rd *rangeDecoder
	// start stores the head value of the dictionary for the LZMA
	// stream
	start int64
	// size of uncompressed data; negative if unknown
	size int64
	// end-of-stream encountered
	eos bool
	// explicit EOS marker found in the stream
	eosMarker bool
}
32
// newDecoder creates a new decoder instance. The parameter size provides
// the expected byte size of the decompressed data. If the size is
// unknown use a negative value. In that case the decoder will look for
// a terminating end-of-stream marker.
func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) {
	rd, err := newRangeDecoder(br)
	if err != nil {
		return nil, err
	}
	d = &decoder{
		State: state,
		Dict:  dict,
		rd:    rd,
		size:  size,
		start: dict.pos(),
	}
	return d, nil
}
51
// Reopen restarts the decoder with a new byte reader and a new size. Reopen
// resets the Decompressed counter to zero.
func (d *decoder) Reopen(br io.ByteReader, size int64) error {
	var err error
	if d.rd, err = newRangeDecoder(br); err != nil {
		return err
	}
	d.start = d.Dict.pos()
	d.size = size
	d.eos = false
	// NOTE(review): eosMarker is not reset here — confirm this is
	// intended.
	return nil
}
64
// decodeLiteral decodes a single literal from the LZMA stream.
func (d *decoder) decodeLiteral() (op operation, err error) {
	// The literal context uses the previous byte and the byte at the
	// last match distance.
	litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head)
	match := d.Dict.byteAt(int(d.State.rep[0]) + 1)
	s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState)
	if err != nil {
		return nil, err
	}
	return lit{s}, nil
}
75
// errEOS indicates that an explicit EOS marker has been found in the
// stream.
var errEOS = errors.New("EOS marker found")
78
// readOp decodes the next operation from the compressed stream. It
// returns the operation. If an explicit end of stream marker is
// identified the eos error is returned.
func (d *decoder) readOp() (op operation, err error) {
	// Value of the end of stream (EOS) marker
	const eosDist = 1<<32 - 1

	state, state2, posState := d.State.states(d.Dict.head)

	b, err := d.State.isMatch[state2].Decode(d.rd)
	if err != nil {
		return nil, err
	}
	if b == 0 {
		// literal
		op, err := d.decodeLiteral()
		if err != nil {
			return nil, err
		}
		d.State.updateStateLiteral()
		return op, nil
	}
	b, err = d.State.isRep[state].Decode(d.rd)
	if err != nil {
		return nil, err
	}
	if b == 0 {
		// simple match; shift the older repeat distances down
		d.State.rep[3], d.State.rep[2], d.State.rep[1] =
			d.State.rep[2], d.State.rep[1], d.State.rep[0]

		d.State.updateStateMatch()
		// The length decoder returns the length offset.
		n, err := d.State.lenCodec.Decode(d.rd, posState)
		if err != nil {
			return nil, err
		}
		// The dist decoder returns the distance offset. The actual
		// distance is 1 higher.
		d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n)
		if err != nil {
			return nil, err
		}
		if d.State.rep[0] == eosDist {
			d.eosMarker = true
			return nil, errEOS
		}
		op = match{n: int(n) + minMatchLen,
			distance: int64(d.State.rep[0]) + minDistance}
		return op, nil
	}
	b, err = d.State.isRepG0[state].Decode(d.rd)
	if err != nil {
		return nil, err
	}
	dist := d.State.rep[0]
	if b == 0 {
		// rep match 0
		b, err = d.State.isRepG0Long[state2].Decode(d.rd)
		if err != nil {
			return nil, err
		}
		if b == 0 {
			// short rep: a single byte at the last distance
			d.State.updateStateShortRep()
			op = match{n: 1, distance: int64(dist) + minDistance}
			return op, nil
		}
	} else {
		// rep match 1, 2 or 3: select the distance and move it
		// to the front of the rep array
		b, err = d.State.isRepG1[state].Decode(d.rd)
		if err != nil {
			return nil, err
		}
		if b == 0 {
			dist = d.State.rep[1]
		} else {
			b, err = d.State.isRepG2[state].Decode(d.rd)
			if err != nil {
				return nil, err
			}
			if b == 0 {
				dist = d.State.rep[2]
			} else {
				dist = d.State.rep[3]
				d.State.rep[3] = d.State.rep[2]
			}
			d.State.rep[2] = d.State.rep[1]
		}
		d.State.rep[1] = d.State.rep[0]
		d.State.rep[0] = dist
	}
	n, err := d.State.repLenCodec.Decode(d.rd, posState)
	if err != nil {
		return nil, err
	}
	d.State.updateStateRep()
	op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance}
	return op, nil
}
177
// apply takes the operation and transforms the decoder dictionary
// accordingly.
func (d *decoder) apply(op operation) error {
	var err error
	switch x := op.(type) {
	case match:
		err = d.Dict.writeMatch(x.distance, x.n)
	case lit:
		err = d.Dict.WriteByte(x.b)
	default:
		panic("op is neither a match nor a literal")
	}
	return err
}
191
// decompress fills the dictionary unless no space for new data is
// available. If the end of the LZMA stream has been reached io.EOF will
// be returned.
func (d *decoder) decompress() error {
	if d.eos {
		return io.EOF
	}
	// Keep maxMatchLen bytes of space available so any single
	// operation can always be applied.
	for d.Dict.Available() >= maxMatchLen {
		op, err := d.readOp()
		switch err {
		case nil:
			break
		case errEOS:
			d.eos = true
			if !d.rd.possiblyAtEnd() {
				return errDataAfterEOS
			}
			if d.size >= 0 && d.size != d.Decompressed() {
				return errSize
			}
			return io.EOF
		case io.EOF:
			// The underlying reader ended mid-stream.
			d.eos = true
			return io.ErrUnexpectedEOF
		default:
			return err
		}
		if err = d.apply(op); err != nil {
			return err
		}
		if d.size >= 0 && d.Decompressed() >= d.size {
			d.eos = true
			if d.Decompressed() > d.size {
				return errSize
			}
			if !d.rd.possiblyAtEnd() {
				// Only an EOS marker may follow the
				// expected amount of data.
				switch _, err = d.readOp(); err {
				case nil:
					return errSize
				case io.EOF:
					return io.ErrUnexpectedEOF
				case errEOS:
					break
				default:
					return err
				}
			}
			return io.EOF
		}
	}
	return nil
}
244
// Errors that may be returned while decoding data.
var (
	errDataAfterEOS = errors.New("lzma: data after end of stream marker")
	errSize         = errors.New("lzma: wrong uncompressed data size")
)
250
// Read reads data from the buffer. If no more data is available io.EOF is
// returned.
func (d *decoder) Read(p []byte) (n int, err error) {
	var k int
	for {
		// Read of decoder dict never returns an error.
		k, err = d.Dict.Read(p[n:])
		if err != nil {
			panic(fmt.Errorf("dictionary read error %s", err))
		}
		if k == 0 && d.eos {
			return n, io.EOF
		}
		n += k
		if n >= len(p) {
			return n, nil
		}
		// Refill the dictionary with decompressed data.
		if err = d.decompress(); err != nil && err != io.EOF {
			return n, err
		}
	}
}
273
// Decompressed returns the number of bytes decompressed by the decoder
// since creation or the last Reopen.
func (d *decoder) Decompressed() int64 {
	return d.Dict.pos() - d.start
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go
new file mode 100644
index 0000000..564a12b
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go
@@ -0,0 +1,135 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10)
11
// decoderDict provides the dictionary for the decoder. The whole
// dictionary is used as reader buffer.
type decoderDict struct {
	// ring buffer holding the dictionary data
	buf buffer
	// total number of bytes written to the dictionary
	head int64
}
18
// newDecoderDict creates a new decoder dictionary. The whole dictionary
// will be used as reader buffer.
func newDecoderDict(dictCap int) (d *decoderDict, err error) {
	// lower limit supports easy test cases
	if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
		return nil, errors.New("lzma: dictCap out of range")
	}
	d = &decoderDict{buf: *newBuffer(dictCap)}
	return d, nil
}
29
// Reset clears the dictionary. The read buffer is not changed, so the
// buffered data can still be read.
func (d *decoderDict) Reset() {
	d.head = 0
}

// WriteByte writes a single byte into the dictionary. It is used to
// write literals into the dictionary. The buffer's ErrNoSpace error is
// passed through when the buffer is full.
func (d *decoderDict) WriteByte(c byte) error {
	if err := d.buf.WriteByte(c); err != nil {
		return err
	}
	d.head++
	return nil
}
45
// pos returns the position of the dictionary head.
func (d *decoderDict) pos() int64 { return d.head }

// dictLen returns the actual length of the dictionary: the head
// position capped at the buffer capacity.
func (d *decoderDict) dictLen() int {
	capacity := d.buf.Cap()
	if d.head >= int64(capacity) {
		return capacity
	}
	return int(d.head)
}
57
// byteAt returns a byte stored in the dictionary. If the distance is
// non-positive or exceeds the current length of the dictionary the zero
// byte is returned.
func (d *decoderDict) byteAt(dist int) byte {
	if !(0 < dist && dist <= d.dictLen()) {
		return 0
	}
	// Index relative to the write position, wrapping around the
	// ring buffer.
	i := d.buf.front - dist
	if i < 0 {
		i += len(d.buf.data)
	}
	return d.buf.data[i]
}
71
// writeMatch writes the match at the top of the dictionary. The given
// distance must point in the current dictionary and the length must not
// exceed the maximum length 273 supported in LZMA.
//
// The error value ErrNoSpace indicates that no space is available in
// the dictionary for writing. You need to read from the dictionary
// first.
func (d *decoderDict) writeMatch(dist int64, length int) error {
	if !(0 < dist && dist <= int64(d.dictLen())) {
		return errors.New("writeMatch: distance out of range")
	}
	if !(0 < length && length <= maxMatchLen) {
		return errors.New("writeMatch: length out of range")
	}
	if length > d.buf.Available() {
		return ErrNoSpace
	}
	d.head += int64(length)

	i := d.buf.front - int(dist)
	if i < 0 {
		i += len(d.buf.data)
	}
	// Copy in chunks bounded by the write position; when the match
	// overlaps the front, already-written bytes are repeated.
	for length > 0 {
		var p []byte
		if i >= d.buf.front {
			p = d.buf.data[i:]
			i = 0
		} else {
			p = d.buf.data[i:d.buf.front]
			i = d.buf.front
		}
		if len(p) > length {
			p = p[:length]
		}
		if _, err := d.buf.Write(p); err != nil {
			// Cannot happen: available space was checked above.
			panic(fmt.Errorf("d.buf.Write returned error %s", err))
		}
		length -= len(p)
	}
	return nil
}
114
// Write writes the given bytes into the dictionary and advances the
// head.
func (d *decoderDict) Write(p []byte) (n int, err error) {
	n, err = d.buf.Write(p)
	d.head += int64(n)
	return n, err
}

// Available returns the number of available bytes for writing into the
// decoder dictionary.
func (d *decoderDict) Available() int { return d.buf.Available() }

// Read reads data from the buffer contained in the decoder dictionary.
func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) }

// buffered returns the number of bytes currently buffered in the
// decoder dictionary.
func (d *decoderDict) buffered() int { return d.buf.Buffered() }

// peek gets data from the buffer without advancing the rear index.
func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) }
diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go
new file mode 100644
index 0000000..e08eb98
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go
@@ -0,0 +1,49 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import "fmt"
8
// directCodec allows the encoding and decoding of values with a fixed number
// of bits. The number of bits must be in the range [1,32].
type directCodec byte

// makeDirectCodec creates a directCodec. The function panics if the number of
// bits is not in the range [1,32].
func makeDirectCodec(bits int) directCodec {
	if bits < 1 || bits > 32 {
		panic(fmt.Errorf("bits=%d out of range", bits))
	}
	return directCodec(bits)
}

// Bits returns the number of bits supported by this codec.
func (dc directCodec) Bits() int {
	return int(dc)
}
26
27// Encode uses the range encoder to encode a value with the fixed number of
28// bits. The most-significant bit is encoded first.
29func (dc directCodec) Encode(e *rangeEncoder, v uint32) error {
30 for i := int(dc) - 1; i >= 0; i-- {
31 if err := e.DirectEncodeBit(v >> uint(i)); err != nil {
32 return err
33 }
34 }
35 return nil
36}
37
38// Decode uses the range decoder to decode a value with the given number of
39// given bits. The most-significant bit is decoded first.
40func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) {
41 for i := int(dc) - 1; i >= 0; i-- {
42 x, err := d.DirectDecodeBit()
43 if err != nil {
44 return 0, err
45 }
46 v = (v << 1) | x
47 }
48 return v, nil
49}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go
new file mode 100644
index 0000000..b053a2d
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go
@@ -0,0 +1,156 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
// Constants used by the distance codec.
const (
	// minimum supported distance
	minDistance = 1
	// maximum supported distance, value is used for the eos marker.
	maxDistance = 1 << 32
	// number of the supported len states
	lenStates = 4
	// start for the position models
	startPosModel = 4
	// first index with align bits support
	endPosModel = 14
	// bits for the position slots
	posSlotBits = 6
	// number of align bits
	alignBits = 4
	// maximum position slot
	maxPosSlot = 63
)

// distCodec provides encoding and decoding of distance values. It uses
// a bit-tree codec for the position slot, reverse bit-tree codecs for
// mid-range distances and for the four align bits of large distances.
type distCodec struct {
	// one position-slot codec per len state
	posSlotCodecs [lenStates]treeCodec
	// models for position slots in [startPosModel, endPosModel)
	posModel [endPosModel - startPosModel]treeReverseCodec
	// model for the four least-significant bits of large distances
	alignCodec treeReverseCodec
}
33
34// deepcopy initializes dc as deep copy of the source.
35func (dc *distCodec) deepcopy(src *distCodec) {
36 if dc == src {
37 return
38 }
39 for i := range dc.posSlotCodecs {
40 dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i])
41 }
42 for i := range dc.posModel {
43 dc.posModel[i].deepcopy(&src.posModel[i])
44 }
45 dc.alignCodec.deepcopy(&src.alignCodec)
46}
47
48// distBits returns the number of bits required to encode dist.
49func distBits(dist uint32) int {
50 if dist < startPosModel {
51 return 6
52 }
53 // slot s > 3, dist d
54 // s = 2(bits(d)-1) + bit(d, bits(d)-2)
55 // s>>1 = bits(d)-1
56 // bits(d) = 32-nlz32(d)
57 // s>>1=31-nlz32(d)
58 // n = 5 + (s>>1) = 36 - nlz32(d)
59 return 36 - nlz32(dist)
60}
61
// init initializes the distance codec by resetting all probability
// models to their start values.
func (dc *distCodec) init() {
	for i := range dc.posSlotCodecs {
		dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits)
	}
	for i := range dc.posModel {
		posSlot := startPosModel + i
		// number of model bits for this position slot
		bits := (posSlot >> 1) - 1
		dc.posModel[i] = makeTreeReverseCodec(bits)
	}
	dc.alignCodec = makeTreeReverseCodec(alignBits)
}
74
75// lenState converts the value l to a supported lenState value.
76func lenState(l uint32) uint32 {
77 if l >= lenStates {
78 l = lenStates - 1
79 }
80 return l
81}
82
// Encode encodes the distance using the parameter l. Dist can have values from
// the full range of uint32 values. To get the distance offset the actual match
// distance has to be decreased by 1. A distance offset of 0xffffffff (eos)
// indicates the end of the stream.
func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) {
	// Compute the posSlot using nlz32
	var posSlot uint32
	var bits uint32
	if dist < startPosModel {
		// small distances are fully encoded by the slot value
		posSlot = dist
	} else {
		// bits is the position of the second-highest significant bit
		bits = uint32(30 - nlz32(dist))
		posSlot = startPosModel - 2 + (bits << 1)
		posSlot += (dist >> uint(bits)) & 1
	}

	if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil {
		return
	}

	switch {
	case posSlot < startPosModel:
		// slot alone encodes the distance
		return nil
	case posSlot < endPosModel:
		// mid range: reverse bit-tree model for the remaining bits
		tc := &dc.posModel[posSlot-startPosModel]
		return tc.Encode(dist, e)
	}
	// large distances: high bits are direct-coded, the low four bits
	// use the align model
	dic := directCodec(bits - alignBits)
	if err = dic.Encode(e, dist>>alignBits); err != nil {
		return
	}
	return dc.alignCodec.Encode(dist, e)
}
116
// Decode decodes the distance offset using the parameter l. The dist value
// 0xffffffff (eos) indicates the end of the stream. Add one to the distance
// offset to get the actual match distance.
func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) {
	posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d)
	if err != nil {
		return
	}

	// posSlot equals distance
	if posSlot < startPosModel {
		return posSlot, nil
	}

	// posSlot uses the individual models; reconstruct the high bits
	// of the distance from the slot value
	bits := (posSlot >> 1) - 1
	dist = (2 | (posSlot & 1)) << bits
	var u uint32
	if posSlot < endPosModel {
		// mid range: reverse bit-tree model supplies the low bits
		tc := &dc.posModel[posSlot-startPosModel]
		if u, err = tc.Decode(d); err != nil {
			return 0, err
		}
		dist += u
		return dist, nil
	}

	// posSlots use direct encoding and a single model for the four align
	// bits.
	dic := directCodec(bits - alignBits)
	if u, err = dic.Decode(d); err != nil {
		return 0, err
	}
	dist += u << alignBits
	if u, err = dc.alignCodec.Decode(d); err != nil {
		return 0, err
	}
	dist += u
	return dist, nil
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go
new file mode 100644
index 0000000..18ce009
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go
@@ -0,0 +1,268 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "fmt"
9 "io"
10)
11
// opLenMargin provides the upper limit of the number of bytes required
// to encode a single operation.
const opLenMargin = 10

// compressFlags control the compression process.
type compressFlags uint32

// Values for compressFlags.
const (
	// all data should be compressed, even if compression is not
	// optimal.
	all compressFlags = 1 << iota
)

// encoderFlags provide the flags for an encoder.
type encoderFlags uint32

// Flags for the encoder.
const (
	// eosMarker requests an EOS marker to be written.
	eosMarker encoderFlags = 1 << iota
)

// encoder compresses data buffered in the encoder dictionary and writes
// it into a byte writer.
type encoder struct {
	dict  *encoderDict
	state *state
	re    *rangeEncoder
	// dictionary head position when the encoder was (re)opened;
	// used to compute Compressed()
	start int64
	// generate eos marker
	marker bool
	limit  bool
	// bytes that must stay writable to safely close the stream
	// (opLenMargin, plus 5 when an eos marker is requested)
	margin int
}
47
48// newEncoder creates a new encoder. If the byte writer must be
49// limited use LimitedByteWriter provided by this package. The flags
50// argument supports the eosMarker flag, controlling whether a
51// terminating end-of-stream marker must be written.
52func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict,
53 flags encoderFlags) (e *encoder, err error) {
54
55 re, err := newRangeEncoder(bw)
56 if err != nil {
57 return nil, err
58 }
59 e = &encoder{
60 dict: dict,
61 state: state,
62 re: re,
63 marker: flags&eosMarker != 0,
64 start: dict.Pos(),
65 margin: opLenMargin,
66 }
67 if e.marker {
68 e.margin += 5
69 }
70 return e, nil
71}
72
73// Write writes the bytes from p into the dictionary. If not enough
74// space is available the data in the dictionary buffer will be
75// compressed to make additional space available. If the limit of the
76// underlying writer has been reached ErrLimit will be returned.
77func (e *encoder) Write(p []byte) (n int, err error) {
78 for {
79 k, err := e.dict.Write(p[n:])
80 n += k
81 if err == ErrNoSpace {
82 if err = e.compress(0); err != nil {
83 return n, err
84 }
85 continue
86 }
87 return n, err
88 }
89}
90
91// Reopen reopens the encoder with a new byte writer.
92func (e *encoder) Reopen(bw io.ByteWriter) error {
93 var err error
94 if e.re, err = newRangeEncoder(bw); err != nil {
95 return err
96 }
97 e.start = e.dict.Pos()
98 e.limit = false
99 return nil
100}
101
// writeLiteral writes a literal into the LZMA stream
func (e *encoder) writeLiteral(l lit) error {
	var err error
	state, state2, _ := e.state.states(e.dict.Pos())
	// bit 0: the next operation is a literal, not a match
	if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil {
		return err
	}
	litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos())
	// the byte at the most recent repeat distance provides context
	// for the literal coder
	match := e.dict.ByteAt(int(e.state.rep[0]) + 1)
	err = e.state.litCodec.Encode(e.re, l.b, state, match, litState)
	if err != nil {
		return err
	}
	e.state.updateStateLiteral()
	return nil
}
118
// iverson implements the Iverson bracket as proposed by Donald Knuth in
// his book Concrete Mathematics: it maps true to 1 and false to 0.
func iverson(ok bool) uint32 {
	var r uint32
	if ok {
		r = 1
	}
	return r
}
127
// writeMatch writes a repetition operation into the operation stream
func (e *encoder) writeMatch(m match) error {
	var err error
	if !(minDistance <= m.distance && m.distance <= maxDistance) {
		panic(fmt.Errorf("match distance %d out of range", m.distance))
	}
	dist := uint32(m.distance - minDistance)
	// length 1 is only permitted as a short repetition of rep[0]
	if !(minMatchLen <= m.n && m.n <= maxMatchLen) &&
		!(dist == e.state.rep[0] && m.n == 1) {
		panic(fmt.Errorf(
			"match length %d out of range; dist %d rep[0] %d",
			m.n, dist, e.state.rep[0]))
	}
	state, state2, posState := e.state.states(e.dict.Pos())
	// bit 1: the next operation is a match
	if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil {
		return err
	}
	// g indexes the slot of dist in the repeat-distance cache;
	// g == 4 means dist is not cached (simple match)
	g := 0
	for ; g < 4; g++ {
		if e.state.rep[g] == dist {
			break
		}
	}
	b := iverson(g < 4)
	if err = e.state.isRep[state].Encode(e.re, b); err != nil {
		return err
	}
	n := uint32(m.n - minMatchLen)
	if b == 0 {
		// simple match
		e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] =
			e.state.rep[2], e.state.rep[1], e.state.rep[0], dist
		e.state.updateStateMatch()
		if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil {
			return err
		}
		return e.state.distCodec.Encode(e.re, dist, n)
	}
	b = iverson(g != 0)
	if err = e.state.isRepG0[state].Encode(e.re, b); err != nil {
		return err
	}
	if b == 0 {
		// g == 0
		b = iverson(m.n != 1)
		if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil {
			return err
		}
		if b == 0 {
			// short rep: reuse rep[0] with length 1
			e.state.updateStateShortRep()
			return nil
		}
	} else {
		// g in {1,2,3}
		b = iverson(g != 1)
		if err = e.state.isRepG1[state].Encode(e.re, b); err != nil {
			return err
		}
		if b == 1 {
			// g in {2,3}
			b = iverson(g != 2)
			err = e.state.isRepG2[state].Encode(e.re, b)
			if err != nil {
				return err
			}
			if b == 1 {
				e.state.rep[3] = e.state.rep[2]
			}
			e.state.rep[2] = e.state.rep[1]
		}
		// move the used distance to the front of the cache
		e.state.rep[1] = e.state.rep[0]
		e.state.rep[0] = dist
	}
	e.state.updateStateRep()
	return e.state.repLenCodec.Encode(e.re, n, posState)
}
204
205// writeOp writes a single operation to the range encoder. The function
206// checks whether there is enough space available to close the LZMA
207// stream.
208func (e *encoder) writeOp(op operation) error {
209 if e.re.Available() < int64(e.margin) {
210 return ErrLimit
211 }
212 switch x := op.(type) {
213 case lit:
214 return e.writeLiteral(x)
215 case match:
216 return e.writeMatch(x)
217 default:
218 panic("unexpected operation")
219 }
220}
221
222// compress compressed data from the dictionary buffer. If the flag all
223// is set, all data in the dictionary buffer will be compressed. The
224// function returns ErrLimit if the underlying writer has reached its
225// limit.
226func (e *encoder) compress(flags compressFlags) error {
227 n := 0
228 if flags&all == 0 {
229 n = maxMatchLen - 1
230 }
231 d := e.dict
232 m := d.m
233 for d.Buffered() > n {
234 op := m.NextOp(e.state.rep)
235 if err := e.writeOp(op); err != nil {
236 return err
237 }
238 d.Discard(op.Len())
239 }
240 return nil
241}
242
// eosMatch is a pseudo operation that indicates the end of the stream.
var eosMatch = match{distance: maxDistance, n: minMatchLen}

// Close terminates the LZMA stream. If requested the end-of-stream
// marker will be written. If the byte writer limit has been or will be
// reached during compression of the remaining data in the buffer the
// LZMA stream will be closed and data will remain in the buffer.
func (e *encoder) Close() error {
	err := e.compress(all)
	// ErrLimit is tolerated here: the stream is closed and the
	// remaining data stays buffered.
	if err != nil && err != ErrLimit {
		return err
	}
	if e.marker {
		if err := e.writeMatch(eosMatch); err != nil {
			return err
		}
	}
	err = e.re.Close()
	return err
}

// Compressed returns the number of bytes of the input data that have
// been compressed.
func (e *encoder) Compressed() int64 {
	return e.dict.Pos() - e.start
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
new file mode 100644
index 0000000..9d0fbc7
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
@@ -0,0 +1,149 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10 "io"
11)
12
// matcher is an interface that supports the identification of the next
// operation.
type matcher interface {
	io.Writer
	SetDict(d *encoderDict)
	NextOp(rep [4]uint32) operation
}

// encoderDict provides the dictionary of the encoder. It includes an
// additional buffer atop of the actual dictionary.
type encoderDict struct {
	buf  buffer
	m    matcher
	head int64
	// capacity of the dictionary proper, excluding the extra buffer
	capacity int
	// preallocated array
	data [maxMatchLen]byte
}
31
32// newEncoderDict creates the encoder dictionary. The argument bufSize
33// defines the size of the additional buffer.
34func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) {
35 if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
36 return nil, errors.New(
37 "lzma: dictionary capacity out of range")
38 }
39 if bufSize < 1 {
40 return nil, errors.New(
41 "lzma: buffer size must be larger than zero")
42 }
43 d = &encoderDict{
44 buf: *newBuffer(dictCap + bufSize),
45 capacity: dictCap,
46 m: m,
47 }
48 m.SetDict(d)
49 return d, nil
50}
51
52// Discard discards n bytes. Note that n must not be larger than
53// MaxMatchLen.
54func (d *encoderDict) Discard(n int) {
55 p := d.data[:n]
56 k, _ := d.buf.Read(p)
57 if k < n {
58 panic(fmt.Errorf("lzma: can't discard %d bytes", n))
59 }
60 d.head += int64(n)
61 d.m.Write(p)
62}
63
64// Len returns the data available in the encoder dictionary.
65func (d *encoderDict) Len() int {
66 n := d.buf.Available()
67 if int64(n) > d.head {
68 return int(d.head)
69 }
70 return n
71}
72
73// DictLen returns the actual length of data in the dictionary.
74func (d *encoderDict) DictLen() int {
75 if d.head < int64(d.capacity) {
76 return int(d.head)
77 }
78 return d.capacity
79}
80
81// Available returns the number of bytes that can be written by a
82// following Write call.
83func (d *encoderDict) Available() int {
84 return d.buf.Available() - d.DictLen()
85}
86
87// Write writes data into the dictionary buffer. Note that the position
88// of the dictionary head will not be moved. If there is not enough
89// space in the buffer ErrNoSpace will be returned.
90func (d *encoderDict) Write(p []byte) (n int, err error) {
91 m := d.Available()
92 if len(p) > m {
93 p = p[:m]
94 err = ErrNoSpace
95 }
96 var e error
97 if n, e = d.buf.Write(p); e != nil {
98 err = e
99 }
100 return n, err
101}
102
103// Pos returns the position of the head.
104func (d *encoderDict) Pos() int64 { return d.head }
105
106// ByteAt returns the byte at the given distance.
107func (d *encoderDict) ByteAt(distance int) byte {
108 if !(0 < distance && distance <= d.Len()) {
109 return 0
110 }
111 i := d.buf.rear - distance
112 if i < 0 {
113 i += len(d.buf.data)
114 }
115 return d.buf.data[i]
116}
117
// CopyN copies the last n bytes from the dictionary into the provided
// writer. This is used for copying uncompressed data into an
// uncompressed segment.
func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) {
	if n <= 0 {
		return 0, nil
	}
	m := d.Len()
	if n > m {
		// fewer bytes buffered than requested
		n = m
		err = ErrNoSpace
	}
	i := d.buf.rear - n
	var e error
	if i < 0 {
		// the requested range wraps around the end of the circular
		// buffer; write the tail segment first
		i += len(d.buf.data)
		if written, e = w.Write(d.buf.data[i:]); e != nil {
			return written, e
		}
		i = 0
	}
	var k int
	k, e = w.Write(d.buf.data[i:d.buf.rear])
	written += k
	if e != nil {
		err = e
	}
	return written, err
}

// Buffered returns the number of bytes in the buffer.
func (d *encoderDict) Buffered() int { return d.buf.Buffered() }
diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma
new file mode 100644
index 0000000..5edad63
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma
Binary files differ
diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go
new file mode 100644
index 0000000..d786a97
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go
@@ -0,0 +1,309 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10
11 "github.com/ulikunitz/xz/internal/hash"
12)
13
/* For compression we need to find byte sequences that match the byte
 * sequence at the dictionary head. A hash table is a simple method to
 * provide this capability.
 */

// maxMatches limits the number of matches requested from the Matches
// function. This controls the speed of the overall encoding.
const maxMatches = 16

// shortDists defines the number of short distances supported by the
// implementation.
const shortDists = 8

// The minimum is somehow arbitrary but the maximum is limited by the
// memory requirements of the hash table.
const (
	minTableExponent = 9
	maxTableExponent = 20
)

// newRoller contains the function used to create an instance of the
// hash.Roller.
var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) }

// hashTable stores the hash table including the rolling hash method.
//
// We implement chained hashing into a circular buffer. Each entry in
// the circular buffer stores the delta distance to the next position with a
// word that has the same hash value.
type hashTable struct {
	dict *encoderDict
	// actual hash table
	t []int64
	// circular list data with the offset to the next word
	data  []uint32
	front int
	// mask for computing the index for the hash table
	mask uint64
	// hash offset; initial value is -int64(wordLen)
	hoff int64
	// length of the hashed word
	wordLen int
	// hash roller for computing the hash values for the Write
	// method
	wr hash.Roller
	// hash roller for computing arbitrary hashes
	hr hash.Roller
	// preallocated slices
	p         [maxMatches]int64
	distances [maxMatches + shortDists]int
}
65
66// hashTableExponent derives the hash table exponent from the dictionary
67// capacity.
68func hashTableExponent(n uint32) int {
69 e := 30 - nlz32(n)
70 switch {
71 case e < minTableExponent:
72 e = minTableExponent
73 case e > maxTableExponent:
74 e = maxTableExponent
75 }
76 return e
77}
78
79// newHashTable creates a new hash table for words of length wordLen
80func newHashTable(capacity int, wordLen int) (t *hashTable, err error) {
81 if !(0 < capacity) {
82 return nil, errors.New(
83 "newHashTable: capacity must not be negative")
84 }
85 exp := hashTableExponent(uint32(capacity))
86 if !(1 <= wordLen && wordLen <= 4) {
87 return nil, errors.New("newHashTable: " +
88 "argument wordLen out of range")
89 }
90 n := 1 << uint(exp)
91 if n <= 0 {
92 panic("newHashTable: exponent is too large")
93 }
94 t = &hashTable{
95 t: make([]int64, n),
96 data: make([]uint32, capacity),
97 mask: (uint64(1) << uint(exp)) - 1,
98 hoff: -int64(wordLen),
99 wordLen: wordLen,
100 wr: newRoller(wordLen),
101 hr: newRoller(wordLen),
102 }
103 return t, nil
104}
105
106func (t *hashTable) SetDict(d *encoderDict) { t.dict = d }
107
108// buffered returns the number of bytes that are currently hashed.
109func (t *hashTable) buffered() int {
110 n := t.hoff + 1
111 switch {
112 case n <= 0:
113 return 0
114 case n >= int64(len(t.data)):
115 return len(t.data)
116 }
117 return int(n)
118}
119
120// addIndex adds n to an index ensuring that is stays inside the
121// circular buffer for the hash chain.
122func (t *hashTable) addIndex(i, n int) int {
123 i += n - len(t.data)
124 if i < 0 {
125 i += len(t.data)
126 }
127 return i
128}
129
// putDelta puts the delta instance at the current front of the circular
// chain buffer.
func (t *hashTable) putDelta(delta uint32) {
	t.data[t.front] = delta
	t.front = t.addIndex(t.front, 1)
}

// putEntry puts a new entry into the hash table. If there is already a
// value stored it is moved into the circular chain buffer.
func (t *hashTable) putEntry(h uint64, pos int64) {
	if pos < 0 {
		return
	}
	i := h & t.mask
	// positions are stored incremented by one so that zero can mark
	// an empty slot
	old := t.t[i] - 1
	t.t[i] = pos + 1
	var delta int64
	if old >= 0 {
		delta = pos - old
		// a stored delta of zero terminates the chain; it is used
		// when the previous position doesn't fit into uint32 or has
		// already left the circular buffer
		if delta > 1<<32-1 || delta > int64(t.buffered()) {
			delta = 0
		}
	}
	t.putDelta(uint32(delta))
}
155
156// WriteByte converts a single byte into a hash and puts them into the hash
157// table.
158func (t *hashTable) WriteByte(b byte) error {
159 h := t.wr.RollByte(b)
160 t.hoff++
161 t.putEntry(h, t.hoff)
162 return nil
163}
164
165// Write converts the bytes provided into hash tables and stores the
166// abbreviated offsets into the hash table. The method will never return an
167// error.
168func (t *hashTable) Write(p []byte) (n int, err error) {
169 for _, b := range p {
170 // WriteByte doesn't generate an error.
171 t.WriteByte(b)
172 }
173 return len(p), nil
174}
175
// getMatches returns the matches for a specific hash. The function returns
// the number of positions found.
//
// TODO: Make a getDistances because that is what we are actually
// interested in.
func (t *hashTable) getMatches(h uint64, positions []int64) (n int) {
	if t.hoff < 0 || len(positions) == 0 {
		return 0
	}
	buffered := t.buffered()
	// tailPos is the oldest position still covered by the chain buffer
	tailPos := t.hoff + 1 - int64(buffered)
	rear := t.front - buffered
	if rear >= 0 {
		rear -= len(t.data)
	}
	// get the slot for the hash
	pos := t.t[h&t.mask] - 1
	delta := pos - tailPos
	for {
		if delta < 0 {
			// the position has left the chain buffer
			return n
		}
		positions[n] = tailPos + delta
		n++
		if n >= len(positions) {
			return n
		}
		// follow the chain to the previous position with the
		// same hash
		i := rear + int(delta)
		if i < 0 {
			i += len(t.data)
		}
		u := t.data[i]
		if u == 0 {
			// end of the hash chain
			return n
		}
		delta -= int64(u)
	}
}
213
214// hash computes the rolling hash for the word stored in p. For correct
215// results its length must be equal to t.wordLen.
216func (t *hashTable) hash(p []byte) uint64 {
217 var h uint64
218 for _, b := range p {
219 h = t.hr.RollByte(b)
220 }
221 return h
222}
223
224// Matches fills the positions slice with potential matches. The
225// functions returns the number of positions filled into positions. The
226// byte slice p must have word length of the hash table.
227func (t *hashTable) Matches(p []byte, positions []int64) int {
228 if len(p) != t.wordLen {
229 panic(fmt.Errorf(
230 "byte slice must have length %d", t.wordLen))
231 }
232 h := t.hash(p)
233 return t.getMatches(h, positions)
234}
235
// NextOp identifies the next operation using the hash table.
//
// TODO: Use all repetitions to find matches.
func (t *hashTable) NextOp(rep [4]uint32) operation {
	// get positions
	data := t.dict.data[:maxMatchLen]
	n, _ := t.dict.buf.Peek(data)
	data = data[:n]
	var p []int64
	if n < t.wordLen {
		// not enough data to compute a word hash
		p = t.p[:0]
	} else {
		p = t.p[:maxMatches]
		n = t.Matches(data[:t.wordLen], p)
		p = p[:n]
	}

	// convert positions in potential distances; the short distances
	// 1..8 are always tried
	head := t.dict.head
	dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8)
	for _, pos := range p {
		dis := int(head - pos)
		if dis > shortDists {
			dists = append(dists, dis)
		}
	}

	// check distances
	var m match
	dictLen := t.dict.DictLen()
	for _, dist := range dists {
		if dist > dictLen {
			continue
		}

		// Here comes a trick. We are only interested in matches
		// that are longer than the matches we have found
		// before. So before we test the whole byte sequence at
		// the given distance, we test the first byte that would
		// make the match longer. If it doesn't match the byte
		// to match, we don't need to care any longer.
		i := t.dict.buf.rear - dist + m.n
		if i < 0 {
			i += len(t.dict.buf.data)
		}
		if t.dict.buf.data[i] != data[m.n] {
			// We can't get a longer match. Jump to the next
			// distance.
			continue
		}

		n := t.dict.buf.matchLen(dist, data)
		switch n {
		case 0:
			continue
		case 1:
			// a length-1 match is only useful as a short rep
			// of the most recent distance
			if uint32(dist-minDistance) != rep[0] {
				continue
			}
		}
		if n > m.n {
			m = match{int64(dist), n}
			if n == len(data) {
				// No better match will be found.
				break
			}
		}
	}

	if m.n == 0 {
		// no match found: emit a literal
		return lit{data[0]}
	}
	return m
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go
new file mode 100644
index 0000000..bc70896
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/header.go
@@ -0,0 +1,167 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10)
11
// uint32LE reads an uint32 integer from the first four bytes of b
// interpreted as little endian.
func uint32LE(b []byte) uint32 {
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 |
		uint32(b[3])<<24
}

// uint64LE converts the uint64 value stored as little endian in the
// first eight bytes of b to an uint64 value.
func uint64LE(b []byte) uint64 {
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 |
		uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 |
		uint64(b[6])<<48 | uint64(b[7])<<56
}
34
// putUint32LE puts an uint32 integer into a byte slice that must have at
// least a length of 4 bytes.
func putUint32LE(b []byte, x uint32) {
	for i := 0; i < 4; i++ {
		b[i] = byte(x >> (8 * uint(i)))
	}
}

// putUint64LE puts the uint64 value into the byte slice as little endian
// value. The byte slice b must have at least place for 8 bytes.
func putUint64LE(b []byte, x uint64) {
	for i := 0; i < 8; i++ {
		b[i] = byte(x >> (8 * uint(i)))
	}
}
56
// noHeaderSize defines the value of the length field in the LZMA header.
const noHeaderSize uint64 = 1<<64 - 1

// HeaderLen provides the length of the LZMA file header.
const HeaderLen = 13

// header represents the header of an LZMA file.
type header struct {
	properties Properties
	// dictionary capacity in bytes
	dictCap int
	// uncompressed size; negative value if no size is given
	size int64
}
70
71// marshalBinary marshals the header.
72func (h *header) marshalBinary() (data []byte, err error) {
73 if err = h.properties.verify(); err != nil {
74 return nil, err
75 }
76 if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) {
77 return nil, fmt.Errorf("lzma: DictCap %d out of range",
78 h.dictCap)
79 }
80
81 data = make([]byte, 13)
82
83 // property byte
84 data[0] = h.properties.Code()
85
86 // dictionary capacity
87 putUint32LE(data[1:5], uint32(h.dictCap))
88
89 // uncompressed size
90 var s uint64
91 if h.size > 0 {
92 s = uint64(h.size)
93 } else {
94 s = noHeaderSize
95 }
96 putUint64LE(data[5:], s)
97
98 return data, nil
99}
100
// unmarshalBinary unmarshals the header.
func (h *header) unmarshalBinary(data []byte) error {
	if len(data) != HeaderLen {
		return errors.New("lzma.unmarshalBinary: data has wrong length")
	}

	// properties
	var err error
	if h.properties, err = PropertiesForCode(data[0]); err != nil {
		return err
	}

	// dictionary capacity
	h.dictCap = int(uint32LE(data[1:]))
	if h.dictCap < 0 {
		// only possible on platforms where int is 32 bit
		return errors.New(
			"LZMA header: dictionary capacity exceeds maximum " +
				"integer")
	}

	// uncompressed size; the all-ones value means the size is unknown
	s := uint64LE(data[5:])
	if s == noHeaderSize {
		h.size = -1
	} else {
		h.size = int64(s)
		if h.size < 0 {
			return errors.New(
				"LZMA header: uncompressed size " +
					"out of int64 range")
		}
	}

	return nil
}
136
137// validDictCap checks whether the dictionary capacity is correct. This
138// is used to weed out wrong file headers.
139func validDictCap(dictcap int) bool {
140 if int64(dictcap) == MaxDictCap {
141 return true
142 }
143 for n := uint(10); n < 32; n++ {
144 if dictcap == 1<<n {
145 return true
146 }
147 if dictcap == 1<<n+1<<(n-1) {
148 return true
149 }
150 }
151 return false
152}
153
154// ValidHeader checks for a valid LZMA file header. It allows only
155// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
156// there is an explicit size it must not exceed 256 GiB. The length of
157// the data argument must be HeaderLen.
158func ValidHeader(data []byte) bool {
159 var h header
160 if err := h.unmarshalBinary(data); err != nil {
161 return false
162 }
163 if !validDictCap(h.dictCap) {
164 return false
165 }
166 return h.size < 0 || h.size <= 1<<38
167}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go
new file mode 100644
index 0000000..ac6a71a
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go
@@ -0,0 +1,398 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10 "io"
11)
12
// Limits for a single LZMA2 chunk as defined by the format.
const (
	// maximum size of compressed data in a chunk
	maxCompressed = 1 << 16
	// maximum size of uncompressed data in a chunk
	maxUncompressed = 1 << 21
)

// chunkType represents the type of an LZMA2 chunk. Note that this
// value is an internal representation and no actual encoding of a LZMA2
// chunk header.
type chunkType byte

// Possible values for the chunk type.
const (
	// end of stream
	cEOS chunkType = iota
	// uncompressed; reset dictionary
	cUD
	// uncompressed; no reset of dictionary
	cU
	// LZMA compressed; no reset
	cL
	// LZMA compressed; reset state
	cLR
	// LZMA compressed; reset state; new property value
	cLRN
	// LZMA compressed; reset state; new property value; reset dictionary
	cLRND
)

// chunkTypeStrings provide a string representation for the chunk types.
// The array is indexed by the chunkType value.
var chunkTypeStrings = [...]string{
	cEOS:  "EOS",
	cU:    "U",
	cUD:   "UD",
	cL:    "L",
	cLR:   "LR",
	cLRN:  "LRN",
	cLRND: "LRND",
}
53
54// String returns a string representation of the chunk type.
55func (c chunkType) String() string {
56 if !(cEOS <= c && c <= cLRND) {
57 return "unknown"
58 }
59 return chunkTypeStrings[c]
60}
61
// Actual encodings for the chunk types in the value. Note that the high
// uncompressed size bits are stored in the header byte additionally.
const (
	hEOS  = 0
	hUD   = 1
	hU    = 2
	hL    = 1 << 7
	hLR   = 1<<7 | 1<<5
	hLRN  = 1<<7 | 1<<6
	hLRND = 1<<7 | 1<<6 | 1<<5
)

// errHeaderByte indicates an unsupported value for the chunk header
// byte. This byte starts the variable-length chunk header.
var errHeaderByte = errors.New("lzma: unsupported chunk header byte")
77
78// headerChunkType converts the header byte into a chunk type. It
79// ignores the uncompressed size bits in the chunk header byte.
80func headerChunkType(h byte) (c chunkType, err error) {
81 if h&hL == 0 {
82 // no compression
83 switch h {
84 case hEOS:
85 c = cEOS
86 case hUD:
87 c = cUD
88 case hU:
89 c = cU
90 default:
91 return 0, errHeaderByte
92 }
93 return
94 }
95 switch h & hLRND {
96 case hL:
97 c = cL
98 case hLR:
99 c = cLR
100 case hLRN:
101 c = cLRN
102 case hLRND:
103 c = cLRND
104 default:
105 return 0, errHeaderByte
106 }
107 return
108}
109
110// uncompressedHeaderLen provides the length of an uncompressed header
111const uncompressedHeaderLen = 3
112
113// headerLen returns the length of the LZMA2 header for a given chunk
114// type.
115func headerLen(c chunkType) int {
116 switch c {
117 case cEOS:
118 return 1
119 case cU, cUD:
120 return uncompressedHeaderLen
121 case cL, cLR:
122 return 5
123 case cLRN, cLRND:
124 return 6
125 }
126 panic(fmt.Errorf("unsupported chunk type %d", c))
127}
128
// chunkHeader represents the contents of a chunk header.
type chunkHeader struct {
	// ctype is the decoded chunk type, not the raw header byte.
	ctype chunkType
	// uncompressed holds the uncompressed size field for non-EOS chunks.
	uncompressed uint32
	// compressed holds the compressed size field for LZMA chunks.
	compressed uint16
	// props holds the LZMA properties for cLRN/cLRND chunks.
	props Properties
}

// String returns a string representation of the chunk header.
func (h *chunkHeader) String() string {
	return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed,
		h.compressed, &h.props)
}
142
// UnmarshalBinary reads the content of the chunk header from the data
// slice. The slice must have the correct length as given by headerLen
// for the chunk type encoded in the first byte.
func (h *chunkHeader) UnmarshalBinary(data []byte) error {
	if len(data) == 0 {
		return errors.New("no data")
	}
	c, err := headerChunkType(data[0])
	if err != nil {
		return err
	}

	n := headerLen(c)
	if len(data) < n {
		return errors.New("incomplete data")
	}
	if len(data) > n {
		return errors.New("invalid data length")
	}

	*h = chunkHeader{ctype: c}
	if c == cEOS {
		return nil
	}

	// low 16 bits of the uncompressed size (bytes 1-2, big-endian)
	h.uncompressed = uint32(uint16BE(data[1:3]))
	if c <= cU {
		return nil
	}
	// for compressed chunks the bits not covered by the hLRND mask in
	// the header byte carry bits 16-20 of the uncompressed size
	h.uncompressed |= uint32(data[0]&^hLRND) << 16

	h.compressed = uint16BE(data[3:5])
	if c <= cLR {
		return nil
	}

	// cLRN/cLRND chunks carry a new properties byte
	h.props, err = PropertiesForCode(data[5])
	return err
}

// MarshalBinary encodes the chunk header value. The function checks
// whether the content of the chunk header is correct.
func (h *chunkHeader) MarshalBinary() (data []byte, err error) {
	if h.ctype > cLRND {
		return nil, errors.New("invalid chunk type")
	}
	if err = h.props.verify(); err != nil {
		return nil, err
	}

	data = make([]byte, headerLen(h.ctype))

	switch h.ctype {
	case cEOS:
		// a single zero byte terminates the stream
		return data, nil
	case cUD:
		data[0] = hUD
	case cU:
		data[0] = hU
	case cL:
		data[0] = hL
	case cLR:
		data[0] = hLR
	case cLRN:
		data[0] = hLRN
	case cLRND:
		data[0] = hLRND
	}

	putUint16BE(data[1:3], uint16(h.uncompressed))
	if h.ctype <= cU {
		return data, nil
	}
	// store bits 16-20 of the uncompressed size in the header byte
	data[0] |= byte(h.uncompressed>>16) &^ hLRND

	putUint16BE(data[3:5], h.compressed)
	if h.ctype <= cLR {
		return data, nil
	}

	data[5] = h.props.Code()
	return data, nil
}
225
// readChunkHeader reads the chunk header from the IO reader. It first
// reads the single type byte, derives the full header length from it
// and then reads the remaining header bytes.
func readChunkHeader(r io.Reader) (h *chunkHeader, err error) {
	// capacity 6 covers the longest possible header (cLRN/cLRND)
	p := make([]byte, 1, 6)
	if _, err = io.ReadFull(r, p); err != nil {
		return
	}
	c, err := headerChunkType(p[0])
	if err != nil {
		return
	}
	p = p[:headerLen(c)]
	if _, err = io.ReadFull(r, p[1:]); err != nil {
		return
	}
	h = new(chunkHeader)
	if err = h.UnmarshalBinary(p); err != nil {
		return nil, err
	}
	return h, nil
}
246
// uint16BE converts a big-endian uint16 representation to an uint16
// value.
func uint16BE(p []byte) uint16 {
	x := uint16(p[0]) << 8
	x |= uint16(p[1])
	return x
}
252
// putUint16BE puts the big-endian uint16 presentation into the given
// slice, which must have at least two bytes.
func putUint16BE(p []byte, x uint16) {
	p[0], p[1] = byte(x>>8), byte(x)
}
259
// chunkState is used to manage the state of the chunks. The states are
// stored as printable bytes so that debug output stays readable.
type chunkState byte

// start and stop define the initial and terminating state of the chunk
// state. Both constants carry an explicit chunkState type; previously
// stop was an untyped rune constant, inconsistent with start.
const (
	start chunkState = 'S'
	stop  chunkState = 'T'
)
269
// errors for the chunk state handling
var (
	// errChunkType reports a chunk type that is not permitted in the
	// current chunk state.
	errChunkType = errors.New("lzma: unexpected chunk type")
	// errState reports a chunk state value outside S/L/R/U/T.
	errState = errors.New("lzma: wrong chunk state")
)
275
// next transitions state based on chunk type input. The states are:
// 'S' start, 'L' normal LZMA mode, 'R' reset required, 'U' after an
// uncompressed chunk, 'T' terminal. errChunkType is returned for a
// chunk type that is not allowed in the current state.
func (c *chunkState) next(ctype chunkType) error {
	switch *c {
	// start state: the first compressed chunk must reset everything
	case 'S':
		switch ctype {
		case cEOS:
			*c = 'T'
		case cUD:
			*c = 'R'
		case cLRND:
			*c = 'L'
		default:
			return errChunkType
		}
	// normal LZMA mode
	case 'L':
		switch ctype {
		case cEOS:
			*c = 'T'
		case cUD:
			*c = 'R'
		case cU:
			*c = 'U'
		case cL, cLR, cLRN, cLRND:
			break
		default:
			return errChunkType
		}
	// reset required: the next LZMA chunk must carry new properties
	case 'R':
		switch ctype {
		case cEOS:
			*c = 'T'
		case cUD, cU:
			break
		case cLRN, cLRND:
			*c = 'L'
		default:
			return errChunkType
		}
	// uncompressed: any compressed chunk returns to LZMA mode
	case 'U':
		switch ctype {
		case cEOS:
			*c = 'T'
		case cUD:
			*c = 'R'
		case cU:
			break
		case cL, cLR, cLRN, cLRND:
			*c = 'L'
		default:
			return errChunkType
		}
	// terminal state: no chunk may follow EOS
	case 'T':
		return errChunkType
	default:
		return errState
	}
	return nil
}
339
340// defaultChunkType returns the default chunk type for each chunk state.
341func (c chunkState) defaultChunkType() chunkType {
342 switch c {
343 case 'S':
344 return cLRND
345 case 'L', 'U':
346 return cL
347 case 'R':
348 return cLRN
349 default:
350 // no error
351 return cEOS
352 }
353}
354
// maxDictCap defines the maximum dictionary capacity supported by the
// LZMA2 dictionary capacity encoding.
const maxDictCap = 1<<32 - 1

// maxDictCapCode defines the maximum dictionary capacity code; code 40
// maps to maxDictCap.
const maxDictCapCode = 40
361
// decodeDictCap decodes the dictionary capacity byte. The encoding is
// floating-point-like: a two-valued mantissa (2 or 3) from the low bit
// shifted by an exponent taken from the remaining bits. The function
// doesn't check that c is within the valid code range; see
// DecodeDictCap for the checked variant.
func decodeDictCap(c byte) int64 {
	mantissa := 2 | int64(c)&1
	exponent := 11 + uint(c>>1)&0x1f
	return mantissa << exponent
}
367
368// DecodeDictCap decodes the encoded dictionary capacity. The function
369// returns an error if the code is out of range.
370func DecodeDictCap(c byte) (n int64, err error) {
371 if c >= maxDictCapCode {
372 if c == maxDictCapCode {
373 return maxDictCap, nil
374 }
375 return 0, errors.New("lzma: invalid dictionary size code")
376 }
377 return decodeDictCap(c), nil
378}
379
// EncodeDictCap encodes a dictionary capacity. The function returns the
// code for the capacity that is greater or equal n. If n exceeds the
// maximum support dictionary capacity, the maximum value is returned.
func EncodeDictCap(n int64) byte {
	// binary search over the code range [0, 40); decodeDictCap is
	// strictly increasing in the code value
	a, b := byte(0), byte(40)
	for a < b {
		c := a + (b-a)>>1
		m := decodeDictCap(c)
		if n <= m {
			if n == m {
				return c
			}
			b = c
		} else {
			a = c + 1
		}
	}
	// a == b: smallest code whose capacity is >= n, or 40 (maximum)
	return a
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
new file mode 100644
index 0000000..e517730
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
@@ -0,0 +1,129 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import "errors"
8
// maxPosBits defines the number of bits of the position value that are
// used to compute the posState value. The value is used to select the
// tree codec for length encoding and decoding.
const maxPosBits = 4

// minMatchLen and maxMatchLen give the minimum and maximum values for
// encoding and decoding length values. minMatchLen is also used as base
// for the encoded length values.
const (
	minMatchLen = 2
	maxMatchLen = minMatchLen + 16 + 256 - 1
)

// lengthCodec support the encoding of the length value.
type lengthCodec struct {
	// choice selects between the low, mid and high trees.
	choice [2]prob
	// low encodes length offsets 0-7, one tree per posState.
	low [1 << maxPosBits]treeCodec
	// mid encodes length offsets 8-15, one tree per posState.
	mid [1 << maxPosBits]treeCodec
	// high encodes length offsets 16-271 with a single tree.
	high treeCodec
}
29
30// deepcopy initializes the lc value as deep copy of the source value.
31func (lc *lengthCodec) deepcopy(src *lengthCodec) {
32 if lc == src {
33 return
34 }
35 lc.choice = src.choice
36 for i := range lc.low {
37 lc.low[i].deepcopy(&src.low[i])
38 }
39 for i := range lc.mid {
40 lc.mid[i].deepcopy(&src.mid[i])
41 }
42 lc.high.deepcopy(&src.high)
43}
44
45// init initializes a new length codec.
46func (lc *lengthCodec) init() {
47 for i := range lc.choice {
48 lc.choice[i] = probInit
49 }
50 for i := range lc.low {
51 lc.low[i] = makeTreeCodec(3)
52 }
53 for i := range lc.mid {
54 lc.mid[i] = makeTreeCodec(3)
55 }
56 lc.high = makeTreeCodec(8)
57}
58
// lBits gives the number of bits used for the encoding of the l value
// provided to the range encoder.
func lBits(l uint32) int {
	if l < 8 {
		return 4
	}
	if l < 16 {
		return 5
	}
	return 10
}
71
// Encode encodes the length offset. The length offset l can be compute by
// subtracting minMatchLen (2) from the actual length.
//
//	l = length - minMatchLen
//
// Offsets 0-7 go into the low tree, 8-15 into the mid tree and 16-271
// into the high tree; the choice bits select the tree.
func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32,
) (err error) {
	if l > maxMatchLen-minMatchLen {
		return errors.New("lengthCodec.Encode: l out of range")
	}
	if l < 8 {
		// choice[0] = 0 selects the low tree
		if err = lc.choice[0].Encode(e, 0); err != nil {
			return
		}
		return lc.low[posState].Encode(e, l)
	}
	if err = lc.choice[0].Encode(e, 1); err != nil {
		return
	}
	if l < 16 {
		// choice[1] = 0 selects the mid tree
		if err = lc.choice[1].Encode(e, 0); err != nil {
			return
		}
		return lc.mid[posState].Encode(e, l-8)
	}
	if err = lc.choice[1].Encode(e, 1); err != nil {
		return
	}
	if err = lc.high.Encode(e, l-16); err != nil {
		return
	}
	return nil
}

// Decode reads the length offset. Add minMatchLen to compute the actual length
// to the length offset l. The decoding mirrors Encode: the two choice
// bits select the tree and the tree value is rebased by 0, 8 or 16.
func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32,
) (l uint32, err error) {
	var b uint32
	if b, err = lc.choice[0].Decode(d); err != nil {
		return
	}
	if b == 0 {
		l, err = lc.low[posState].Decode(d)
		return
	}
	if b, err = lc.choice[1].Decode(d); err != nil {
		return
	}
	if b == 0 {
		l, err = lc.mid[posState].Decode(d)
		l += 8
		return
	}
	l, err = lc.high.Decode(d)
	l += 16
	return
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
new file mode 100644
index 0000000..c949d6e
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
@@ -0,0 +1,132 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
// literalCodec supports the encoding of literal. It provides 768 probability
// values per literal state. The upper 512 probabilities are used with the
// context of a match bit.
type literalCodec struct {
	// probs holds 0x300 probabilities per literal state; the slice
	// length is 0x300 << (lc + lp), see init.
	probs []prob
}
13
14// deepcopy initializes literal codec c as a deep copy of the source.
15func (c *literalCodec) deepcopy(src *literalCodec) {
16 if c == src {
17 return
18 }
19 c.probs = make([]prob, len(src.probs))
20 copy(c.probs, src.probs)
21}
22
23// init initializes the literal codec.
24func (c *literalCodec) init(lc, lp int) {
25 switch {
26 case !(minLC <= lc && lc <= maxLC):
27 panic("lc out of range")
28 case !(minLP <= lp && lp <= maxLP):
29 panic("lp out of range")
30 }
31 c.probs = make([]prob, 0x300<<uint(lc+lp))
32 for i := range c.probs {
33 c.probs[i] = probInit
34 }
35}
36
// Encode encodes the byte s using a range encoder as well as the current LZMA
// encoder state, a match byte and the literal state.
func (c *literalCodec) Encode(e *rangeEncoder, s byte,
	state uint32, match byte, litState uint32,
) (err error) {
	// each literal state owns a window of 0x300 probabilities
	k := litState * 0x300
	probs := c.probs[k : k+0x300]
	symbol := uint32(1)
	r := uint32(s)
	if state >= 7 {
		// states >= 7 follow a match: use the match byte as context
		// and the upper 512 probabilities until the bits diverge
		m := uint32(match)
		for {
			matchBit := (m >> 7) & 1
			m <<= 1
			bit := (r >> 7) & 1
			r <<= 1
			i := ((1 + matchBit) << 8) | symbol
			if err = probs[i].Encode(e, bit); err != nil {
				return
			}
			symbol = (symbol << 1) | bit
			if matchBit != bit {
				break
			}
			if symbol >= 0x100 {
				break
			}
		}
	}
	// encode the remaining bits without match-byte context
	for symbol < 0x100 {
		bit := (r >> 7) & 1
		r <<= 1
		if err = probs[symbol].Encode(e, bit); err != nil {
			return
		}
		symbol = (symbol << 1) | bit
	}
	return nil
}

// Decode decodes a literal byte using the range decoder as well as the LZMA
// state, a match byte, and the literal state.
func (c *literalCodec) Decode(d *rangeDecoder,
	state uint32, match byte, litState uint32,
) (s byte, err error) {
	k := litState * 0x300
	probs := c.probs[k : k+0x300]
	symbol := uint32(1)
	if state >= 7 {
		// mirror of Encode: use the match byte as context until the
		// decoded bit diverges from the match bit
		m := uint32(match)
		for {
			matchBit := (m >> 7) & 1
			m <<= 1
			i := ((1 + matchBit) << 8) | symbol
			bit, err := d.DecodeBit(&probs[i])
			if err != nil {
				return 0, err
			}
			symbol = (symbol << 1) | bit
			if matchBit != bit {
				break
			}
			if symbol >= 0x100 {
				break
			}
		}
	}
	for symbol < 0x100 {
		bit, err := d.DecodeBit(&probs[symbol])
		if err != nil {
			return 0, err
		}
		symbol = (symbol << 1) | bit
	}
	// drop the leading marker bit accumulated in symbol
	s = byte(symbol - 0x100)
	return s, nil
}
114
// minLC and maxLC define the range for LC values.
const (
	minLC = 0
	maxLC = 8
)

// minLP and maxLP define the range for LP values.
const (
	minLP = 0
	maxLP = 4
)

// minState and maxState define a range for the state values stored in
// the State values.
const (
	minState = 0
	maxState = 11
)
diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
new file mode 100644
index 0000000..4a244eb
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
@@ -0,0 +1,52 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import "errors"
8
// MatchAlgorithm identifies an algorithm to find matches in the
// dictionary.
type MatchAlgorithm byte

// Supported matcher algorithms.
const (
	HashTable4 MatchAlgorithm = iota
	BinaryTree
)

// maStrings are used by the String method. The map also serves as the
// set of supported values for verify.
var maStrings = map[MatchAlgorithm]string{
	HashTable4: "HashTable4",
	BinaryTree: "BinaryTree",
}
24
25// String returns a string representation of the Matcher.
26func (a MatchAlgorithm) String() string {
27 if s, ok := maStrings[a]; ok {
28 return s
29 }
30 return "unknown"
31}
32
33var errUnsupportedMatchAlgorithm = errors.New(
34 "lzma: unsupported match algorithm value")
35
36// verify checks whether the matcher value is supported.
37func (a MatchAlgorithm) verify() error {
38 if _, ok := maStrings[a]; !ok {
39 return errUnsupportedMatchAlgorithm
40 }
41 return nil
42}
43
// new creates a matcher instance for the algorithm using the given
// dictionary capacity.
func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) {
	switch a {
	case HashTable4:
		return newHashTable(dictCap, 4)
	case BinaryTree:
		return newBinTree(dictCap)
	}
	return nil, errUnsupportedMatchAlgorithm
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go
new file mode 100644
index 0000000..733bb99
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go
@@ -0,0 +1,80 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10 "unicode"
11)
12
// operation represents an operation on the dictionary during encoding or
// decoding.
type operation interface {
	// Len returns the number of bytes the operation covers.
	Len() int
}

// match represents a repetition at the given distance and the given length
type match struct {
	// supports all possible distance values, including the eos marker
	distance int64
	// length
	n int
}
26
27// verify checks whether the match is valid. If that is not the case an
28// error is returned.
29func (m match) verify() error {
30 if !(minDistance <= m.distance && m.distance <= maxDistance) {
31 return errors.New("distance out of range")
32 }
33 if !(1 <= m.n && m.n <= maxMatchLen) {
34 return errors.New("length out of range")
35 }
36 return nil
37}
38
39// l return the l-value for the match, which is the difference of length
40// n and 2.
41func (m match) l() uint32 {
42 return uint32(m.n - minMatchLen)
43}
44
45// dist returns the dist value for the match, which is one less of the
46// distance stored in the match.
47func (m match) dist() uint32 {
48 return uint32(m.distance - minDistance)
49}
50
// Len returns the number of bytes matched.
func (m match) Len() int {
	return m.n
}

// String returns a string representation for the repetition.
func (m match) String() string {
	return fmt.Sprintf("M{%d,%d}", m.distance, m.n)
}

// lit represents a single byte literal.
type lit struct {
	// b is the literal byte value.
	b byte
}

// Len returns 1 for the single byte literal.
func (l lit) Len() int {
	return 1
}
70
71// String returns a string representation for the literal.
72func (l lit) String() string {
73 var c byte
74 if unicode.IsPrint(rune(l.b)) {
75 c = l.b
76 } else {
77 c = '.'
78 }
79 return fmt.Sprintf("L{%c/%02x}", c, l.b)
80}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go
new file mode 100644
index 0000000..24d50ec
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go
@@ -0,0 +1,53 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
// movebits defines the number of bits used for the updates of probability
// values.
const movebits = 5

// probbits defines the number of bits of a probability value.
const probbits = 11

// probInit defines 0.5 as initial value for prob values.
const probInit prob = 1 << (probbits - 1)

// prob represents probabilities. The type can also be used to encode and
// decode single bits.
type prob uint16

// dec decreases the probability. The decrease is proportional to the
// probability value.
func (p *prob) dec() {
	*p -= *p >> movebits
}

// inc increases the probability. The increase is proportional to the
// difference of 1 and the probability value.
func (p *prob) inc() {
	*p += ((1 << probbits) - *p) >> movebits
}

// bound computes the new bound for a given range using the probability
// value.
func (p prob) bound(r uint32) uint32 {
	return (r >> probbits) * uint32(p)
}

// Bits returns 1. One is the number of bits that can be encoded or decoded
// with a single prob value.
func (p prob) Bits() int {
	return 1
}

// Encode encodes the least-significant bit of v. Note that the p value will be
// changed.
func (p *prob) Encode(e *rangeEncoder, v uint32) error {
	return e.EncodeBit(v, p)
}

// Decode decodes a single bit. Note that the p value will change.
func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) {
	return d.DecodeBit(p)
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go
new file mode 100644
index 0000000..23418e2
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go
@@ -0,0 +1,69 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10)
11
// maximum and minimum values for the LZMA properties.
const (
	minPB = 0
	maxPB = 4
)

// maxPropertyCode is the possible maximum of a properties code byte.
const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1

// Properties contains the parameters LC, LP and PB. The parameter LC
// defines the number of literal context bits; parameter LP the number
// of literal position bits and PB the number of position bits.
type Properties struct {
	LC int
	LP int
	PB int
}
29
// String returns the properties in a string representation.
func (p *Properties) String() string {
	return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB)
}
34
35// PropertiesForCode converts a properties code byte into a Properties value.
36func PropertiesForCode(code byte) (p Properties, err error) {
37 if code > maxPropertyCode {
38 return p, errors.New("lzma: invalid properties code")
39 }
40 p.LC = int(code % 9)
41 code /= 9
42 p.LP = int(code % 5)
43 code /= 5
44 p.PB = int(code % 5)
45 return p, err
46}
47
48// verify checks the properties for correctness.
49func (p *Properties) verify() error {
50 if p == nil {
51 return errors.New("lzma: properties are nil")
52 }
53 if !(minLC <= p.LC && p.LC <= maxLC) {
54 return errors.New("lzma: lc out of range")
55 }
56 if !(minLP <= p.LP && p.LP <= maxLP) {
57 return errors.New("lzma: lp out of range")
58 }
59 if !(minPB <= p.PB && p.PB <= maxPB) {
60 return errors.New("lzma: pb out of range")
61 }
62 return nil
63}
64
// Code converts the properties to a byte. The function assumes that
// the properties components are all in range.
func (p Properties) Code() byte {
	return byte((p.PB*5+p.LP)*9 + p.LC)
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
new file mode 100644
index 0000000..6361c5e
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
@@ -0,0 +1,248 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "io"
10)
11
// rangeEncoder implements range encoding of single bits. The low value can
// overflow therefore we need uint64. The cache value is used to handle
// overflows.
type rangeEncoder struct {
	// lbw limits the number of bytes that may be written.
	lbw *LimitedByteWriter
	// nrange is the current width of the coding interval.
	nrange uint32
	// low is the lower bound of the interval; 64 bits to detect carry.
	low uint64
	// cacheLen counts bytes pending in the cache/0xff run.
	cacheLen int64
	// cache holds the last byte that could not be emitted yet because
	// a carry might still propagate into it.
	cache byte
}

// maxInt64 provides the maximal value of the int64 type
const maxInt64 = 1<<63 - 1
25
// newRangeEncoder creates a new range encoder. If bw is not already a
// *LimitedByteWriter it is wrapped in one with an effectively unlimited
// byte budget. The error result is always nil and kept for interface
// symmetry.
func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) {
	lbw, ok := bw.(*LimitedByteWriter)
	if !ok {
		lbw = &LimitedByteWriter{BW: bw, N: maxInt64}
	}
	return &rangeEncoder{
		lbw:      lbw,
		nrange:   0xffffffff,
		cacheLen: 1}, nil
}
37
38// Available returns the number of bytes that still can be written. The
39// method takes the bytes that will be currently written by Close into
40// account.
41func (e *rangeEncoder) Available() int64 {
42 return e.lbw.N - (e.cacheLen + 4)
43}
44
45// writeByte writes a single byte to the underlying writer. An error is
46// returned if the limit is reached. The written byte will be counted if
47// the underlying writer doesn't return an error.
48func (e *rangeEncoder) writeByte(c byte) error {
49 if e.Available() < 1 {
50 return ErrLimit
51 }
52 return e.lbw.WriteByte(c)
53}
54
// DirectEncodeBit encodes the least-significant bit of b with probability 1/2.
func (e *rangeEncoder) DirectEncodeBit(b uint32) error {
	e.nrange >>= 1
	// add nrange to low only when the bit is 1; the mask is all-ones
	// for b&1 == 1 and zero otherwise
	e.low += uint64(e.nrange) & (0 - (uint64(b) & 1))

	// normalize: emit a byte whenever the range drops below 2^24
	const top = 1 << 24
	if e.nrange >= top {
		return nil
	}
	e.nrange <<= 8
	return e.shiftLow()
}

// EncodeBit encodes the least significant bit of b. The p value will be
// updated by the function depending on the bit encoded.
func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error {
	bound := p.bound(e.nrange)
	if b&1 == 0 {
		// bit 0: keep the lower part of the interval
		e.nrange = bound
		p.inc()
	} else {
		// bit 1: keep the upper part of the interval
		e.low += uint64(bound)
		e.nrange -= bound
		p.dec()
	}

	// normalize: emit a byte whenever the range drops below 2^24
	const top = 1 << 24
	if e.nrange >= top {
		return nil
	}
	e.nrange <<= 8
	return e.shiftLow()
}
90
91// Close writes a complete copy of the low value.
92func (e *rangeEncoder) Close() error {
93 for i := 0; i < 5; i++ {
94 if err := e.shiftLow(); err != nil {
95 return err
96 }
97 }
98 return nil
99}
100
// shiftLow shifts the low value for 8 bit. The shifted byte is written into
// the byte writer. The cache value is used to handle overflows: bytes
// are only emitted once a carry can no longer propagate into them.
func (e *rangeEncoder) shiftLow() error {
	if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 {
		// either no further carry is possible or a carry (bit 32 of
		// low) must be added to the cached bytes now
		tmp := e.cache
		for {
			err := e.writeByte(tmp + byte(e.low>>32))
			if err != nil {
				return err
			}
			// pending 0xff bytes absorb the carry the same way
			tmp = 0xff
			e.cacheLen--
			if e.cacheLen <= 0 {
				if e.cacheLen < 0 {
					panic("negative cacheLen")
				}
				break
			}
		}
		e.cache = byte(uint32(e.low) >> 24)
	}
	e.cacheLen++
	e.low = uint64(uint32(e.low) << 8)
	return nil
}
126
// rangeDecoder decodes single bits of the range encoding stream.
type rangeDecoder struct {
	// br supplies the encoded byte stream.
	br io.ByteReader
	// nrange is the current width of the coding interval.
	nrange uint32
	// code is the current position inside the interval.
	code uint32
}

// init initializes the range decoder, by reading from the byte reader.
// It consumes five bytes: a leading zero byte followed by four code
// bytes.
func (d *rangeDecoder) init() error {
	d.nrange = 0xffffffff
	d.code = 0

	b, err := d.br.ReadByte()
	if err != nil {
		return err
	}
	if b != 0 {
		return errors.New("newRangeDecoder: first byte not zero")
	}

	for i := 0; i < 4; i++ {
		if err = d.updateCode(); err != nil {
			return err
		}
	}

	if d.code >= d.nrange {
		return errors.New("newRangeDecoder: d.code >= d.nrange")
	}

	return nil
}
159
160// newRangeDecoder initializes a range decoder. It reads five bytes from the
161// reader and therefore may return an error.
162func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) {
163 d = &rangeDecoder{br: br, nrange: 0xffffffff}
164
165 b, err := d.br.ReadByte()
166 if err != nil {
167 return nil, err
168 }
169 if b != 0 {
170 return nil, errors.New("newRangeDecoder: first byte not zero")
171 }
172
173 for i := 0; i < 4; i++ {
174 if err = d.updateCode(); err != nil {
175 return nil, err
176 }
177 }
178
179 if d.code >= d.nrange {
180 return nil, errors.New("newRangeDecoder: d.code >= d.nrange")
181 }
182
183 return d, nil
184}
185
// possiblyAtEnd checks whether the decoder may be at the end of the stream.
func (d *rangeDecoder) possiblyAtEnd() bool {
	return d.code == 0
}

// DirectDecodeBit decodes a bit with probability 1/2. The return value b will
// contain the bit at the least-significant position. All other bits will be
// zero.
func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) {
	d.nrange >>= 1
	d.code -= d.nrange
	// t is all-ones when code went negative (bit was 0), zero otherwise
	t := 0 - (d.code >> 31)
	d.code += d.nrange & t
	b = (t + 1) & 1

	// d.code will stay less than d.nrange

	// normalize
	// assume d.code < d.nrange
	const top = 1 << 24
	if d.nrange >= top {
		return b, nil
	}
	d.nrange <<= 8
	// d.code < d.nrange will be maintained
	return b, d.updateCode()
}

// DecodeBit decodes a single bit. The bit will be returned at the
// least-significant position. All other bits will be zero. The probability
// value will be updated.
func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) {
	bound := p.bound(d.nrange)
	if d.code < bound {
		// bit 0: keep the lower part of the interval
		d.nrange = bound
		p.inc()
		b = 0
	} else {
		// bit 1: keep the upper part of the interval
		d.code -= bound
		d.nrange -= bound
		p.dec()
		b = 1
	}
	// normalize
	// assume d.code < d.nrange
	const top = 1 << 24
	if d.nrange >= top {
		return b, nil
	}
	d.nrange <<= 8
	// d.code < d.nrange will be maintained
	return b, d.updateCode()
}
239
240// updateCode reads a new byte into the code.
241func (d *rangeDecoder) updateCode() error {
242 b, err := d.br.ReadByte()
243 if err != nil {
244 return err
245 }
246 d.code = (d.code << 8) | uint32(b)
247 return nil
248}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go
new file mode 100644
index 0000000..2ef3dca
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go
@@ -0,0 +1,100 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package lzma supports the decoding and encoding of LZMA streams.
6// Reader and Writer support the classic LZMA format. Reader2 and
7// Writer2 support the decoding and encoding of LZMA2 streams.
8//
9// The package is written completely in Go and doesn't rely on any external
10// library.
11package lzma
12
13import (
14 "errors"
15 "io"
16)
17
18// ReaderConfig stores the parameters for the reader of the classic LZMA
19// format.
20type ReaderConfig struct {
21 DictCap int
22}
23
24// fill converts the zero values of the configuration to the default values.
25func (c *ReaderConfig) fill() {
26 if c.DictCap == 0 {
27 c.DictCap = 8 * 1024 * 1024
28 }
29}
30
31// Verify checks the reader configuration for errors. Zero values will
32// be replaced by default values.
33func (c *ReaderConfig) Verify() error {
34 c.fill()
35 if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
36 return errors.New("lzma: dictionary capacity is out of range")
37 }
38 return nil
39}
40
// Reader provides a reader for LZMA files or streams.
type Reader struct {
	lzma io.Reader // underlying compressed stream
	h    header    // classic LZMA header read from the stream
	d    *decoder  // operation decoder producing uncompressed data
}

// NewReader creates a new reader for an LZMA stream using the classic
// format. NewReader reads and checks the header of the LZMA stream.
func NewReader(lzma io.Reader) (r *Reader, err error) {
	return ReaderConfig{}.NewReader(lzma)
}
53
// NewReader creates a new reader for an LZMA stream in the classic
// format. The function reads and verifies the header of the LZMA
// stream.
func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	data := make([]byte, HeaderLen)
	if _, err := io.ReadFull(lzma, data); err != nil {
		if err == io.EOF {
			return nil, errors.New("lzma: unexpected EOF")
		}
		return nil, err
	}
	r = &Reader{lzma: lzma}
	if err = r.h.unmarshalBinary(data); err != nil {
		return nil, err
	}
	if r.h.dictCap < MinDictCap {
		return nil, errors.New("lzma: dictionary capacity too small")
	}
	// Use the larger of the header's dictionary capacity and the
	// configured capacity.
	dictCap := r.h.dictCap
	if c.DictCap > dictCap {
		dictCap = c.DictCap
	}

	state := newState(r.h.properties)
	dict, err := newDecoderDict(dictCap)
	if err != nil {
		return nil, err
	}
	r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size)
	if err != nil {
		return nil, err
	}
	return r, nil
}
91
// EOSMarker indicates that an EOS marker has been encountered during
// decoding.
func (r *Reader) EOSMarker() bool {
	return r.d.eosMarker
}

// Read returns uncompressed data. It delegates to the operation decoder.
func (r *Reader) Read(p []byte) (n int, err error) {
	return r.d.Read(p)
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go
new file mode 100644
index 0000000..a55cfaa
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go
@@ -0,0 +1,232 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "io"
10
11 "github.com/ulikunitz/xz/internal/xlog"
12)
13
14// Reader2Config stores the parameters for the LZMA2 reader.
15// format.
16type Reader2Config struct {
17 DictCap int
18}
19
20// fill converts the zero values of the configuration to the default values.
21func (c *Reader2Config) fill() {
22 if c.DictCap == 0 {
23 c.DictCap = 8 * 1024 * 1024
24 }
25}
26
27// Verify checks the reader configuration for errors. Zero configuration values
28// will be replaced by default values.
29func (c *Reader2Config) Verify() error {
30 c.fill()
31 if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
32 return errors.New("lzma: dictionary capacity is out of range")
33 }
34 return nil
35}
36
// Reader2 supports the reading of LZMA2 chunk sequences. Note that the
// first chunk should have a dictionary reset and the first compressed
// chunk a properties reset. The chunk sequence may not be terminated by
// an end-of-stream chunk.
type Reader2 struct {
	r   io.Reader // underlying chunk sequence
	err error     // sticky error; once set all reads fail

	dict        *decoderDict        // shared dictionary across chunks
	ur          *uncompressedReader // reused for uncompressed chunks
	decoder     *decoder            // reused for compressed chunks
	chunkReader io.Reader           // reader for the current chunk body

	cstate chunkState
	ctype  chunkType
}

// NewReader2 creates a reader for an LZMA2 chunk sequence with default
// configuration.
func NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
	return Reader2Config{}.NewReader2(lzma2)
}
58
// NewReader2 creates an LZMA2 reader using the given configuration.
func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	r = &Reader2{r: lzma2, cstate: start}
	r.dict, err = newDecoderDict(c.DictCap)
	if err != nil {
		return nil, err
	}
	// Parse the first chunk eagerly; a failure is recorded as the
	// sticky error and surfaces on the first Read.
	if err = r.startChunk(); err != nil {
		r.err = err
	}
	return r, nil
}
74
75// uncompressed tests whether the chunk type specifies an uncompressed
76// chunk.
77func uncompressed(ctype chunkType) bool {
78 return ctype == cU || ctype == cUD
79}
80
// startChunk parses the header of the next chunk and prepares the
// appropriate chunk reader for its body.
func (r *Reader2) startChunk() error {
	r.chunkReader = nil
	header, err := readChunkHeader(r.r)
	if err != nil {
		if err == io.EOF {
			// A chunk header was expected; a clean EOF here is
			// premature.
			err = io.ErrUnexpectedEOF
		}
		return err
	}
	xlog.Debugf("chunk header %v", header)
	if err = r.cstate.next(header.ctype); err != nil {
		return err
	}
	if r.cstate == stop {
		return io.EOF
	}
	// Chunk types cUD and cLRND require a dictionary reset.
	if header.ctype == cUD || header.ctype == cLRND {
		r.dict.Reset()
	}
	size := int64(header.uncompressed) + 1
	if uncompressed(header.ctype) {
		// Reuse the uncompressed reader if one already exists.
		if r.ur != nil {
			r.ur.Reopen(r.r, size)
		} else {
			r.ur = newUncompressedReader(r.r, r.dict, size)
		}
		r.chunkReader = r.ur
		return nil
	}
	br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1))
	if r.decoder == nil {
		state := newState(header.props)
		r.decoder, err = newDecoder(br, state, r.dict, size)
		if err != nil {
			return err
		}
		r.chunkReader = r.decoder
		return nil
	}
	switch header.ctype {
	case cLR:
		// state reset keeping the current properties
		r.decoder.State.Reset()
	case cLRN, cLRND:
		// state reset with new properties
		r.decoder.State = newState(header.props)
	}
	err = r.decoder.Reopen(br, size)
	if err != nil {
		return err
	}
	r.chunkReader = r.decoder
	return nil
}
134
// Read reads data from the LZMA2 chunk sequence.
func (r *Reader2) Read(p []byte) (n int, err error) {
	if r.err != nil {
		return 0, r.err
	}
	for n < len(p) {
		var k int
		k, err = r.chunkReader.Read(p[n:])
		n += k
		if err != nil {
			if err == io.EOF {
				// The current chunk is exhausted; move on to
				// the next one.
				err = r.startChunk()
				if err == nil {
					continue
				}
			}
			// Errors are sticky.
			r.err = err
			return n, err
		}
		if k == 0 {
			// Guard against a chunk reader that makes no progress.
			r.err = errors.New("lzma: Reader2 doesn't get data")
			return n, r.err
		}
	}
	return n, nil
}
161
// EOS returns whether the LZMA2 stream has been terminated by an
// end-of-stream chunk.
func (r *Reader2) EOS() bool {
	return r.cstate == stop
}
167
168// uncompressedReader is used to read uncompressed chunks.
169type uncompressedReader struct {
170 lr io.LimitedReader
171 Dict *decoderDict
172 eof bool
173 err error
174}
175
176// newUncompressedReader initializes a new uncompressedReader.
177func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader {
178 ur := &uncompressedReader{
179 lr: io.LimitedReader{R: r, N: size},
180 Dict: dict,
181 }
182 return ur
183}
184
185// Reopen reinitializes an uncompressed reader.
186func (ur *uncompressedReader) Reopen(r io.Reader, size int64) {
187 ur.err = nil
188 ur.eof = false
189 ur.lr = io.LimitedReader{R: r, N: size}
190}
191
// fill reads uncompressed data into the dictionary.
func (ur *uncompressedReader) fill() error {
	if !ur.eof {
		// Copy at most the dictionary's free space from the limited
		// reader into the dictionary.
		n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available()))
		if err != io.EOF {
			return err
		}
		ur.eof = true
		if n > 0 {
			return nil
		}
	}
	// Hitting EOF while the limit is not exhausted means the chunk
	// body was truncated.
	if ur.lr.N != 0 {
		return io.ErrUnexpectedEOF
	}
	return io.EOF
}
209
// Read reads uncompressed data from the limited reader.
func (ur *uncompressedReader) Read(p []byte) (n int, err error) {
	if ur.err != nil {
		return 0, ur.err
	}
	for {
		var k int
		k, err = ur.Dict.Read(p[n:])
		n += k
		if n >= len(p) {
			return n, nil
		}
		if err != nil {
			break
		}
		// Dictionary drained; refill it from the chunk body.
		err = ur.fill()
		if err != nil {
			break
		}
	}
	// Errors are sticky.
	ur.err = err
	return n, err
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go
new file mode 100644
index 0000000..5023510
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/state.go
@@ -0,0 +1,151 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
// states defines the overall state count
const states = 12

// state maintains the full state of the operation encoding or decoding
// process.
type state struct {
	rep         [4]uint32 // the four most recent match distances
	isMatch     [states << maxPosBits]prob
	isRepG0Long [states << maxPosBits]prob
	isRep       [states]prob
	isRepG0     [states]prob
	isRepG1     [states]prob
	isRepG2     [states]prob
	litCodec    literalCodec
	lenCodec    lengthCodec // lengths of matches
	repLenCodec lengthCodec // lengths of repeated matches
	distCodec   distCodec
	state       uint32 // current state index in [0,states)
	posBitMask  uint32 // mask derived from the PB property
	Properties  Properties
}
28
29// initProbSlice initializes a slice of probabilities.
30func initProbSlice(p []prob) {
31 for i := range p {
32 p[i] = probInit
33 }
34}
35
// Reset sets all state information to the original values.
func (s *state) Reset() {
	p := s.Properties
	// Zero everything except the properties and the derived bit mask.
	*s = state{
		Properties: p,
		// dict: s.dict,
		posBitMask: (uint32(1) << uint(p.PB)) - 1,
	}
	initProbSlice(s.isMatch[:])
	initProbSlice(s.isRep[:])
	initProbSlice(s.isRepG0[:])
	initProbSlice(s.isRepG1[:])
	initProbSlice(s.isRepG2[:])
	initProbSlice(s.isRepG0Long[:])
	s.litCodec.init(p.LC, p.LP)
	s.lenCodec.init()
	s.repLenCodec.init()
	s.distCodec.init()
}
55
56// initState initializes the state.
57func initState(s *state, p Properties) {
58 *s = state{Properties: p}
59 s.Reset()
60}
61
62// newState creates a new state from the give Properties.
63func newState(p Properties) *state {
64 s := &state{Properties: p}
65 s.Reset()
66 return s
67}
68
// deepcopy initializes s as a deep copy of the source.
func (s *state) deepcopy(src *state) {
	if s == src {
		return
	}
	// Arrays and scalars are copied by assignment; only the codecs
	// contain slices and need explicit deep copies.
	s.rep = src.rep
	s.isMatch = src.isMatch
	s.isRepG0Long = src.isRepG0Long
	s.isRep = src.isRep
	s.isRepG0 = src.isRepG0
	s.isRepG1 = src.isRepG1
	s.isRepG2 = src.isRepG2
	s.litCodec.deepcopy(&src.litCodec)
	s.lenCodec.deepcopy(&src.lenCodec)
	s.repLenCodec.deepcopy(&src.repLenCodec)
	s.distCodec.deepcopy(&src.distCodec)
	s.state = src.state
	s.posBitMask = src.posBitMask
	s.Properties = src.Properties
}

// cloneState creates a new clone of the given state.
func cloneState(src *state) *state {
	s := new(state)
	s.deepcopy(src)
	return s
}
96
97// updateStateLiteral updates the state for a literal.
98func (s *state) updateStateLiteral() {
99 switch {
100 case s.state < 4:
101 s.state = 0
102 return
103 case s.state < 10:
104 s.state -= 3
105 return
106 }
107 s.state -= 6
108}
109
110// updateStateMatch updates the state for a match.
111func (s *state) updateStateMatch() {
112 if s.state < 7 {
113 s.state = 7
114 } else {
115 s.state = 10
116 }
117}
118
119// updateStateRep updates the state for a repetition.
120func (s *state) updateStateRep() {
121 if s.state < 7 {
122 s.state = 8
123 } else {
124 s.state = 11
125 }
126}
127
128// updateStateShortRep updates the state for a short repetition.
129func (s *state) updateStateShortRep() {
130 if s.state < 7 {
131 s.state = 9
132 } else {
133 s.state = 11
134 }
135}
136
// states computes the states of the operation codec.
func (s *state) states(dictHead int64) (state1, state2, posState uint32) {
	state1 = s.state
	// posState depends on the dictionary head position masked by PB.
	posState = uint32(dictHead) & s.posBitMask
	state2 = (s.state << maxPosBits) | posState
	return
}

// litState computes the literal state from the previous byte and the
// dictionary head position, using the LP and LC properties.
func (s *state) litState(prev byte, dictHead int64) uint32 {
	lp, lc := uint(s.Properties.LP), uint(s.Properties.LC)
	litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) |
		(uint32(prev) >> (8 - lc))
	return litState
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
new file mode 100644
index 0000000..504b3d7
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
@@ -0,0 +1,133 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
// treeCodec encodes or decodes values with a fixed bit size. It is using a
// tree of probability values. The root of the tree is the most-significant
// bit.
type treeCodec struct {
	probTree
}

// makeTreeCodec makes a tree codec. The bits value must be inside the range
// [1,32].
func makeTreeCodec(bits int) treeCodec {
	return treeCodec{makeProbTree(bits)}
}

// deepcopy initializes tc as a deep copy of the source.
func (tc *treeCodec) deepcopy(src *treeCodec) {
	tc.probTree.deepcopy(&src.probTree)
}

// Encode uses the range encoder to encode a fixed-bit-size value.
func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) {
	// m tracks the index of the current tree node, starting at the root.
	m := uint32(1)
	for i := int(tc.bits) - 1; i >= 0; i-- {
		b := (v >> uint(i)) & 1
		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
			return err
		}
		m = (m << 1) | b
	}
	return nil
}

// Decode uses the range decoder to decode a fixed-bit-size value. Errors may
// be caused by the range decoder.
func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) {
	m := uint32(1)
	for j := 0; j < int(tc.bits); j++ {
		b, err := d.DecodeBit(&tc.probs[m])
		if err != nil {
			return 0, err
		}
		m = (m << 1) | b
	}
	// Subtract the sentinel root bit to recover the value.
	return m - (1 << uint(tc.bits)), nil
}
50
// treeReverseCodec is another tree codec, where the least-significant bit is
// the start of the probability tree.
type treeReverseCodec struct {
	probTree
}

// deepcopy initializes the treeReverseCodec as a deep copy of the
// source.
func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) {
	tc.probTree.deepcopy(&src.probTree)
}

// makeTreeReverseCodec creates a treeReverseCodec value. The bits argument
// must be in the range [1,32].
func makeTreeReverseCodec(bits int) treeReverseCodec {
	return treeReverseCodec{makeProbTree(bits)}
}

// Encode uses the range encoder to encode a fixed-bit-size value. The range
// encoder may cause errors.
func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) {
	// m tracks the index of the current tree node; bits are walked
	// from the least-significant position upwards.
	m := uint32(1)
	for i := uint(0); i < uint(tc.bits); i++ {
		b := (v >> i) & 1
		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
			return err
		}
		m = (m << 1) | b
	}
	return nil
}

// Decode uses the range decoder to decode a fixed-bit-size value. Errors
// returned by the range decoder will be returned.
func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
	m := uint32(1)
	for j := uint(0); j < uint(tc.bits); j++ {
		b, err := d.DecodeBit(&tc.probs[m])
		if err != nil {
			return 0, err
		}
		m = (m << 1) | b
		// Assemble the value least-significant bit first.
		v |= b << j
	}
	return v, nil
}
97
// probTree stores enough probability values to be used by the treeEncode and
// treeDecode methods of the range coder types.
type probTree struct {
	probs []prob // one node per tree index; index 0 is unused
	bits  byte   // number of bits of the coded values
}

// deepcopy initializes the probTree value as a deep copy of the source.
func (t *probTree) deepcopy(src *probTree) {
	if t == src {
		return
	}
	t.probs = make([]prob, len(src.probs))
	copy(t.probs, src.probs)
	t.bits = src.bits
}

// makeProbTree initializes a probTree structure.
func makeProbTree(bits int) probTree {
	if !(1 <= bits && bits <= 32) {
		panic("bits outside of range [1,32]")
	}
	t := probTree{
		bits:  byte(bits),
		probs: make([]prob, 1<<uint(bits)),
	}
	for i := range t.probs {
		t.probs[i] = probInit
	}
	return t
}

// Bits provides the number of bits for the values to de- or encode.
func (t *probTree) Bits() int {
	return int(t.bits)
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go
new file mode 100644
index 0000000..efe34fb
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go
@@ -0,0 +1,209 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "bufio"
9 "errors"
10 "io"
11)
12
// MinDictCap and MaxDictCap provide the range of supported dictionary
// capacities.
const (
	MinDictCap = 1 << 12
	MaxDictCap = 1<<32 - 1
)

// WriterConfig defines the configuration parameter for a writer.
type WriterConfig struct {
	// Properties for the encoding. If it is nil the value
	// {LC: 3, LP: 0, PB: 2} will be chosen.
	Properties *Properties
	// The capacity of the dictionary. If DictCap is zero, the value
	// 8 MiB will be chosen.
	DictCap int
	// Size of the lookahead buffer; value 0 indicates default size
	// 4096
	BufSize int
	// Match algorithm
	Matcher MatchAlgorithm
	// SizeInHeader indicates that the header will contain an
	// explicit size.
	SizeInHeader bool
	// Size of the data to be encoded. A positive value will imply
	// that an explicit size will be set in the header.
	Size int64
	// EOSMarker requests whether the EOSMarker needs to be written.
	// If no explicit size is been given the EOSMarker will be
	// set automatically.
	EOSMarker bool
}
44
45// fill converts zero-value fields to their explicit default values.
46func (c *WriterConfig) fill() {
47 if c.Properties == nil {
48 c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
49 }
50 if c.DictCap == 0 {
51 c.DictCap = 8 * 1024 * 1024
52 }
53 if c.BufSize == 0 {
54 c.BufSize = 4096
55 }
56 if c.Size > 0 {
57 c.SizeInHeader = true
58 }
59 if !c.SizeInHeader {
60 c.EOSMarker = true
61 }
62}
63
64// Verify checks WriterConfig for errors. Verify will replace zero
65// values with default values.
66func (c *WriterConfig) Verify() error {
67 c.fill()
68 var err error
69 if c == nil {
70 return errors.New("lzma: WriterConfig is nil")
71 }
72 if c.Properties == nil {
73 return errors.New("lzma: WriterConfig has no Properties set")
74 }
75 if err = c.Properties.verify(); err != nil {
76 return err
77 }
78 if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
79 return errors.New("lzma: dictionary capacity is out of range")
80 }
81 if !(maxMatchLen <= c.BufSize) {
82 return errors.New("lzma: lookahead buffer size too small")
83 }
84 if c.SizeInHeader {
85 if c.Size < 0 {
86 return errors.New("lzma: negative size not supported")
87 }
88 } else if !c.EOSMarker {
89 return errors.New("lzma: EOS marker is required")
90 }
91 if err = c.Matcher.verify(); err != nil {
92 return err
93 }
94
95 return nil
96}
97
98// header returns the header structure for this configuration.
99func (c *WriterConfig) header() header {
100 h := header{
101 properties: *c.Properties,
102 dictCap: c.DictCap,
103 size: -1,
104 }
105 if c.SizeInHeader {
106 h.size = c.Size
107 }
108 return h
109}
110
// Writer writes an LZMA stream in the classic format.
type Writer struct {
	h   header        // header written at the start of the stream
	bw  io.ByteWriter // byte-level destination for the encoder
	buf *bufio.Writer // non-nil only if bw had to be wrapped; flushed on Close
	e   *encoder      // operation encoder
}
118
// NewWriter creates a new LZMA writer for the classic format. The
// method will write the header to the underlying stream.
func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	w = &Writer{h: c.header()}

	// Use the destination directly if it supports WriteByte;
	// otherwise wrap it in a bufio.Writer that must be flushed on
	// Close.
	var ok bool
	w.bw, ok = lzma.(io.ByteWriter)
	if !ok {
		w.buf = bufio.NewWriter(lzma)
		w.bw = w.buf
	}
	state := newState(w.h.properties)
	m, err := c.Matcher.new(w.h.dictCap)
	if err != nil {
		return nil, err
	}
	dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m)
	if err != nil {
		return nil, err
	}
	var flags encoderFlags
	if c.EOSMarker {
		flags = eosMarker
	}
	if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil {
		return nil, err
	}

	if err = w.writeHeader(); err != nil {
		return nil, err
	}
	return w, nil
}

// NewWriter creates a new LZMA writer using the classic format. The
// function writes the header to the underlying stream.
func NewWriter(lzma io.Writer) (w *Writer, err error) {
	return WriterConfig{}.NewWriter(lzma)
}
161
162// writeHeader writes the LZMA header into the stream.
163func (w *Writer) writeHeader() error {
164 data, err := w.h.marshalBinary()
165 if err != nil {
166 return err
167 }
168 _, err = w.bw.(io.Writer).Write(data)
169 return err
170}
171
// Write puts data into the Writer.
func (w *Writer) Write(p []byte) (n int, err error) {
	if w.h.size >= 0 {
		// An explicit size was announced in the header; truncate the
		// input to the remaining space and report ErrNoSpace if data
		// had to be dropped.
		m := w.h.size
		m -= w.e.Compressed() + int64(w.e.dict.Buffered())
		if m < 0 {
			m = 0
		}
		if m < int64(len(p)) {
			p = p[:m]
			err = ErrNoSpace
		}
	}
	var werr error
	if n, werr = w.e.Write(p); werr != nil {
		// A write error takes precedence over ErrNoSpace.
		err = werr
	}
	return n, err
}
191
// Close closes the writer stream. It ensures that all data from the
// buffer will be compressed and the LZMA stream will be finished.
func (w *Writer) Close() error {
	if w.h.size >= 0 {
		// The header promised an exact size; anything else is an
		// error.
		n := w.e.Compressed() + int64(w.e.dict.Buffered())
		if n != w.h.size {
			return errSize
		}
	}
	err := w.e.Close()
	// Flush the bufio.Writer if the destination had to be wrapped.
	if w.buf != nil {
		ferr := w.buf.Flush()
		if err == nil {
			err = ferr
		}
	}
	return err
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go
new file mode 100644
index 0000000..7c1afe1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go
@@ -0,0 +1,305 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "bytes"
9 "errors"
10 "io"
11)
12
13// Writer2Config is used to create a Writer2 using parameters.
14type Writer2Config struct {
15 // The properties for the encoding. If the it is nil the value
16 // {LC: 3, LP: 0, PB: 2} will be chosen.
17 Properties *Properties
18 // The capacity of the dictionary. If DictCap is zero, the value
19 // 8 MiB will be chosen.
20 DictCap int
21 // Size of the lookahead buffer; value 0 indicates default size
22 // 4096
23 BufSize int
24 // Match algorithm
25 Matcher MatchAlgorithm
26}
27
28// fill replaces zero values with default values.
29func (c *Writer2Config) fill() {
30 if c.Properties == nil {
31 c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
32 }
33 if c.DictCap == 0 {
34 c.DictCap = 8 * 1024 * 1024
35 }
36 if c.BufSize == 0 {
37 c.BufSize = 4096
38 }
39}
40
41// Verify checks the Writer2Config for correctness. Zero values will be
42// replaced by default values.
43func (c *Writer2Config) Verify() error {
44 c.fill()
45 var err error
46 if c == nil {
47 return errors.New("lzma: WriterConfig is nil")
48 }
49 if c.Properties == nil {
50 return errors.New("lzma: WriterConfig has no Properties set")
51 }
52 if err = c.Properties.verify(); err != nil {
53 return err
54 }
55 if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
56 return errors.New("lzma: dictionary capacity is out of range")
57 }
58 if !(maxMatchLen <= c.BufSize) {
59 return errors.New("lzma: lookahead buffer size too small")
60 }
61 if c.Properties.LC+c.Properties.LP > 4 {
62 return errors.New("lzma: sum of lc and lp exceeds 4")
63 }
64 if err = c.Matcher.verify(); err != nil {
65 return err
66 }
67 return nil
68}
69
// Writer2 supports the creation of an LZMA2 stream. But note that
// written data is buffered, so call Flush or Close to write data to the
// underlying writer. The Close method writes the end-of-stream marker
// to the stream. So you may be able to concatenate the output of two
// writers as long the output of the first writer has only been flushed
// but not closed.
//
// Any change to the fields Properties, DictCap must be done before the
// first call to Write, Flush or Close.
type Writer2 struct {
	w io.Writer // underlying destination of the chunk sequence

	start   *state   // state at the start of the current chunk
	encoder *encoder // operation encoder for the current chunk

	cstate chunkState // state machine over chunk types
	ctype  chunkType  // type of the chunk being built

	buf bytes.Buffer      // compressed body of the current chunk
	lbw LimitedByteWriter // caps the chunk body at maxCompressed
}

// NewWriter2 creates an LZMA2 chunk sequence writer with the default
// parameters and options.
func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
	return Writer2Config{}.NewWriter2(lzma2)
}
97
// NewWriter2 creates a new LZMA2 writer using the given configuration.
func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	w = &Writer2{
		w:      lzma2,
		start:  newState(*c.Properties),
		cstate: start,
		ctype:  start.defaultChunkType(),
	}
	// The chunk body buffer is limited to the maximum compressed
	// chunk size.
	w.buf.Grow(maxCompressed)
	w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed}
	m, err := c.Matcher.new(c.DictCap)
	if err != nil {
		return nil, err
	}
	d, err := newEncoderDict(c.DictCap, c.BufSize, m)
	if err != nil {
		return nil, err
	}
	// The encoder gets a clone so w.start stays the chunk-start state.
	w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0)
	if err != nil {
		return nil, err
	}
	return w, nil
}
125
126// written returns the number of bytes written to the current chunk
127func (w *Writer2) written() int {
128 if w.encoder == nil {
129 return 0
130 }
131 return int(w.encoder.Compressed()) + w.encoder.dict.Buffered()
132}
133
134// errClosed indicates that the writer is closed.
135var errClosed = errors.New("lzma: writer closed")
136
// Write writes data to the LZMA2 stream. Note that written data will be
// buffered. Use Flush or Close to ensure that data is written to the
// underlying writer.
func (w *Writer2) Write(p []byte) (n int, err error) {
	if w.cstate == stop {
		return 0, errClosed
	}
	for n < len(p) {
		// m is the remaining uncompressed capacity of the chunk.
		m := maxUncompressed - w.written()
		if m <= 0 {
			panic("lzma: maxUncompressed reached")
		}
		var q []byte
		if n+m < len(p) {
			q = p[n : n+m]
		} else {
			q = p[n:]
		}
		k, err := w.encoder.Write(q)
		n += k
		if err != nil && err != ErrLimit {
			return n, err
		}
		if err == ErrLimit || k == m {
			// The chunk is full; flush it and start a new one.
			if err = w.flushChunk(); err != nil {
				return n, err
			}
		}
	}
	return n, nil
}
168
// writeUncompressedChunk writes an uncompressed chunk to the LZMA2
// stream.
func (w *Writer2) writeUncompressedChunk() error {
	u := w.encoder.Compressed()
	if u <= 0 {
		return errors.New("lzma: can't write empty uncompressed chunk")
	}
	if u > maxUncompressed {
		panic("overrun of uncompressed data limit")
	}
	switch w.ctype {
	case cLRND:
		// preserve the pending dictionary reset in the chunk type
		w.ctype = cUD
	default:
		w.ctype = cU
	}
	// Restore the chunk-start state; the compressed output of this
	// chunk is discarded in favor of the raw data.
	w.encoder.state = w.start

	header := chunkHeader{
		ctype:        w.ctype,
		uncompressed: uint32(u - 1),
	}
	hdata, err := header.MarshalBinary()
	if err != nil {
		return err
	}
	if _, err = w.w.Write(hdata); err != nil {
		return err
	}
	// Copy the raw chunk data directly out of the dictionary.
	_, err = w.encoder.dict.CopyN(w.w, int(u))
	return err
}
201
// writeCompressedChunk writes a compressed chunk to the underlying
// writer.
func (w *Writer2) writeCompressedChunk() error {
	if w.ctype == cU || w.ctype == cUD {
		panic("chunk type uncompressed")
	}

	// u is the uncompressed size, c the compressed size of the chunk.
	u := w.encoder.Compressed()
	if u <= 0 {
		return errors.New("writeCompressedChunk: empty chunk")
	}
	if u > maxUncompressed {
		panic("overrun of uncompressed data limit")
	}
	c := w.buf.Len()
	if c <= 0 {
		panic("no compressed data")
	}
	if c > maxCompressed {
		panic("overrun of compressed data limit")
	}
	// Chunk headers store size-1 in their size fields.
	header := chunkHeader{
		ctype:        w.ctype,
		uncompressed: uint32(u - 1),
		compressed:   uint16(c - 1),
		props:        w.encoder.state.Properties,
	}
	hdata, err := header.MarshalBinary()
	if err != nil {
		return err
	}
	if _, err = w.w.Write(hdata); err != nil {
		return err
	}
	_, err = io.Copy(w.w, &w.buf)
	return err
}
239
240// writes a single chunk to the underlying writer.
241func (w *Writer2) writeChunk() error {
242 u := int(uncompressedHeaderLen + w.encoder.Compressed())
243 c := headerLen(w.ctype) + w.buf.Len()
244 if u < c {
245 return w.writeUncompressedChunk()
246 }
247 return w.writeCompressedChunk()
248}
249
// flushChunk terminates the current chunk. The encoder will be reset
// to support the next chunk.
func (w *Writer2) flushChunk() error {
	if w.written() == 0 {
		// nothing to flush
		return nil
	}
	var err error
	if err = w.encoder.Close(); err != nil {
		return err
	}
	if err = w.writeChunk(); err != nil {
		return err
	}
	// Prepare the buffer and encoder for the next chunk.
	w.buf.Reset()
	w.lbw.N = maxCompressed
	if err = w.encoder.Reopen(&w.lbw); err != nil {
		return err
	}
	// Advance the chunk state machine and remember the state at the
	// start of the next chunk.
	if err = w.cstate.next(w.ctype); err != nil {
		return err
	}
	w.ctype = w.cstate.defaultChunkType()
	w.start = cloneState(w.encoder.state)
	return nil
}
275
276// Flush writes all buffered data out to the underlying stream. This
277// could result in multiple chunks to be created.
278func (w *Writer2) Flush() error {
279 if w.cstate == stop {
280 return errClosed
281 }
282 for w.written() > 0 {
283 if err := w.flushChunk(); err != nil {
284 return err
285 }
286 }
287 return nil
288}
289
290// Close terminates the LZMA2 stream with an EOS chunk.
291func (w *Writer2) Close() error {
292 if w.cstate == stop {
293 return errClosed
294 }
295 if err := w.Flush(); err != nil {
296 return nil
297 }
298 // write zero byte EOS chunk
299 _, err := w.w.Write([]byte{0})
300 if err != nil {
301 return err
302 }
303 w.cstate = stop
304 return nil
305}
diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go
new file mode 100644
index 0000000..69cf5f7
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go
@@ -0,0 +1,117 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package xz
6
7import (
8 "errors"
9 "fmt"
10 "io"
11
12 "github.com/ulikunitz/xz/lzma"
13)
14
// LZMA filter constants.
const (
	// lzmaFilterID is the xz filter ID of the LZMA2 filter.
	lzmaFilterID = 0x21
	// lzmaFilterLen is the encoded length of the filter record.
	lzmaFilterLen = 3
)

// lzmaFilter declares the LZMA2 filter information stored in an xz
// block header.
type lzmaFilter struct {
	// dictCap is the dictionary capacity in bytes.
	dictCap int64
}
26
27// String returns a representation of the LZMA filter.
28func (f lzmaFilter) String() string {
29 return fmt.Sprintf("LZMA dict cap %#x", f.dictCap)
30}
31
32// id returns the ID for the LZMA2 filter.
33func (f lzmaFilter) id() uint64 { return lzmaFilterID }
34
35// MarshalBinary converts the lzmaFilter in its encoded representation.
36func (f lzmaFilter) MarshalBinary() (data []byte, err error) {
37 c := lzma.EncodeDictCap(f.dictCap)
38 return []byte{lzmaFilterID, 1, c}, nil
39}
40
41// UnmarshalBinary unmarshals the given data representation of the LZMA2
42// filter.
43func (f *lzmaFilter) UnmarshalBinary(data []byte) error {
44 if len(data) != lzmaFilterLen {
45 return errors.New("xz: data for LZMA2 filter has wrong length")
46 }
47 if data[0] != lzmaFilterID {
48 return errors.New("xz: wrong LZMA2 filter id")
49 }
50 if data[1] != 1 {
51 return errors.New("xz: wrong LZMA2 filter size")
52 }
53 dc, err := lzma.DecodeDictCap(data[2])
54 if err != nil {
55 return errors.New("xz: wrong LZMA2 dictionary size property")
56 }
57
58 f.dictCap = dc
59 return nil
60}
61
// reader creates a new reader for the LZMA2 filter.
func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader,
	err error) {

	config := new(lzma.Reader2Config)
	if c != nil {
		config.DictCap = c.DictCap
	}
	dc := int(f.dictCap)
	// a non-positive value means the stored 64-bit capacity overflowed
	// the platform int
	if dc < 1 {
		return nil, errors.New("xz: LZMA2 filter parameter " +
			"dictionary capacity overflow")
	}
	// the decoder dictionary must be at least as large as the filter
	// demands
	if dc > config.DictCap {
		config.DictCap = dc
	}

	fr, err = config.NewReader2(r)
	if err != nil {
		return nil, err
	}
	return fr, nil
}
85
// writeCloser creates a io.WriteCloser for the LZMA2 filter.
func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig,
) (fw io.WriteCloser, err error) {
	config := new(lzma.Writer2Config)
	if c != nil {
		// carry the relevant writer parameters over to the encoder
		*config = lzma.Writer2Config{
			Properties: c.Properties,
			DictCap:    c.DictCap,
			BufSize:    c.BufSize,
			Matcher:    c.Matcher,
		}
	}

	dc := int(f.dictCap)
	// a non-positive value means the stored 64-bit capacity overflowed
	// the platform int
	if dc < 1 {
		return nil, errors.New("xz: LZMA2 filter parameter " +
			"dictionary capacity overflow")
	}
	// the encoder dictionary must be at least as large as the filter
	// demands
	if dc > config.DictCap {
		config.DictCap = dc
	}

	fw, err = config.NewWriter2(w)
	if err != nil {
		return nil, err
	}
	return fw, nil
}
114
115// last returns true, because an LZMA2 filter must be the last filter in
116// the filter list.
117func (f lzmaFilter) last() bool { return true }
diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs
new file mode 100644
index 0000000..a8c612c
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/make-docs
@@ -0,0 +1,5 @@
#!/bin/sh

# Regenerate standalone HTML documentation from the markdown sources
# with pandoc. "set -x" echoes each command as it runs.
set -x
pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md
pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md
diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go
new file mode 100644
index 0000000..0634c6b
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/reader.go
@@ -0,0 +1,373 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package xz supports the compression and decompression of xz files. It
6// supports version 1.0.4 of the specification without the non-LZMA2
7// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt
8package xz
9
10import (
11 "bytes"
12 "errors"
13 "fmt"
14 "hash"
15 "io"
16
17 "github.com/ulikunitz/xz/internal/xlog"
18 "github.com/ulikunitz/xz/lzma"
19)
20
// ReaderConfig defines the parameters for the xz reader. The
// SingleStream parameter requests the reader to assume that the
// underlying stream contains only a single stream.
type ReaderConfig struct {
	// DictCap is the LZMA2 dictionary capacity in bytes; zero selects
	// the default of 8 MiB (see fill).
	DictCap int
	// SingleStream makes the reader treat any data after the first
	// stream as an error.
	SingleStream bool
}
28
29// fill replaces all zero values with their default values.
30func (c *ReaderConfig) fill() {
31 if c.DictCap == 0 {
32 c.DictCap = 8 * 1024 * 1024
33 }
34}
35
36// Verify checks the reader parameters for Validity. Zero values will be
37// replaced by default values.
38func (c *ReaderConfig) Verify() error {
39 if c == nil {
40 return errors.New("xz: reader parameters are nil")
41 }
42 lc := lzma.Reader2Config{DictCap: c.DictCap}
43 if err := lc.Verify(); err != nil {
44 return err
45 }
46 return nil
47}
48
// Reader supports the reading of one or multiple xz streams.
type Reader struct {
	ReaderConfig

	xz io.Reader     // underlying xz data stream
	sr *streamReader // current stream; nil between streams
}
56
// streamReader decodes a single xz stream
type streamReader struct {
	ReaderConfig

	xz      io.Reader        // underlying reader
	br      *blockReader     // current block; nil before/after a block
	newHash func() hash.Hash // constructor for the configured check
	h       header           // decoded stream header
	index   []record         // block records for index verification
}
67
68// NewReader creates a new xz reader using the default parameters.
69// The function reads and checks the header of the first XZ stream. The
70// reader will process multiple streams including padding.
71func NewReader(xz io.Reader) (r *Reader, err error) {
72 return ReaderConfig{}.NewReader(xz)
73}
74
// NewReader creates an xz stream reader. The created reader will be
// able to process multiple streams and padding unless a SingleStream
// has been set in the reader configuration c.
func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	r = &Reader{
		ReaderConfig: c,
		xz:           xz,
	}
	if r.sr, err = c.newStreamReader(xz); err != nil {
		// an EOF while reading the first header means the file is
		// truncated
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return nil, err
	}
	return r, nil
}
94
// errUnexpectedData reports trailing bytes after the only allowed
// stream in single-stream mode.
var errUnexpectedData = errors.New("xz: unexpected data after stream")

// Read reads uncompressed data from the stream.
func (r *Reader) Read(p []byte) (n int, err error) {
	for n < len(p) {
		if r.sr == nil {
			// the current stream is exhausted
			if r.SingleStream {
				// any further byte is an error in single-stream mode
				data := make([]byte, 1)
				_, err = io.ReadFull(r.xz, data)
				if err != io.EOF {
					return n, errUnexpectedData
				}
				return n, io.EOF
			}
			// skip runs of stream padding until the next stream
			// header or a terminal error
			for {
				r.sr, err = r.ReaderConfig.newStreamReader(r.xz)
				if err != errPadding {
					break
				}
			}
			if err != nil {
				return n, err
			}
		}
		k, err := r.sr.Read(p[n:])
		n += k
		if err != nil {
			if err == io.EOF {
				// stream finished; look for another stream
				r.sr = nil
				continue
			}
			return n, err
		}
	}
	return n, nil
}
131
// errPadding is reported when four zero bytes (stream padding) are
// found where a stream header was expected.
var errPadding = errors.New("xz: padding (4 zero bytes) encountered")

// newStreamReader creates a new xz stream reader using the given configuration
// parameters. NewReader reads and checks the header of the xz stream.
func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	data := make([]byte, HeaderLen)
	// read the first 4 bytes to distinguish padding from a header
	if _, err := io.ReadFull(xz, data[:4]); err != nil {
		return nil, err
	}
	if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) {
		return nil, errPadding
	}
	// read the remainder of the stream header
	if _, err = io.ReadFull(xz, data[4:]); err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return nil, err
	}
	r = &streamReader{
		ReaderConfig: c,
		xz:           xz,
		index:        make([]record, 0, 4),
	}
	if err = r.h.UnmarshalBinary(data); err != nil {
		return nil, err
	}
	xlog.Debugf("xz header %s", r.h)
	// pick the hash constructor matching the header's check flags
	if r.newHash, err = newHashFunc(r.h.flags); err != nil {
		return nil, err
	}
	return r, nil
}
167
// errIndex indicates an error with the xz file index.
var errIndex = errors.New("xz: error in xz file index")

// readTail reads the index body and the xz footer.
func (r *streamReader) readTail() error {
	index, n, err := readIndexBody(r.xz)
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return err
	}
	// the stored index must match the records collected while reading
	// the blocks
	if len(index) != len(r.index) {
		return fmt.Errorf("xz: index length is %d; want %d",
			len(index), len(r.index))
	}
	for i, rec := range r.index {
		if rec != index[i] {
			return fmt.Errorf("xz: record %d is %v; want %v",
				i, rec, index[i])
		}
	}

	p := make([]byte, footerLen)
	if _, err = io.ReadFull(r.xz, p); err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return err
	}
	var f footer
	if err = f.UnmarshalBinary(p); err != nil {
		return err
	}
	xlog.Debugf("xz footer %s", f)
	if f.flags != r.h.flags {
		return errors.New("xz: footer flags incorrect")
	}
	// the footer's index size covers one byte more than readIndexBody
	// counted — presumably the index indicator byte; hence the +1
	if f.indexSize != int64(n)+1 {
		return errors.New("xz: index size in footer wrong")
	}
	return nil
}
211
// Read reads actual data from the xz stream.
func (r *streamReader) Read(p []byte) (n int, err error) {
	for n < len(p) {
		if r.br == nil {
			// no current block: parse the next block header
			bh, hlen, err := readBlockHeader(r.xz)
			if err != nil {
				// the index indicator marks the end of the block
				// section; verify index and footer, then report EOF
				if err == errIndexIndicator {
					if err = r.readTail(); err != nil {
						return n, err
					}
					return n, io.EOF
				}
				return n, err
			}
			xlog.Debugf("block %v", *bh)
			r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh,
				hlen, r.newHash())
			if err != nil {
				return n, err
			}
		}
		k, err := r.br.Read(p[n:])
		n += k
		if err != nil {
			if err == io.EOF {
				// block finished: record it for index verification
				r.index = append(r.index, r.br.record())
				r.br = nil
			} else {
				return n, err
			}
		}
	}
	return n, nil
}
246
247// countingReader is a reader that counts the bytes read.
248type countingReader struct {
249 r io.Reader
250 n int64
251}
252
253// Read reads data from the wrapped reader and adds it to the n field.
254func (lr *countingReader) Read(p []byte) (n int, err error) {
255 n, err = lr.r.Read(p)
256 lr.n += int64(n)
257 return n, err
258}
259
// blockReader supports the reading of a block.
type blockReader struct {
	lxz       countingReader // counts compressed bytes consumed
	header    *blockHeader   // parsed block header
	headerLen int            // encoded length of the block header
	n         int64          // uncompressed bytes read so far
	hash      hash.Hash      // running check over uncompressed data
	r         io.Reader      // filter chain teed into hash
	err       error          // NOTE(review): not set in visible code
}
270
// newBlockReader creates a new block reader.
func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader,
	hlen int, hash hash.Hash) (br *blockReader, err error) {

	br = &blockReader{
		lxz:       countingReader{r: xz},
		header:    h,
		headerLen: hlen,
		hash:      hash,
	}

	// build the decoding filter chain over the counting reader so
	// compressed input is tallied
	fr, err := c.newFilterReader(&br.lxz, h.filters)
	if err != nil {
		return nil, err
	}
	// tee the decompressed output into the hash for the block check
	br.r = io.TeeReader(fr, br.hash)

	return br, nil
}
290
291// uncompressedSize returns the uncompressed size of the block.
292func (br *blockReader) uncompressedSize() int64 {
293 return br.n
294}
295
296// compressedSize returns the compressed size of the block.
297func (br *blockReader) compressedSize() int64 {
298 return br.lxz.n
299}
300
301// unpaddedSize computes the unpadded size for the block.
302func (br *blockReader) unpaddedSize() int64 {
303 n := int64(br.headerLen)
304 n += br.compressedSize()
305 n += int64(br.hash.Size())
306 return n
307}
308
309// record returns the index record for the current block.
310func (br *blockReader) record() record {
311 return record{br.unpaddedSize(), br.uncompressedSize()}
312}
313
314// errBlockSize indicates that the size of the block in the block header
315// is wrong.
316var errBlockSize = errors.New("xz: wrong uncompressed size for block")
317
318// Read reads data from the block.
319func (br *blockReader) Read(p []byte) (n int, err error) {
320 n, err = br.r.Read(p)
321 br.n += int64(n)
322
323 u := br.header.uncompressedSize
324 if u >= 0 && br.uncompressedSize() > u {
325 return n, errors.New("xz: wrong uncompressed size for block")
326 }
327 c := br.header.compressedSize
328 if c >= 0 && br.compressedSize() > c {
329 return n, errors.New("xz: wrong compressed size for block")
330 }
331 if err != io.EOF {
332 return n, err
333 }
334 if br.uncompressedSize() < u || br.compressedSize() < c {
335 return n, io.ErrUnexpectedEOF
336 }
337
338 s := br.hash.Size()
339 k := padLen(br.lxz.n)
340 q := make([]byte, k+s, k+2*s)
341 if _, err = io.ReadFull(br.lxz.r, q); err != nil {
342 if err == io.EOF {
343 err = io.ErrUnexpectedEOF
344 }
345 return n, err
346 }
347 if !allZeros(q[:k]) {
348 return n, errors.New("xz: non-zero block padding")
349 }
350 checkSum := q[k:]
351 computedSum := br.hash.Sum(checkSum[s:])
352 if !bytes.Equal(checkSum, computedSum) {
353 return n, errors.New("xz: checksum error for block")
354 }
355 return n, io.EOF
356}
357
// newFilterReader builds the decoding filter chain for f. Filters are
// applied in reverse order, so the last filter in the list reads
// directly from r.
func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader,
	err error) {

	if err = verifyFilters(f); err != nil {
		return nil, err
	}

	fr = r
	for i := len(f) - 1; i >= 0; i-- {
		fr, err = f[i].reader(fr, c)
		if err != nil {
			return nil, err
		}
	}
	return fr, nil
}
diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go
new file mode 100644
index 0000000..c126f70
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/writer.go
@@ -0,0 +1,386 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package xz
6
7import (
8 "errors"
9 "hash"
10 "io"
11
12 "github.com/ulikunitz/xz/lzma"
13)
14
// WriterConfig describe the parameters for an xz writer.
type WriterConfig struct {
	// Properties for the LZMA encoder; nil selects LC=3, LP=0, PB=2
	// (see fill).
	Properties *lzma.Properties
	// DictCap is the dictionary capacity in bytes; zero selects 8 MiB.
	DictCap int
	// BufSize is the buffer size for the LZMA2 encoder; zero selects
	// 4096.
	BufSize int
	// BlockSize limits the uncompressed size of a block; zero selects
	// a single effectively unbounded block.
	BlockSize int64
	// checksum method: CRC32, CRC64 or SHA256
	CheckSum byte
	// match algorithm
	Matcher lzma.MatchAlgorithm
}
26
// fill replaces zero values with default values.
func (c *WriterConfig) fill() {
	if c.Properties == nil {
		// default LZMA properties: lc=3, lp=0, pb=2
		c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2}
	}
	if c.DictCap == 0 {
		c.DictCap = 8 * 1024 * 1024
	}
	if c.BufSize == 0 {
		c.BufSize = 4096
	}
	if c.BlockSize == 0 {
		// a single block covering the whole stream
		c.BlockSize = maxInt64
	}
	if c.CheckSum == 0 {
		c.CheckSum = CRC64
	}
}
45
// Verify checks the configuration for errors. Zero values will be
// replaced by default values.
func (c *WriterConfig) Verify() error {
	if c == nil {
		return errors.New("xz: writer configuration is nil")
	}
	c.fill()
	// delegate validation of the LZMA2 parameters
	lc := lzma.Writer2Config{
		Properties: c.Properties,
		DictCap:    c.DictCap,
		BufSize:    c.BufSize,
		Matcher:    c.Matcher,
	}
	if err := lc.Verify(); err != nil {
		return err
	}
	if c.BlockSize <= 0 {
		return errors.New("xz: block size out of range")
	}
	if err := verifyFlags(c.CheckSum); err != nil {
		return err
	}
	return nil
}
70
71// filters creates the filter list for the given parameters.
72func (c *WriterConfig) filters() []filter {
73 return []filter{&lzmaFilter{int64(c.DictCap)}}
74}
75
76// maxInt64 defines the maximum 64-bit signed integer.
77const maxInt64 = 1<<63 - 1
78
79// verifyFilters checks the filter list for the length and the right
80// sequence of filters.
81func verifyFilters(f []filter) error {
82 if len(f) == 0 {
83 return errors.New("xz: no filters")
84 }
85 if len(f) > 4 {
86 return errors.New("xz: more than four filters")
87 }
88 for _, g := range f[:len(f)-1] {
89 if g.last() {
90 return errors.New("xz: last filter is not last")
91 }
92 }
93 if !f[len(f)-1].last() {
94 return errors.New("xz: wrong last filter")
95 }
96 return nil
97}
98
// newFilterWriteCloser converts a filter list into a WriteCloser that
// can be used by a blockWriter. The chain is built back to front, so
// the first filter in the list is the one callers write to.
func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) {
	if err = verifyFilters(f); err != nil {
		return nil, err
	}
	fw = nopWriteCloser(w)
	for i := len(f) - 1; i >= 0; i-- {
		fw, err = f[i].writeCloser(fw, c)
		if err != nil {
			return nil, err
		}
	}
	return fw, nil
}
114
115// nopWCloser implements a WriteCloser with a Close method not doing
116// anything.
117type nopWCloser struct {
118 io.Writer
119}
120
121// Close returns nil and doesn't do anything else.
122func (c nopWCloser) Close() error {
123 return nil
124}
125
126// nopWriteCloser converts the Writer into a WriteCloser with a Close
127// function that does nothing beside returning nil.
128func nopWriteCloser(w io.Writer) io.WriteCloser {
129 return nopWCloser{w}
130}
131
// Writer compresses data written to it. It is an io.WriteCloser.
type Writer struct {
	WriterConfig

	xz      io.Writer        // underlying xz stream
	bw      *blockWriter     // writer for the current block
	newHash func() hash.Hash // constructor for the configured check
	h       header           // stream header
	index   []record         // block records for the stream index
	closed  bool             // set once Close has been called
}
143
// newBlockWriter creates a new block writer and writes its header out.
func (w *Writer) newBlockWriter() error {
	var err error
	w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash())
	if err != nil {
		return err
	}
	if err = w.bw.writeHeader(w.xz); err != nil {
		return err
	}
	return nil
}
156
157// closeBlockWriter closes a block writer and records the sizes in the
158// index.
159func (w *Writer) closeBlockWriter() error {
160 var err error
161 if err = w.bw.Close(); err != nil {
162 return err
163 }
164 w.index = append(w.index, w.bw.record())
165 return nil
166}
167
168// NewWriter creates a new xz writer using default parameters.
169func NewWriter(xz io.Writer) (w *Writer, err error) {
170 return WriterConfig{}.NewWriter(xz)
171}
172
173// NewWriter creates a new Writer using the given configuration parameters.
174func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) {
175 if err = c.Verify(); err != nil {
176 return nil, err
177 }
178 w = &Writer{
179 WriterConfig: c,
180 xz: xz,
181 h: header{c.CheckSum},
182 index: make([]record, 0, 4),
183 }
184 if w.newHash, err = newHashFunc(c.CheckSum); err != nil {
185 return nil, err
186 }
187 data, err := w.h.MarshalBinary()
188 if _, err = xz.Write(data); err != nil {
189 return nil, err
190 }
191 if err = w.newBlockWriter(); err != nil {
192 return nil, err
193 }
194 return w, nil
195
196}
197
// Write compresses the uncompressed data provided.
func (w *Writer) Write(p []byte) (n int, err error) {
	if w.closed {
		return 0, errClosed
	}
	for {
		k, err := w.bw.Write(p[n:])
		n += k
		// errNoSpace means the block size limit was reached; any
		// other result (including nil on completion) ends the loop
		if err != errNoSpace {
			return n, err
		}
		// finish the full block and start a new one for the rest
		if err = w.closeBlockWriter(); err != nil {
			return n, err
		}
		if err = w.newBlockWriter(); err != nil {
			return n, err
		}
	}
}
217
// Close closes the writer and adds the footer to the Writer. Close
// doesn't close the underlying writer.
func (w *Writer) Close() error {
	if w.closed {
		return errClosed
	}
	w.closed = true
	var err error
	// finish the last block
	if err = w.closeBlockWriter(); err != nil {
		return err
	}

	// write the index, then a footer recording the index size and the
	// stream flags
	f := footer{flags: w.h.flags}
	if f.indexSize, err = writeIndex(w.xz, w.index); err != nil {
		return err
	}
	data, err := f.MarshalBinary()
	if err != nil {
		return err
	}
	if _, err = w.xz.Write(data); err != nil {
		return err
	}
	return nil
}
243
244// countingWriter is a writer that counts all data written to it.
245type countingWriter struct {
246 w io.Writer
247 n int64
248}
249
250// Write writes data to the countingWriter.
251func (cw *countingWriter) Write(p []byte) (n int, err error) {
252 n, err = cw.w.Write(p)
253 cw.n += int64(n)
254 if err == nil && cw.n < 0 {
255 return n, errors.New("xz: counter overflow")
256 }
257 return
258}
259
// blockWriter writes a single block.
type blockWriter struct {
	cxz countingWriter // counts compressed bytes reaching the stream
	// mw combines io.WriteCloser w and the hash.
	mw        io.Writer
	w         io.WriteCloser // head of the filter chain
	n         int64          // uncompressed bytes written so far
	blockSize int64          // uncompressed size limit for the block
	closed    bool           // set once Close has been called
	headerLen int            // encoded length of the written header

	filters []filter  // filter chain used for this block
	hash    hash.Hash // running check over the uncompressed data
}
274
// newBlockWriter creates a new block writer.
func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) {
	bw = &blockWriter{
		cxz:       countingWriter{w: xz},
		blockSize: c.BlockSize,
		filters:   c.filters(),
		hash:      hash,
	}
	// compressed output runs through the filter chain into the
	// counting writer
	bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters)
	if err != nil {
		return nil, err
	}
	// uncompressed input is mirrored into the hash for the block check
	bw.mw = io.MultiWriter(bw.w, bw.hash)
	return bw, nil
}
290
// writeHeader writes the header. If the function is called after Close
// the compressedSize and uncompressedSize fields will be filled.
func (bw *blockWriter) writeHeader(w io.Writer) error {
	h := blockHeader{
		compressedSize:   -1, // -1 marks the size as still unknown
		uncompressedSize: -1,
		filters:          bw.filters,
	}
	if bw.closed {
		h.compressedSize = bw.compressedSize()
		h.uncompressedSize = bw.uncompressedSize()
	}
	data, err := h.MarshalBinary()
	if err != nil {
		return err
	}
	if _, err = w.Write(data); err != nil {
		return err
	}
	// remember the header length for unpaddedSize
	bw.headerLen = len(data)
	return nil
}
313
314// compressed size returns the amount of data written to the underlying
315// stream.
316func (bw *blockWriter) compressedSize() int64 {
317 return bw.cxz.n
318}
319
320// uncompressedSize returns the number of data written to the
321// blockWriter
322func (bw *blockWriter) uncompressedSize() int64 {
323 return bw.n
324}
325
326// unpaddedSize returns the sum of the header length, the uncompressed
327// size of the block and the hash size.
328func (bw *blockWriter) unpaddedSize() int64 {
329 if bw.headerLen <= 0 {
330 panic("xz: block header not written")
331 }
332 n := int64(bw.headerLen)
333 n += bw.compressedSize()
334 n += int64(bw.hash.Size())
335 return n
336}
337
338// record returns the record for the current stream. Call Close before
339// calling this method.
340func (bw *blockWriter) record() record {
341 return record{bw.unpaddedSize(), bw.uncompressedSize()}
342}
343
// errClosed is returned for operations on an already closed writer.
var errClosed = errors.New("xz: writer already closed")

// errNoSpace signals that the block's size limit has been reached.
var errNoSpace = errors.New("xz: no space")

// Write writes uncompressed data to the block writer.
func (bw *blockWriter) Write(p []byte) (n int, err error) {
	if bw.closed {
		return 0, errClosed
	}

	// truncate p to the space left in this block; errNoSpace tells the
	// caller to open a new block for the remainder
	t := bw.blockSize - bw.n
	if int64(len(p)) > t {
		err = errNoSpace
		p = p[:t]
	}

	// a real write error takes precedence over errNoSpace
	var werr error
	n, werr = bw.mw.Write(p)
	bw.n += int64(n)
	if werr != nil {
		return n, werr
	}
	return n, err
}
368
// Close closes the writer.
func (bw *blockWriter) Close() error {
	if bw.closed {
		return errClosed
	}
	bw.closed = true
	// close the filter chain to flush any pending compressed data
	if err := bw.w.Close(); err != nil {
		return err
	}
	s := bw.hash.Size()
	k := padLen(bw.cxz.n)
	// p holds k zero padding bytes followed by the checksum; Sum
	// appends the digest into p's backing array starting at index k
	p := make([]byte, k+s)
	bw.hash.Sum(p[k:k])
	if _, err := bw.cxz.w.Write(p); err != nil {
		return err
	}
	return nil
}