aboutsummaryrefslogtreecommitdiffhomepage
path: root/vendor/github.com/ulikunitz/xz/lzma
diff options
context:
space:
mode:
authorAlex Pilon <apilon@hashicorp.com>2019-02-22 18:24:37 -0500
committerAlex Pilon <apilon@hashicorp.com>2019-02-22 18:24:37 -0500
commit15c0b25d011f37e7c20aeca9eaf461f78285b8d9 (patch)
tree255c250a5c9d4801c74092d33b7337d8c14438ff /vendor/github.com/ulikunitz/xz/lzma
parent07971ca38143c5faf951d152fba370ddcbe26ad5 (diff)
downloadterraform-provider-statuscake-15c0b25d011f37e7c20aeca9eaf461f78285b8d9.tar.gz
terraform-provider-statuscake-15c0b25d011f37e7c20aeca9eaf461f78285b8d9.tar.zst
terraform-provider-statuscake-15c0b25d011f37e7c20aeca9eaf461f78285b8d9.zip
deps: github.com/hashicorp/terraform@sdk-v0.11-with-go-modules
Updated via: go get github.com/hashicorp/terraform@sdk-v0.11-with-go-modules and go mod tidy
Diffstat (limited to 'vendor/github.com/ulikunitz/xz/lzma')
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/bintree.go523
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/bitops.go45
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/breader.go39
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/buffer.go171
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/bytewriter.go37
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/decoder.go277
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/decoderdict.go135
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/directcodec.go49
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/distcodec.go156
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/encoder.go268
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/encoderdict.go149
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/fox.lzmabin0 -> 67 bytes
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/hashtable.go309
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/header.go167
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/header2.go398
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go129
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/literalcodec.go132
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go52
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/operation.go80
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/prob.go53
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/properties.go69
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/rangecodec.go248
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/reader.go100
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/reader2.go232
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/state.go151
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/treecodecs.go133
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/writer.go209
-rw-r--r--vendor/github.com/ulikunitz/xz/lzma/writer2.go305
28 files changed, 4616 insertions, 0 deletions
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go
new file mode 100644
index 0000000..a781bd1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go
@@ -0,0 +1,523 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "bufio"
9 "errors"
10 "fmt"
11 "io"
12 "unicode"
13)
14
// node represents a node in the binary tree.
type node struct {
	// x is the search value
	x uint32
	// p parent node
	p uint32
	// l left child
	l uint32
	// r right child
	r uint32
}

// wordLen is the number of bytes represented by the x field of a node.
const wordLen = 4

// binTree supports the identification of the next operation based on a
// binary tree.
//
// Nodes will be identified by their index into the ring buffer.
type binTree struct {
	dict *encoderDict
	// ring buffer of nodes
	node []node
	// hoff is the absolute offset of the entry for the next node; it
	// lags the write position by wordLen bytes (it starts at
	// -wordLen and is incremented per byte written).
	hoff int64
	// front position in the node ring buffer
	front uint32
	// index of the root node
	root uint32
	// current x value (rolling window of the last wordLen bytes)
	x uint32
	// preallocated array to avoid allocations in NextOp
	data []byte
}

// null represents the nonexistent index. We can't use zero because it
// would always exist or we would need to decrease the index for each
// reference.
const null uint32 = 1<<32 - 1
55
56// newBinTree initializes the binTree structure. The capacity defines
57// the size of the buffer and defines the maximum distance for which
58// matches will be found.
59func newBinTree(capacity int) (t *binTree, err error) {
60 if capacity < 1 {
61 return nil, errors.New(
62 "newBinTree: capacity must be larger than zero")
63 }
64 if int64(capacity) >= int64(null) {
65 return nil, errors.New(
66 "newBinTree: capacity must less 2^{32}-1")
67 }
68 t = &binTree{
69 node: make([]node, capacity),
70 hoff: -int64(wordLen),
71 root: null,
72 data: make([]byte, maxMatchLen),
73 }
74 return t, nil
75}
76
// SetDict sets the encoder dictionary used to look up match data.
func (t *binTree) SetDict(d *encoderDict) { t.dict = d }

// WriteByte writes a single byte into the binary tree. It always
// returns nil; the error result satisfies io.ByteWriter.
func (t *binTree) WriteByte(c byte) error {
	// Shift the new byte into the rolling 4-byte window.
	t.x = (t.x << 8) | uint32(c)
	t.hoff++
	if t.hoff < 0 {
		// Fewer than wordLen bytes seen so far; no complete word yet.
		return nil
	}
	v := t.front
	if int64(v) < t.hoff {
		// We are overwriting old nodes stored in the tree.
		t.remove(v)
	}
	t.node[v].x = t.x
	t.add(v)
	t.front++
	if int64(t.front) >= int64(len(t.node)) {
		t.front = 0
	}
	return nil
}

// Write writes a sequence of bytes into the binTree structure. It never
// fails because WriteByte never returns an error.
func (t *binTree) Write(p []byte) (n int, err error) {
	for _, c := range p {
		t.WriteByte(c)
	}
	return len(p), nil
}
107
// add puts the node v into the tree. The node must not be part of the
// tree before.
func (t *binTree) add(v uint32) {
	vn := &t.node[v]
	// Set left and right to null indices.
	vn.l, vn.r = null, null
	// If the binary tree is empty make v the root.
	if t.root == null {
		t.root = v
		vn.p = null
		return
	}
	x := vn.x
	p := t.root
	// Search for the right leaf link and add the new node.
	for {
		pn := &t.node[p]
		if x <= pn.x {
			if pn.l == null {
				pn.l = v
				vn.p = p
				return
			}
			p = pn.l
		} else {
			if pn.r == null {
				pn.r = v
				vn.p = p
				return
			}
			p = pn.r
		}
	}
}

// parent returns the parent node index of v and the pointer to the
// child link in the parent that references v. For the root node the
// pointer to t.root is returned.
func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) {
	if t.root == v {
		return null, &t.root
	}
	p = t.node[v].p
	if t.node[p].l == v {
		ptr = &t.node[p].l
	} else {
		ptr = &t.node[p].r
	}
	return
}
157
// remove removes node v from the tree, relinking children and parent so
// that the in-order sequence of the remaining nodes is preserved.
func (t *binTree) remove(v uint32) {
	vn := &t.node[v]
	p, ptr := t.parent(v)
	l, r := vn.l, vn.r
	if l == null {
		// Move the right child up.
		*ptr = r
		if r != null {
			t.node[r].p = p
		}
		return
	}
	if r == null {
		// Move the left child up.
		*ptr = l
		t.node[l].p = p
		return
	}

	// Both children exist: search the in-order predecessor u.
	un := &t.node[l]
	ur := un.r
	if ur == null {
		// In-order predecessor is l itself. Move it up.
		un.r = r
		t.node[r].p = l
		un.p = p
		*ptr = l
		return
	}
	var u uint32
	for {
		// Look for the max value in the tree where l is root.
		u = ur
		ur = t.node[u].r
		if ur == null {
			break
		}
	}
	// Detach u: replace u by its left child ul in u's parent.
	un = &t.node[u]
	ul := un.l
	up := un.p
	t.node[up].r = ul
	if ul != null {
		t.node[ul].p = up
	}

	// Replace v by u.
	un.l, un.r = l, r
	t.node[l].p = u
	t.node[r].p = u
	*ptr = u
	un.p = p
}
214
// search looks for the node that has the value x or for the nodes that
// brace it. The node highest in the tree with the value x will be
// returned. All other nodes with the same value live in the left
// subtree of the returned node.
//
// If x is found a == b == that node; otherwise a is the largest node
// with a smaller value and b the smallest node with a larger value
// (either may be null).
func (t *binTree) search(v uint32, x uint32) (a, b uint32) {
	a, b = null, null
	if v == null {
		return
	}
	for {
		vn := &t.node[v]
		if x <= vn.x {
			if x == vn.x {
				return v, v
			}
			b = v
			if vn.l == null {
				return
			}
			v = vn.l
		} else {
			a = v
			if vn.r == null {
				return
			}
			v = vn.r
		}
	}
}
244
245// max returns the node with maximum value in the subtree with v as
246// root.
247func (t *binTree) max(v uint32) uint32 {
248 if v == null {
249 return null
250 }
251 for {
252 r := t.node[v].r
253 if r == null {
254 return v
255 }
256 v = r
257 }
258}
259
260// min returns the node with the minimum value in the subtree with v as
261// root.
262func (t *binTree) min(v uint32) uint32 {
263 if v == null {
264 return null
265 }
266 for {
267 l := t.node[v].l
268 if l == null {
269 return v
270 }
271 v = l
272 }
273}
274
// pred returns the in-order predecessor of node v, or null if none
// exists.
func (t *binTree) pred(v uint32) uint32 {
	if v == null {
		return null
	}
	// Predecessor is the maximum of the left subtree, if any.
	u := t.max(t.node[v].l)
	if u != null {
		return u
	}
	// Otherwise walk up until we come from a right child.
	for {
		p := t.node[v].p
		if p == null {
			return null
		}
		if t.node[p].r == v {
			return p
		}
		v = p
	}
}

// succ returns the in-order successor of node v, or null if none
// exists.
func (t *binTree) succ(v uint32) uint32 {
	if v == null {
		return null
	}
	// Successor is the minimum of the right subtree, if any.
	u := t.min(t.node[v].r)
	if u != null {
		return u
	}
	// Otherwise walk up until we come from a left child.
	for {
		p := t.node[v].p
		if p == null {
			return null
		}
		if t.node[p].l == v {
			return p
		}
		v = p
	}
}
316
// xval converts the first four bytes of a into a 32-bit unsigned
// integer in big-endian order. Missing bytes are treated as zero.
func xval(a []byte) uint32 {
	n := len(a)
	if n > 4 {
		n = 4
	}
	var x uint32
	for i := 0; i < n; i++ {
		x |= uint32(a[i]) << uint(24-8*i)
	}
	return x
}
337
// dumpX converts value x into a four-letter string. Non-graphic bytes
// are rendered as '.'.
func dumpX(x uint32) string {
	var a [4]byte
	for i := range a {
		b := byte(x >> uint(24-8*i))
		if !unicode.IsGraphic(rune(b)) {
			b = '.'
		}
		a[i] = b
	}
	return string(a[:])
}
351
// dumpNode writes a representation of the node v into the io.Writer,
// using reverse in-order traversal so the output reads like a tree
// rotated 90 degrees counter-clockwise.
func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) {
	if v == null {
		return
	}

	vn := &t.node[v]

	t.dumpNode(w, vn.r, indent+2)

	for i := 0; i < indent; i++ {
		fmt.Fprint(w, " ")
	}
	if vn.p == null {
		fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x))
	} else {
		fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p)
	}

	t.dumpNode(w, vn.l, indent+2)
}

// dump prints a representation of the binary tree into the writer.
func (t *binTree) dump(w io.Writer) error {
	bw := bufio.NewWriter(w)
	t.dumpNode(bw, t.root, 0)
	return bw.Flush()
}
380
// distance returns the match distance from the current front position
// to the node v, accounting for ring-buffer wraparound.
func (t *binTree) distance(v uint32) int {
	dist := int(t.front) - int(v)
	if dist <= 0 {
		dist += len(t.node)
	}
	return dist
}

// matchParams bundles the parameters controlling the match search.
type matchParams struct {
	// current repeat distances
	rep [4]uint32
	// length when match will be accepted
	nAccept int
	// nodes to check
	check int
	// finish if length gets shorter
	stopShorter bool
}
398
// match iterates over the distances produced by distIter and returns
// the best match found, starting from the candidate m. checked reports
// how many distances were inspected; accepted is true if a match of at
// least p.nAccept bytes was found or the check budget was exhausted.
func (t *binTree) match(m match, distIter func() (int, bool), p matchParams,
) (r match, checked int, accepted bool) {
	buf := &t.dict.buf
	for {
		if checked >= p.check {
			return m, checked, true
		}
		dist, ok := distIter()
		if !ok {
			return m, checked, false
		}
		checked++
		if m.n > 0 {
			// Quick reject: the byte at position m.n-1 must match
			// before a longer match is possible at this distance.
			i := buf.rear - dist + m.n - 1
			if i < 0 {
				i += len(buf.data)
			} else if i >= len(buf.data) {
				i -= len(buf.data)
			}
			if buf.data[i] != t.data[m.n-1] {
				if p.stopShorter {
					return m, checked, false
				}
				continue
			}
		}
		n := buf.matchLen(dist, t.data)
		switch n {
		case 0:
			if p.stopShorter {
				return m, checked, false
			}
			continue
		case 1:
			// Length-1 matches are only worthwhile for the first
			// repeat distance.
			if uint32(dist-minDistance) != p.rep[0] {
				continue
			}
		}
		// Keep the current match if it is longer, or as long but
		// closer.
		if n < m.n || (n == m.n && int64(dist) >= m.distance) {
			continue
		}
		m = match{int64(dist), n}
		if n >= p.nAccept {
			return m, checked, true
		}
	}
}
446
// NextOp identifies the next operation (literal or match) for the data
// at the head of the dictionary, using the repeat distances rep and the
// binary tree to locate match candidates.
func (t *binTree) NextOp(rep [4]uint32) operation {
	// retrieve maxMatchLen data
	n, _ := t.dict.buf.Peek(t.data[:maxMatchLen])
	if n == 0 {
		panic("no data in buffer")
	}
	t.data = t.data[:n]

	var (
		m                  match
		x, u, v            uint32
		iterPred, iterSucc func() (int, bool)
	)
	p := matchParams{
		rep:     rep,
		nAccept: maxMatchLen,
		check:   32,
	}
	// First try the small distances 1..3, which the tree (indexed by
	// 4-byte words) cannot represent.
	i := 4
	iterSmall := func() (dist int, ok bool) {
		i--
		if i <= 0 {
			return 0, false
		}
		return i, true
	}
	m, checked, accepted := t.match(m, iterSmall, p)
	if accepted {
		goto end
	}
	p.check -= checked
	x = xval(t.data)
	u, v = t.search(t.root, x)
	if u == v && len(t.data) == 4 {
		// Exact 4-byte matches only: walk down the chain of equal
		// values in the left subtree.
		iter := func() (dist int, ok bool) {
			if u == null {
				return 0, false
			}
			dist = t.distance(u)
			u, v = t.search(t.node[u].l, x)
			if u != v {
				u = null
			}
			return dist, true
		}
		m, _, _ = t.match(m, iter, p)
		goto end
	}
	// Otherwise inspect in-order successors and predecessors of the
	// bracing nodes; stop as soon as matches get shorter.
	p.stopShorter = true
	iterSucc = func() (dist int, ok bool) {
		if v == null {
			return 0, false
		}
		dist = t.distance(v)
		v = t.succ(v)
		return dist, true
	}
	m, checked, accepted = t.match(m, iterSucc, p)
	if accepted {
		goto end
	}
	p.check -= checked
	iterPred = func() (dist int, ok bool) {
		if u == null {
			return 0, false
		}
		dist = t.distance(u)
		u = t.pred(u)
		return dist, true
	}
	m, _, _ = t.match(m, iterPred, p)
end:
	if m.n == 0 {
		// No match found: emit the next byte as a literal.
		return lit{t.data[0]}
	}
	return m
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go
new file mode 100644
index 0000000..e9bab01
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go
@@ -0,0 +1,45 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7/* Naming conventions follows the CodeReviewComments in the Go Wiki. */
8
// ntz32Const is the de Bruijn-style multiplier used by ntz32 and nlz32.
const ntz32Const = 0x04d7651f

// ntz32Table maps the top five bits of the de Bruijn product to the
// number of trailing zeros. Helper table for the algorithm by Danny
// Dubé; see Henry S. Warren, Jr. "Hacker's Delight" section 5-1
// figure 5-26.
var ntz32Table = [32]int8{
	0, 1, 2, 24, 3, 19, 6, 25,
	22, 4, 20, 10, 16, 7, 12, 26,
	31, 23, 18, 5, 21, 9, 15, 11,
	30, 17, 8, 14, 29, 13, 28, 27,
}

// ntz32 computes the number of trailing zeros for an unsigned 32-bit
// integer. It returns 32 for x == 0.
func ntz32(x uint32) int {
	if x == 0 {
		return 32
	}
	// Isolate the lowest set bit and index the table via the de
	// Bruijn product.
	lowBit := x & -x
	return int(ntz32Table[(lowBit*ntz32Const)>>27])
}

// nlz32 computes the number of leading zeros for an unsigned 32-bit
// integer. It returns 32 for x == 0.
func nlz32(x uint32) int {
	// Smear the most-significant one bit to the right.
	for s := uint(1); s < 32; s <<= 1 {
		x |= x >> s
	}
	// Use the ntz mechanism to calculate nlz.
	x++
	if x == 0 {
		return 0
	}
	return 32 - int(ntz32Table[(x*ntz32Const)>>27])
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go
new file mode 100644
index 0000000..5350d81
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go
@@ -0,0 +1,39 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "io"
10)
11
// breader provides the ReadByte function for a Reader. It doesn't read
// more data from the reader than absolutely necessary.
type breader struct {
	io.Reader
	// helper slice to save allocations on each ReadByte call
	p []byte
}

// ByteReader converts an io.Reader into an io.ByteReader. If r already
// implements io.ByteReader it is returned unchanged; otherwise it is
// wrapped in a breader.
func ByteReader(r io.Reader) io.ByteReader {
	br, ok := r.(io.ByteReader)
	if !ok {
		return &breader{r, make([]byte, 1)}
	}
	return br
}

// ReadByte reads a single byte from the underlying reader. A read that
// yields no data is reported as an error even if the reader returned
// nil.
func (r *breader) ReadByte() (c byte, err error) {
	n, err := r.Reader.Read(r.p)
	if n < 1 {
		if err == nil {
			err = errors.New("breader.ReadByte: no data")
		}
		return 0, err
	}
	return r.p[0], nil
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go
new file mode 100644
index 0000000..50e0b6d
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go
@@ -0,0 +1,171 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9)
10
// buffer provides a circular buffer of bytes. If the front index equals
// the rear index the buffer is empty. As a consequence front cannot be
// equal rear for a full buffer. So a full buffer has a length that is
// one byte less than the length of the data slice.
type buffer struct {
	data  []byte
	front int
	rear  int
}

// newBuffer creates a buffer with the given size. One extra byte is
// allocated to distinguish a full buffer from an empty one.
func newBuffer(size int) *buffer {
	return &buffer{data: make([]byte, size+1)}
}

// Cap returns the capacity of the buffer.
func (b *buffer) Cap() int {
	return len(b.data) - 1
}

// Reset resets the buffer. The front and rear index are set to zero.
func (b *buffer) Reset() {
	b.front = 0
	b.rear = 0
}

// Buffered returns the number of bytes buffered.
func (b *buffer) Buffered() int {
	delta := b.front - b.rear
	if delta < 0 {
		delta += len(b.data)
	}
	return delta
}

// Available returns the number of bytes available for writing.
func (b *buffer) Available() int {
	// one slot stays unused so a full buffer differs from an empty one
	delta := b.rear - 1 - b.front
	if delta < 0 {
		delta += len(b.data)
	}
	return delta
}

// addIndex adds a non-negative integer to the index i and returns the
// resulting index. The function takes care of wrapping the index as
// well as potential overflow situations.
func (b *buffer) addIndex(i int, n int) int {
	// subtraction of len(b.data) prevents overflow
	i += n - len(b.data)
	if i < 0 {
		i += len(b.data)
	}
	return i
}
66
// Read reads bytes from the buffer into p and returns the number of
// bytes read. The function never returns an error but might return
// less data than requested.
func (b *buffer) Read(p []byte) (n int, err error) {
	n, err = b.Peek(p)
	// Consume the peeked bytes by advancing the rear index.
	b.rear = b.addIndex(b.rear, n)
	return n, err
}

// Peek reads bytes from the buffer into p without changing the buffer.
// Peek will never return an error but might return less data than
// requested.
func (b *buffer) Peek(p []byte) (n int, err error) {
	m := b.Buffered()
	n = len(p)
	if m < n {
		n = m
		p = p[:n]
	}
	// First copy from rear to the end of the data slice, then wrap
	// around to the start if needed.
	k := copy(p, b.data[b.rear:])
	if k < n {
		copy(p[k:], b.data)
	}
	return n, nil
}
92
93// Discard skips the n next bytes to read from the buffer, returning the
94// bytes discarded.
95//
96// If Discards skips fewer than n bytes, it returns an error.
97func (b *buffer) Discard(n int) (discarded int, err error) {
98 if n < 0 {
99 return 0, errors.New("buffer.Discard: negative argument")
100 }
101 m := b.Buffered()
102 if m < n {
103 n = m
104 err = errors.New(
105 "buffer.Discard: discarded less bytes then requested")
106 }
107 b.rear = b.addIndex(b.rear, n)
108 return n, err
109}
110
// ErrNoSpace indicates that there is insufficient space for the Write
// operation.
var ErrNoSpace = errors.New("insufficient space")

// Write puts data into the buffer. If fewer bytes are written than
// requested ErrNoSpace is returned.
func (b *buffer) Write(p []byte) (n int, err error) {
	m := b.Available()
	n = len(p)
	if m < n {
		// Write as much as fits and signal the overflow.
		n = m
		p = p[:m]
		err = ErrNoSpace
	}
	// First copy up to the end of the data slice, then wrap around.
	k := copy(b.data[b.front:], p)
	if k < n {
		copy(b.data, p[k:])
	}
	b.front = b.addIndex(b.front, n)
	return n, err
}

// WriteByte writes a single byte into the buffer. The error ErrNoSpace
// is returned if no single byte is available in the buffer for
// writing.
func (b *buffer) WriteByte(c byte) error {
	if b.Available() < 1 {
		return ErrNoSpace
	}
	b.data[b.front] = c
	b.front = b.addIndex(b.front, 1)
	return nil
}
143
// prefixLen returns the length of the common prefix of a and b.
func prefixLen(a, b []byte) int {
	// Compare only up to the length of the shorter slice.
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		if a[i] != b[i] {
			return i
		}
	}
	return n
}
156
// matchLen returns the length of the common prefix for the given
// distance from the rear and the byte slice p.
func (b *buffer) matchLen(distance int, p []byte) int {
	var n int
	i := b.rear - distance
	if i < 0 {
		// The match start wraps around to the tail of the data
		// slice; compare that part first.
		if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
			// Mismatch before reaching the wrap point.
			return n
		}
		p = p[n:]
		i = 0
	}
	n += prefixLen(p, b.data[i:])
	return n
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
new file mode 100644
index 0000000..a3696ba
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
@@ -0,0 +1,37 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "io"
10)
11
// ErrLimit indicates that the limit of the LimitedByteWriter has been
// reached.
var ErrLimit = errors.New("limit reached")

// LimitedByteWriter provides a byte writer that can be written until a
// limit is reached. The field N provides the number of remaining
// bytes.
type LimitedByteWriter struct {
	BW io.ByteWriter
	N  int64
}

// WriteByte writes a single byte to the limited byte writer. It
// returns ErrLimit if the limit has been reached. If the byte is
// successfully written the field N of the LimitedByteWriter will be
// decremented by one.
func (l *LimitedByteWriter) WriteByte(c byte) error {
	if l.N <= 0 {
		return ErrLimit
	}
	if err := l.BW.WriteByte(c); err != nil {
		return err
	}
	l.N--
	return nil
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go
new file mode 100644
index 0000000..16e14db
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go
@@ -0,0 +1,277 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10 "io"
11)
12
// decoder decodes a raw LZMA stream without any header.
type decoder struct {
	// dictionary; the rear pointer of the buffer will be used for
	// reading the data.
	Dict *decoderDict
	// decoder state
	State *state
	// range decoder
	rd *rangeDecoder
	// start stores the head value of the dictionary for the LZMA
	// stream
	start int64
	// size of uncompressed data; negative if unknown
	size int64
	// end-of-stream encountered
	eos bool
	// EOS marker found
	eosMarker bool
}

// newDecoder creates a new decoder instance. The parameter size
// provides the expected byte size of the decompressed data. If the
// size is unknown use a negative value. In that case the decoder will
// look for a terminating end-of-stream marker.
func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) {
	rd, err := newRangeDecoder(br)
	if err != nil {
		return nil, err
	}
	d = &decoder{
		State: state,
		Dict:  dict,
		rd:    rd,
		size:  size,
		start: dict.pos(),
	}
	return d, nil
}

// Reopen restarts the decoder with a new byte reader and a new size.
// Reopen resets the Decompressed counter to zero.
func (d *decoder) Reopen(br io.ByteReader, size int64) error {
	var err error
	if d.rd, err = newRangeDecoder(br); err != nil {
		return err
	}
	d.start = d.Dict.pos()
	d.size = size
	d.eos = false
	return nil
}
64
// decodeLiteral decodes a single literal from the LZMA stream. The
// literal codec state depends on the previous byte and, after a match,
// on the byte at the first repeat distance.
func (d *decoder) decodeLiteral() (op operation, err error) {
	litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head)
	match := d.Dict.byteAt(int(d.State.rep[0]) + 1)
	s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState)
	if err != nil {
		return nil, err
	}
	return lit{s}, nil
}

// errEOS indicates that an EOS marker has been found.
var errEOS = errors.New("EOS marker found")
78
// readOp decodes the next operation from the compressed stream. It
// returns the operation. If an explicit end of stream marker is
// identified the errEOS error is returned.
func (d *decoder) readOp() (op operation, err error) {
	// Value of the end of stream (EOS) marker
	const eosDist = 1<<32 - 1

	state, state2, posState := d.State.states(d.Dict.head)

	b, err := d.State.isMatch[state2].Decode(d.rd)
	if err != nil {
		return nil, err
	}
	if b == 0 {
		// literal
		op, err := d.decodeLiteral()
		if err != nil {
			return nil, err
		}
		d.State.updateStateLiteral()
		return op, nil
	}
	b, err = d.State.isRep[state].Decode(d.rd)
	if err != nil {
		return nil, err
	}
	if b == 0 {
		// simple match: shift the repeat-distance history down.
		d.State.rep[3], d.State.rep[2], d.State.rep[1] =
			d.State.rep[2], d.State.rep[1], d.State.rep[0]

		d.State.updateStateMatch()
		// The length decoder returns the length offset.
		n, err := d.State.lenCodec.Decode(d.rd, posState)
		if err != nil {
			return nil, err
		}
		// The dist decoder returns the distance offset. The actual
		// distance is 1 higher.
		d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n)
		if err != nil {
			return nil, err
		}
		if d.State.rep[0] == eosDist {
			d.eosMarker = true
			return nil, errEOS
		}
		op = match{n: int(n) + minMatchLen,
			distance: int64(d.State.rep[0]) + minDistance}
		return op, nil
	}
	b, err = d.State.isRepG0[state].Decode(d.rd)
	if err != nil {
		return nil, err
	}
	dist := d.State.rep[0]
	if b == 0 {
		// rep match 0
		b, err = d.State.isRepG0Long[state2].Decode(d.rd)
		if err != nil {
			return nil, err
		}
		if b == 0 {
			// short rep: a single byte at the first repeat distance.
			d.State.updateStateShortRep()
			op = match{n: 1, distance: int64(dist) + minDistance}
			return op, nil
		}
	} else {
		// rep match 1, 2 or 3: select the distance and rotate the
		// history so the used distance becomes rep[0].
		b, err = d.State.isRepG1[state].Decode(d.rd)
		if err != nil {
			return nil, err
		}
		if b == 0 {
			dist = d.State.rep[1]
		} else {
			b, err = d.State.isRepG2[state].Decode(d.rd)
			if err != nil {
				return nil, err
			}
			if b == 0 {
				dist = d.State.rep[2]
			} else {
				dist = d.State.rep[3]
				d.State.rep[3] = d.State.rep[2]
			}
			d.State.rep[2] = d.State.rep[1]
		}
		d.State.rep[1] = d.State.rep[0]
		d.State.rep[0] = dist
	}
	n, err := d.State.repLenCodec.Decode(d.rd, posState)
	if err != nil {
		return nil, err
	}
	d.State.updateStateRep()
	op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance}
	return op, nil
}
177
// apply takes the operation and transforms the decoder dictionary
// accordingly. A match copies bytes from the dictionary history; a
// literal appends a single byte.
func (d *decoder) apply(op operation) error {
	var err error
	switch x := op.(type) {
	case match:
		err = d.Dict.writeMatch(x.distance, x.n)
	case lit:
		err = d.Dict.WriteByte(x.b)
	default:
		panic("op is neither a match nor a literal")
	}
	return err
}
191
// decompress fills the dictionary unless no space for new data is
// available. If the end of the LZMA stream has been reached io.EOF
// will be returned.
func (d *decoder) decompress() error {
	if d.eos {
		return io.EOF
	}
	// Keep at least maxMatchLen of space so any single operation fits.
	for d.Dict.Available() >= maxMatchLen {
		op, err := d.readOp()
		switch err {
		case nil:
			break
		case errEOS:
			// Explicit EOS marker: validate trailing data and size.
			d.eos = true
			if !d.rd.possiblyAtEnd() {
				return errDataAfterEOS
			}
			if d.size >= 0 && d.size != d.Decompressed() {
				return errSize
			}
			return io.EOF
		case io.EOF:
			d.eos = true
			return io.ErrUnexpectedEOF
		default:
			return err
		}
		if err = d.apply(op); err != nil {
			return err
		}
		if d.size >= 0 && d.Decompressed() >= d.size {
			// Expected size reached: accept only an optional EOS
			// marker after it.
			d.eos = true
			if d.Decompressed() > d.size {
				return errSize
			}
			if !d.rd.possiblyAtEnd() {
				switch _, err = d.readOp(); err {
				case nil:
					return errSize
				case io.EOF:
					return io.ErrUnexpectedEOF
				case errEOS:
					break
				default:
					return err
				}
			}
			return io.EOF
		}
	}
	return nil
}
244
// Errors that may be returned while decoding data.
var (
	errDataAfterEOS = errors.New("lzma: data after end of stream marker")
	errSize         = errors.New("lzma: wrong uncompressed data size")
)

// Read reads data from the buffer. If no more data is available io.EOF
// is returned.
func (d *decoder) Read(p []byte) (n int, err error) {
	var k int
	for {
		// Read of decoder dict never returns an error.
		k, err = d.Dict.Read(p[n:])
		if err != nil {
			panic(fmt.Errorf("dictionary read error %s", err))
		}
		if k == 0 && d.eos {
			return n, io.EOF
		}
		n += k
		if n >= len(p) {
			return n, nil
		}
		// Refill the dictionary buffer before trying again.
		if err = d.decompress(); err != nil && err != io.EOF {
			return n, err
		}
	}
}

// Decompressed returns the number of bytes decompressed by the decoder.
func (d *decoder) Decompressed() int64 {
	return d.Dict.pos() - d.start
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go
new file mode 100644
index 0000000..564a12b
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go
@@ -0,0 +1,135 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10)
11
// decoderDict provides the dictionary for the decoder. The whole
// dictionary is used as reader buffer.
type decoderDict struct {
	buf  buffer
	head int64
}

// newDecoderDict creates a new decoder dictionary. The whole
// dictionary will be used as reader buffer.
func newDecoderDict(dictCap int) (d *decoderDict, err error) {
	// lower limit supports easy test cases
	if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
		return nil, errors.New("lzma: dictCap out of range")
	}
	d = &decoderDict{buf: *newBuffer(dictCap)}
	return d, nil
}

// Reset clears the dictionary. The read buffer is not changed, so the
// buffered data can still be read.
func (d *decoderDict) Reset() {
	d.head = 0
}

// WriteByte writes a single byte into the dictionary. It is used to
// write literals into the dictionary.
func (d *decoderDict) WriteByte(c byte) error {
	if err := d.buf.WriteByte(c); err != nil {
		return err
	}
	d.head++
	return nil
}
45
// pos returns the position of the dictionary head.
func (d *decoderDict) pos() int64 { return d.head }

// dictLen returns the actual length of the dictionary: the head
// position until the buffer capacity is reached, the capacity after.
func (d *decoderDict) dictLen() int {
	capacity := d.buf.Cap()
	if d.head >= int64(capacity) {
		return capacity
	}
	return int(d.head)
}

// byteAt returns a byte stored in the dictionary. If the distance is
// non-positive or exceeds the current length of the dictionary the
// zero byte is returned.
func (d *decoderDict) byteAt(dist int) byte {
	if !(0 < dist && dist <= d.dictLen()) {
		return 0
	}
	// Index relative to the write position, wrapping around the
	// circular buffer if necessary.
	i := d.buf.front - dist
	if i < 0 {
		i += len(d.buf.data)
	}
	return d.buf.data[i]
}
71
// writeMatch writes the match at the top of the dictionary. The given
// distance must point in the current dictionary and the length must
// not exceed the maximum length 273 supported in LZMA.
//
// The error value ErrNoSpace indicates that no space is available in
// the dictionary for writing. You need to read from the dictionary
// first.
func (d *decoderDict) writeMatch(dist int64, length int) error {
	if !(0 < dist && dist <= int64(d.dictLen())) {
		return errors.New("writeMatch: distance out of range")
	}
	if !(0 < length && length <= maxMatchLen) {
		return errors.New("writeMatch: length out of range")
	}
	if length > d.buf.Available() {
		return ErrNoSpace
	}
	d.head += int64(length)

	i := d.buf.front - int(dist)
	if i < 0 {
		i += len(d.buf.data)
	}
	// Copy in chunks: the source may wrap around the buffer end and
	// may overlap the write position, in which case copying in pieces
	// reproduces the already-written bytes (self-referential match).
	for length > 0 {
		var p []byte
		if i >= d.buf.front {
			p = d.buf.data[i:]
			i = 0
		} else {
			p = d.buf.data[i:d.buf.front]
			i = d.buf.front
		}
		if len(p) > length {
			p = p[:length]
		}
		if _, err := d.buf.Write(p); err != nil {
			// Available() was checked above, so Write must succeed.
			panic(fmt.Errorf("d.buf.Write returned error %s", err))
		}
		length -= len(p)
	}
	return nil
}
114
// Write writes the given bytes into the dictionary and advances the
// head.
func (d *decoderDict) Write(p []byte) (n int, err error) {
	n, err = d.buf.Write(p)
	d.head += int64(n)
	return n, err
}

// Available returns the number of available bytes for writing into the
// decoder dictionary.
func (d *decoderDict) Available() int { return d.buf.Available() }

// Read reads data from the buffer contained in the decoder dictionary.
func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) }

// buffered returns the number of bytes currently buffered in the
// decoder dictionary.
func (d *decoderDict) buffered() int { return d.buf.Buffered() }

// peek gets data from the buffer without advancing the rear index.
func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) }
diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go
new file mode 100644
index 0000000..e08eb98
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go
@@ -0,0 +1,49 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import "fmt"
8
// directCodec allows the encoding and decoding of values with a fixed number
// of bits. The number of bits must be in the range [1,32]. The codec is
// stateless; the value itself stores the bit count.
type directCodec byte
12
13// makeDirectCodec creates a directCodec. The function panics if the number of
14// bits is not in the range [1,32].
15func makeDirectCodec(bits int) directCodec {
16 if !(1 <= bits && bits <= 32) {
17 panic(fmt.Errorf("bits=%d out of range", bits))
18 }
19 return directCodec(bits)
20}
21
// Bits returns the number of bits supported by this codec.
func (dc directCodec) Bits() int {
	return int(dc)
}
26
27// Encode uses the range encoder to encode a value with the fixed number of
28// bits. The most-significant bit is encoded first.
29func (dc directCodec) Encode(e *rangeEncoder, v uint32) error {
30 for i := int(dc) - 1; i >= 0; i-- {
31 if err := e.DirectEncodeBit(v >> uint(i)); err != nil {
32 return err
33 }
34 }
35 return nil
36}
37
38// Decode uses the range decoder to decode a value with the given number of
39// given bits. The most-significant bit is decoded first.
40func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) {
41 for i := int(dc) - 1; i >= 0; i-- {
42 x, err := d.DirectDecodeBit()
43 if err != nil {
44 return 0, err
45 }
46 v = (v << 1) | x
47 }
48 return v, nil
49}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go
new file mode 100644
index 0000000..b053a2d
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go
@@ -0,0 +1,156 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
// Constants used by the distance codec.
const (
	// minimum supported distance
	minDistance = 1
	// maximum supported distance, value is used for the eos marker.
	maxDistance = 1 << 32
	// number of the supported len states
	lenStates = 4
	// start for the position models
	startPosModel = 4
	// first index with align bits support
	endPosModel = 14
	// bits for the position slots
	posSlotBits = 6
	// number of align bits
	alignBits = 4
	// maximum position slot
	maxPosSlot = 63
)

// distCodec provides encoding and decoding of distance values. It uses
// a tree codec per length state for the position slot, reverse tree
// codecs for the mid-range slots and a reverse tree codec for the
// align bits.
type distCodec struct {
	posSlotCodecs [lenStates]treeCodec
	posModel      [endPosModel - startPosModel]treeReverseCodec
	alignCodec    treeReverseCodec
}
33
// deepcopy initializes dc as a deep copy of the source. Copying dc onto
// itself is a no-op.
func (dc *distCodec) deepcopy(src *distCodec) {
	if dc == src {
		return
	}
	for i := range dc.posSlotCodecs {
		dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i])
	}
	for i := range dc.posModel {
		dc.posModel[i].deepcopy(&src.posModel[i])
	}
	dc.alignCodec.deepcopy(&src.alignCodec)
}
47
// distBits returns the number of bits required to encode dist.
func distBits(dist uint32) int {
	if dist < startPosModel {
		// small distances are covered by the 6 position-slot bits
		return 6
	}
	// slot s > 3, dist d
	// s = 2(bits(d)-1) + bit(d, bits(d)-2)
	// s>>1 = bits(d)-1
	// bits(d) = 32-nlz32(d)
	// s>>1=31-nlz32(d)
	// n = 5 + (s>>1) = 36 - nlz32(d)
	return 36 - nlz32(dist)
}
61
// init initializes the distance codec by resetting all its component
// codecs to their start state.
func (dc *distCodec) init() {
	for i := range dc.posSlotCodecs {
		dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits)
	}
	for i := range dc.posModel {
		// The number of bits per position model grows with the
		// position slot.
		posSlot := startPosModel + i
		bits := (posSlot >> 1) - 1
		dc.posModel[i] = makeTreeReverseCodec(bits)
	}
	dc.alignCodec = makeTreeReverseCodec(alignBits)
}
74
75// lenState converts the value l to a supported lenState value.
76func lenState(l uint32) uint32 {
77 if l >= lenStates {
78 l = lenStates - 1
79 }
80 return l
81}
82
// Encode encodes the distance using the parameter l. Dist can have values from
// the full range of uint32 values. To get the distance offset the actual match
// distance has to be decreased by 1. A distance offset of 0xffffffff (eos)
// indicates the end of the stream.
func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) {
	// Compute the posSlot using nlz32
	var posSlot uint32
	var bits uint32
	if dist < startPosModel {
		// small distances map directly to their slot
		posSlot = dist
	} else {
		bits = uint32(30 - nlz32(dist))
		posSlot = startPosModel - 2 + (bits << 1)
		posSlot += (dist >> uint(bits)) & 1
	}

	if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil {
		return
	}

	switch {
	case posSlot < startPosModel:
		// the slot fully encodes the distance
		return nil
	case posSlot < endPosModel:
		// mid-range: remaining bits use a reverse tree codec
		tc := &dc.posModel[posSlot-startPosModel]
		return tc.Encode(dist, e)
	}
	// large distances: direct bits plus the four align bits
	dic := directCodec(bits - alignBits)
	if err = dic.Encode(e, dist>>alignBits); err != nil {
		return
	}
	return dc.alignCodec.Encode(dist, e)
}
116
// Decode decodes the distance offset using the parameter l. The dist value
// 0xffffffff (eos) indicates the end of the stream. Add one to the distance
// offset to get the actual match distance.
func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) {
	posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d)
	if err != nil {
		return
	}

	// posSlot equals distance
	if posSlot < startPosModel {
		return posSlot, nil
	}

	// posSlot uses the individual models; reconstruct the high bits
	// of the distance from the slot value.
	bits := (posSlot >> 1) - 1
	dist = (2 | (posSlot & 1)) << bits
	var u uint32
	if posSlot < endPosModel {
		tc := &dc.posModel[posSlot-startPosModel]
		if u, err = tc.Decode(d); err != nil {
			return 0, err
		}
		dist += u
		return dist, nil
	}

	// posSlots use direct encoding and a single model for the four align
	// bits.
	dic := directCodec(bits - alignBits)
	if u, err = dic.Decode(d); err != nil {
		return 0, err
	}
	dist += u << alignBits
	if u, err = dc.alignCodec.Decode(d); err != nil {
		return 0, err
	}
	dist += u
	return dist, nil
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go
new file mode 100644
index 0000000..18ce009
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go
@@ -0,0 +1,268 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "fmt"
9 "io"
10)
11
// opLenMargin provides the upper limit of the number of bytes required
// to encode a single operation.
const opLenMargin = 10

// compressFlags control the compression process.
type compressFlags uint32

// Values for compressFlags.
const (
	// all data should be compressed, even if compression is not
	// optimal.
	all compressFlags = 1 << iota
)

// encoderFlags provide the flags for an encoder.
type encoderFlags uint32

// Flags for the encoder.
const (
	// eosMarker requests an EOS marker to be written.
	eosMarker encoderFlags = 1 << iota
)

// encoder compresses data buffered in the encoder dictionary and writes
// it into a byte writer.
type encoder struct {
	dict  *encoderDict
	state *state
	re    *rangeEncoder
	// dictionary position when the encoder was created or reopened
	start int64
	// generate eos marker
	marker bool
	// limit is cleared by Reopen; presumably set when the writer
	// limit has been reached — TODO confirm against the rest of the
	// file.
	limit bool
	// number of bytes that must remain writable to close the stream
	margin int
}
47
48// newEncoder creates a new encoder. If the byte writer must be
49// limited use LimitedByteWriter provided by this package. The flags
50// argument supports the eosMarker flag, controlling whether a
51// terminating end-of-stream marker must be written.
52func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict,
53 flags encoderFlags) (e *encoder, err error) {
54
55 re, err := newRangeEncoder(bw)
56 if err != nil {
57 return nil, err
58 }
59 e = &encoder{
60 dict: dict,
61 state: state,
62 re: re,
63 marker: flags&eosMarker != 0,
64 start: dict.Pos(),
65 margin: opLenMargin,
66 }
67 if e.marker {
68 e.margin += 5
69 }
70 return e, nil
71}
72
// Write writes the bytes from p into the dictionary. If not enough
// space is available the data in the dictionary buffer will be
// compressed to make additional space available. If the limit of the
// underlying writer has been reached ErrLimit will be returned.
func (e *encoder) Write(p []byte) (n int, err error) {
	for {
		k, err := e.dict.Write(p[n:])
		n += k
		if err == ErrNoSpace {
			// Free dictionary space by compressing buffered
			// data, then retry the remaining input.
			if err = e.compress(0); err != nil {
				return n, err
			}
			continue
		}
		return n, err
	}
}
90
91// Reopen reopens the encoder with a new byte writer.
92func (e *encoder) Reopen(bw io.ByteWriter) error {
93 var err error
94 if e.re, err = newRangeEncoder(bw); err != nil {
95 return err
96 }
97 e.start = e.dict.Pos()
98 e.limit = false
99 return nil
100}
101
// writeLiteral writes a literal into the LZMA stream. It first encodes
// the is-match bit as 0 and then the literal byte itself, using the
// previous byte and the byte at the rep[0] distance as context.
func (e *encoder) writeLiteral(l lit) error {
	var err error
	state, state2, _ := e.state.states(e.dict.Pos())
	if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil {
		return err
	}
	litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos())
	match := e.dict.ByteAt(int(e.state.rep[0]) + 1)
	err = e.state.litCodec.Encode(e.re, l.b, state, match, litState)
	if err != nil {
		return err
	}
	// Must happen after encoding; the state update changes the
	// context for the next operation.
	e.state.updateStateLiteral()
	return nil
}
118
// iverson implements the Iverson operator as proposed by Donald Knuth
// in his book Concrete Mathematics: it maps true to 1 and false to 0.
func iverson(ok bool) uint32 {
	var v uint32
	if ok {
		v = 1
	}
	return v
}
127
// writeMatch writes a repetition operation into the operation stream.
// Simple matches and the four repeated-distance cases each use their
// own bit-model path; the rep slots are rotated accordingly.
func (e *encoder) writeMatch(m match) error {
	var err error
	if !(minDistance <= m.distance && m.distance <= maxDistance) {
		panic(fmt.Errorf("match distance %d out of range", m.distance))
	}
	dist := uint32(m.distance - minDistance)
	// Length 1 is only valid as a short repetition of rep[0].
	if !(minMatchLen <= m.n && m.n <= maxMatchLen) &&
		!(dist == e.state.rep[0] && m.n == 1) {
		panic(fmt.Errorf(
			"match length %d out of range; dist %d rep[0] %d",
			m.n, dist, e.state.rep[0]))
	}
	state, state2, posState := e.state.states(e.dict.Pos())
	if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil {
		return err
	}
	// g is the index of the rep slot matching dist, or 4 if none.
	g := 0
	for ; g < 4; g++ {
		if e.state.rep[g] == dist {
			break
		}
	}
	b := iverson(g < 4)
	if err = e.state.isRep[state].Encode(e.re, b); err != nil {
		return err
	}
	n := uint32(m.n - minMatchLen)
	if b == 0 {
		// simple match
		e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] =
			e.state.rep[2], e.state.rep[1], e.state.rep[0], dist
		e.state.updateStateMatch()
		if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil {
			return err
		}
		return e.state.distCodec.Encode(e.re, dist, n)
	}
	b = iverson(g != 0)
	if err = e.state.isRepG0[state].Encode(e.re, b); err != nil {
		return err
	}
	if b == 0 {
		// g == 0
		b = iverson(m.n != 1)
		if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil {
			return err
		}
		if b == 0 {
			// short repetition of rep[0], length 1
			e.state.updateStateShortRep()
			return nil
		}
	} else {
		// g in {1,2,3}
		b = iverson(g != 1)
		if err = e.state.isRepG1[state].Encode(e.re, b); err != nil {
			return err
		}
		if b == 1 {
			// g in {2,3}
			b = iverson(g != 2)
			err = e.state.isRepG2[state].Encode(e.re, b)
			if err != nil {
				return err
			}
			if b == 1 {
				e.state.rep[3] = e.state.rep[2]
			}
			e.state.rep[2] = e.state.rep[1]
		}
		// move the used distance to the front of the rep slots
		e.state.rep[1] = e.state.rep[0]
		e.state.rep[0] = dist
	}
	e.state.updateStateRep()
	return e.state.repLenCodec.Encode(e.re, n, posState)
}
204
// writeOp writes a single operation to the range encoder. The function
// checks whether there is enough space available to close the LZMA
// stream; if not, ErrLimit is returned before anything is written.
func (e *encoder) writeOp(op operation) error {
	if e.re.Available() < int64(e.margin) {
		return ErrLimit
	}
	switch x := op.(type) {
	case lit:
		return e.writeLiteral(x)
	case match:
		return e.writeMatch(x)
	default:
		// operation is a closed set; anything else is a bug
		panic("unexpected operation")
	}
}
221
// compress compresses data from the dictionary buffer. If the flag all
// is set, all data in the dictionary buffer will be compressed;
// otherwise maxMatchLen-1 bytes are kept back so matches can extend
// across Write calls. The function returns ErrLimit if the underlying
// writer has reached its limit.
func (e *encoder) compress(flags compressFlags) error {
	n := 0
	if flags&all == 0 {
		n = maxMatchLen - 1
	}
	d := e.dict
	m := d.m
	for d.Buffered() > n {
		op := m.NextOp(e.state.rep)
		if err := e.writeOp(op); err != nil {
			return err
		}
		// consume the encoded bytes from the buffer
		d.Discard(op.Len())
	}
	return nil
}

// eosMatch is a pseudo operation that indicates the end of the stream.
var eosMatch = match{distance: maxDistance, n: minMatchLen}
245
246// Close terminates the LZMA stream. If requested the end-of-stream
247// marker will be written. If the byte writer limit has been or will be
248// reached during compression of the remaining data in the buffer the
249// LZMA stream will be closed and data will remain in the buffer.
250func (e *encoder) Close() error {
251 err := e.compress(all)
252 if err != nil && err != ErrLimit {
253 return err
254 }
255 if e.marker {
256 if err := e.writeMatch(eosMatch); err != nil {
257 return err
258 }
259 }
260 err = e.re.Close()
261 return err
262}
263
// Compressed returns the number of bytes of the input data that have
// been compressed since the encoder was created or reopened.
func (e *encoder) Compressed() int64 {
	return e.dict.Pos() - e.start
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
new file mode 100644
index 0000000..9d0fbc7
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
@@ -0,0 +1,149 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10 "io"
11)
12
// matcher is an interface that supports the identification of the next
// operation.
type matcher interface {
	io.Writer
	SetDict(d *encoderDict)
	NextOp(rep [4]uint32) operation
}

// encoderDict provides the dictionary of the encoder. It includes an
// additional buffer atop of the actual dictionary.
type encoderDict struct {
	buf buffer
	m   matcher
	// absolute position of the dictionary head
	head int64
	// capacity of the dictionary proper, excluding the extra buffer
	capacity int
	// preallocated array
	data [maxMatchLen]byte
}
31
32// newEncoderDict creates the encoder dictionary. The argument bufSize
33// defines the size of the additional buffer.
34func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) {
35 if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
36 return nil, errors.New(
37 "lzma: dictionary capacity out of range")
38 }
39 if bufSize < 1 {
40 return nil, errors.New(
41 "lzma: buffer size must be larger than zero")
42 }
43 d = &encoderDict{
44 buf: *newBuffer(dictCap + bufSize),
45 capacity: dictCap,
46 m: m,
47 }
48 m.SetDict(d)
49 return d, nil
50}
51
// Discard discards n bytes. Note that n must not be larger than
// MaxMatchLen. The discarded bytes are fed into the matcher so its
// index stays in sync with the dictionary head.
func (d *encoderDict) Discard(n int) {
	p := d.data[:n]
	k, _ := d.buf.Read(p)
	// Fewer than n buffered bytes indicates a caller bug.
	if k < n {
		panic(fmt.Errorf("lzma: can't discard %d bytes", n))
	}
	d.head += int64(n)
	d.m.Write(p)
}
63
64// Len returns the data available in the encoder dictionary.
65func (d *encoderDict) Len() int {
66 n := d.buf.Available()
67 if int64(n) > d.head {
68 return int(d.head)
69 }
70 return n
71}
72
73// DictLen returns the actual length of data in the dictionary.
74func (d *encoderDict) DictLen() int {
75 if d.head < int64(d.capacity) {
76 return int(d.head)
77 }
78 return d.capacity
79}
80
// Available returns the number of bytes that can be written by a
// following Write call.
func (d *encoderDict) Available() int {
	return d.buf.Available() - d.DictLen()
}
86
// Write writes data into the dictionary buffer. Note that the position
// of the dictionary head will not be moved. If there is not enough
// space in the buffer ErrNoSpace will be returned.
func (d *encoderDict) Write(p []byte) (n int, err error) {
	m := d.Available()
	// Truncate p to the available space; the fitting prefix is
	// still written even when ErrNoSpace is reported.
	if len(p) > m {
		p = p[:m]
		err = ErrNoSpace
	}
	var e error
	if n, e = d.buf.Write(p); e != nil {
		// a buffer error takes precedence over ErrNoSpace
		err = e
	}
	return n, err
}
102
// Pos returns the position of the head.
func (d *encoderDict) Pos() int64 { return d.head }

// ByteAt returns the byte at the given distance behind the dictionary
// head. It returns 0 if the distance is out of range.
func (d *encoderDict) ByteAt(distance int) byte {
	if !(0 < distance && distance <= d.Len()) {
		return 0
	}
	// wrap around the circular buffer if needed
	i := d.buf.rear - distance
	if i < 0 {
		i += len(d.buf.data)
	}
	return d.buf.data[i]
}
117
// CopyN copies the last n bytes from the dictionary into the provided
// writer. This is used for copying uncompressed data into an
// uncompressed segment. If fewer than n bytes are available, the
// available bytes are copied and ErrNoSpace is reported.
func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) {
	if n <= 0 {
		return 0, nil
	}
	m := d.Len()
	if n > m {
		n = m
		err = ErrNoSpace
	}
	i := d.buf.rear - n
	var e error
	if i < 0 {
		// The range wraps around the end of the circular
		// buffer; write the tail segment first.
		i += len(d.buf.data)
		if written, e = w.Write(d.buf.data[i:]); e != nil {
			return written, e
		}
		i = 0
	}
	var k int
	k, e = w.Write(d.buf.data[i:d.buf.rear])
	written += k
	if e != nil {
		err = e
	}
	return written, err
}
147
// Buffered returns the number of bytes currently buffered.
func (d *encoderDict) Buffered() int { return d.buf.Buffered() }
diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma
new file mode 100644
index 0000000..5edad63
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma
Binary files differ
diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go
new file mode 100644
index 0000000..d786a97
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go
@@ -0,0 +1,309 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10
11 "github.com/ulikunitz/xz/internal/hash"
12)
13
/* For compression we need to find byte sequences that match the byte
 * sequence at the dictionary head. A hash table is a simple method to
 * provide this capability.
 */

// maxMatches limits the number of matches requested from the Matches
// function. This controls the speed of the overall encoding.
const maxMatches = 16

// shortDists defines the number of short distances supported by the
// implementation. These distances are always probed by NextOp.
const shortDists = 8

// The minimum is somehow arbitrary but the maximum is limited by the
// memory requirements of the hash table.
const (
	minTableExponent = 9
	maxTableExponent = 20
)

// newRoller contains the function used to create an instance of the
// hash.Roller.
var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) }
37
// hashTable stores the hash table including the rolling hash method.
//
// We implement chained hashing into a circular buffer. Each entry in
// the circular buffer stores the delta distance to the next position with a
// word that has the same hash value.
type hashTable struct {
	dict *encoderDict
	// actual hash table
	t []int64
	// circular list data with the offset to the next word
	data  []uint32
	front int
	// mask for computing the index for the hash table
	mask uint64
	// hash offset; initial value is -int64(wordLen)
	hoff int64
	// length of the hashed word
	wordLen int
	// hash roller for computing the hash values for the Write
	// method
	wr hash.Roller
	// hash roller for computing arbitrary hashes
	hr hash.Roller
	// preallocated slices
	p         [maxMatches]int64
	distances [maxMatches + shortDists]int
}
65
66// hashTableExponent derives the hash table exponent from the dictionary
67// capacity.
68func hashTableExponent(n uint32) int {
69 e := 30 - nlz32(n)
70 switch {
71 case e < minTableExponent:
72 e = minTableExponent
73 case e > maxTableExponent:
74 e = maxTableExponent
75 }
76 return e
77}
78
79// newHashTable creates a new hash table for words of length wordLen
80func newHashTable(capacity int, wordLen int) (t *hashTable, err error) {
81 if !(0 < capacity) {
82 return nil, errors.New(
83 "newHashTable: capacity must not be negative")
84 }
85 exp := hashTableExponent(uint32(capacity))
86 if !(1 <= wordLen && wordLen <= 4) {
87 return nil, errors.New("newHashTable: " +
88 "argument wordLen out of range")
89 }
90 n := 1 << uint(exp)
91 if n <= 0 {
92 panic("newHashTable: exponent is too large")
93 }
94 t = &hashTable{
95 t: make([]int64, n),
96 data: make([]uint32, capacity),
97 mask: (uint64(1) << uint(exp)) - 1,
98 hoff: -int64(wordLen),
99 wordLen: wordLen,
100 wr: newRoller(wordLen),
101 hr: newRoller(wordLen),
102 }
103 return t, nil
104}
105
// SetDict sets the encoder dictionary the hash table reads from.
func (t *hashTable) SetDict(d *encoderDict) { t.dict = d }
107
108// buffered returns the number of bytes that are currently hashed.
109func (t *hashTable) buffered() int {
110 n := t.hoff + 1
111 switch {
112 case n <= 0:
113 return 0
114 case n >= int64(len(t.data)):
115 return len(t.data)
116 }
117 return int(n)
118}
119
// addIndex adds n to an index ensuring that it stays inside the
// circular buffer for the hash chain.
func (t *hashTable) addIndex(i, n int) int {
	i += n - len(t.data)
	if i < 0 {
		i += len(t.data)
	}
	return i
}
129
// putDelta puts the delta instance at the current front of the circular
// chain buffer.
func (t *hashTable) putDelta(delta uint32) {
	t.data[t.front] = delta
	t.front = t.addIndex(t.front, 1)
}

// putEntry puts a new entry into the hash table. If there is already a
// value stored it is moved into the circular chain buffer.
func (t *hashTable) putEntry(h uint64, pos int64) {
	if pos < 0 {
		return
	}
	i := h & t.mask
	// The table stores positions incremented by one so that zero
	// marks an empty slot.
	old := t.t[i] - 1
	t.t[i] = pos + 1
	var delta int64
	if old >= 0 {
		delta = pos - old
		// A delta of zero terminates the chain; use it when the
		// delta doesn't fit into 32 bits or the old position has
		// already left the chain buffer.
		if delta > 1<<32-1 || delta > int64(t.buffered()) {
			delta = 0
		}
	}
	t.putDelta(uint32(delta))
}
155
// WriteByte converts a single byte into a hash and puts it into the
// hash table. It always returns nil.
func (t *hashTable) WriteByte(b byte) error {
	h := t.wr.RollByte(b)
	t.hoff++
	t.putEntry(h, t.hoff)
	return nil
}

// Write converts the bytes provided into hash tables and stores the
// abbreviated offsets into the hash table. The method will never return an
// error.
func (t *hashTable) Write(p []byte) (n int, err error) {
	for _, b := range p {
		// WriteByte doesn't generate an error.
		t.WriteByte(b)
	}
	return len(p), nil
}
175
// getMatches collects the positions stored for a specific hash. The
// function returns the number of positions found.
//
// TODO: Make a getDistances because that is what we are actually
// interested in.
func (t *hashTable) getMatches(h uint64, positions []int64) (n int) {
	if t.hoff < 0 || len(positions) == 0 {
		return 0
	}
	buffered := t.buffered()
	// tailPos is the oldest position still covered by the chain
	// buffer; rear is its index in the circular buffer.
	tailPos := t.hoff + 1 - int64(buffered)
	rear := t.front - buffered
	if rear >= 0 {
		rear -= len(t.data)
	}
	// get the slot for the hash
	pos := t.t[h&t.mask] - 1
	delta := pos - tailPos
	for {
		if delta < 0 {
			// position has left the chain buffer
			return n
		}
		positions[n] = tailPos + delta
		n++
		if n >= len(positions) {
			return n
		}
		// follow the chain to the next older position
		i := rear + int(delta)
		if i < 0 {
			i += len(t.data)
		}
		u := t.data[i]
		if u == 0 {
			// zero delta terminates the chain
			return n
		}
		delta -= int64(u)
	}
}
213
// hash computes the rolling hash for the word stored in p. For correct
// results its length must be equal to t.wordLen.
func (t *hashTable) hash(p []byte) uint64 {
	var h uint64
	for _, b := range p {
		h = t.hr.RollByte(b)
	}
	return h
}

// Matches fills the positions slice with potential matches. The
// function returns the number of positions filled into positions. The
// byte slice p must have the word length of the hash table.
func (t *hashTable) Matches(p []byte, positions []int64) int {
	if len(p) != t.wordLen {
		panic(fmt.Errorf(
			"byte slice must have length %d", t.wordLen))
	}
	h := t.hash(p)
	return t.getMatches(h, positions)
}
235
// NextOp identifies the next operation using the hash table. It either
// returns the longest match found or a literal for the byte at the
// dictionary head.
//
// TODO: Use all repetitions to find matches.
func (t *hashTable) NextOp(rep [4]uint32) operation {
	// get the potential match positions for the word at the head
	data := t.dict.data[:maxMatchLen]
	n, _ := t.dict.buf.Peek(data)
	data = data[:n]
	var p []int64
	if n < t.wordLen {
		p = t.p[:0]
	} else {
		p = t.p[:maxMatches]
		n = t.Matches(data[:t.wordLen], p)
		p = p[:n]
	}

	// Convert positions into potential distances; the first eight
	// short distances are always probed.
	head := t.dict.head
	dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8)
	for _, pos := range p {
		dis := int(head - pos)
		if dis > shortDists {
			dists = append(dists, dis)
		}
	}

	// check distances, keeping the longest match found
	var m match
	dictLen := t.dict.DictLen()
	for _, dist := range dists {
		if dist > dictLen {
			continue
		}

		// Here comes a trick. We are only interested in matches
		// that are longer than the matches we have found
		// before. So before we test the whole byte sequence at
		// the given distance, we test the first byte that would
		// make the match longer. If it doesn't match the byte
		// to match, we don't need to care any longer.
		i := t.dict.buf.rear - dist + m.n
		if i < 0 {
			i += len(t.dict.buf.data)
		}
		if t.dict.buf.data[i] != data[m.n] {
			// We can't get a longer match. Jump to the next
			// distance.
			continue
		}

		n := t.dict.buf.matchLen(dist, data)
		switch n {
		case 0:
			continue
		case 1:
			// a length-1 match is only useful as a short
			// repetition of rep[0]
			if uint32(dist-minDistance) != rep[0] {
				continue
			}
		}
		if n > m.n {
			m = match{int64(dist), n}
			if n == len(data) {
				// No better match will be found.
				break
			}
		}
	}

	if m.n == 0 {
		// no match found; emit a literal
		return lit{data[0]}
	}
	return m
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go
new file mode 100644
index 0000000..bc70896
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/header.go
@@ -0,0 +1,167 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10)
11
// uint32LE reads an uint32 integer from a little-endian byte slice. The
// slice must hold at least 4 bytes.
func uint32LE(b []byte) uint32 {
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 |
		uint32(b[3])<<24
}
20
// uint64LE converts the uint64 value stored as little endian to an
// uint64 value. The slice must hold at least 8 bytes.
func uint64LE(b []byte) uint64 {
	var x uint64
	// accumulate from the most-significant byte downwards
	for i := 7; i >= 0; i-- {
		x = x<<8 | uint64(b[i])
	}
	return x
}
34
// putUint32LE puts an uint32 integer into a byte slice that must have at
// least a length of 4 bytes, using little-endian byte order.
func putUint32LE(b []byte, x uint32) {
	for i := 0; i < 4; i++ {
		b[i] = byte(x >> uint(8*i))
	}
}
43
// putUint64LE puts the uint64 value into the byte slice as little endian
// value. The byte slice b must have at least place for 8 bytes.
func putUint64LE(b []byte, x uint64) {
	for i := 0; i < 8; i++ {
		b[i] = byte(x)
		x >>= 8
	}
}
56
// noHeaderSize defines the value of the length field in the LZMA header
// when the uncompressed size is unknown.
const noHeaderSize uint64 = 1<<64 - 1

// HeaderLen provides the length of the LZMA file header.
const HeaderLen = 13

// header represents the header of an LZMA file.
type header struct {
	properties Properties
	dictCap    int
	// uncompressed size; negative value if no size is given
	size int64
}
70
71// marshalBinary marshals the header.
72func (h *header) marshalBinary() (data []byte, err error) {
73 if err = h.properties.verify(); err != nil {
74 return nil, err
75 }
76 if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) {
77 return nil, fmt.Errorf("lzma: DictCap %d out of range",
78 h.dictCap)
79 }
80
81 data = make([]byte, 13)
82
83 // property byte
84 data[0] = h.properties.Code()
85
86 // dictionary capacity
87 putUint32LE(data[1:5], uint32(h.dictCap))
88
89 // uncompressed size
90 var s uint64
91 if h.size > 0 {
92 s = uint64(h.size)
93 } else {
94 s = noHeaderSize
95 }
96 putUint64LE(data[5:], s)
97
98 return data, nil
99}
100
// unmarshalBinary unmarshals the header from a byte slice of exactly
// HeaderLen bytes. A size field of noHeaderSize is mapped to -1.
func (h *header) unmarshalBinary(data []byte) error {
	if len(data) != HeaderLen {
		return errors.New("lzma.unmarshalBinary: data has wrong length")
	}

	// properties
	var err error
	if h.properties, err = PropertiesForCode(data[0]); err != nil {
		return err
	}

	// dictionary capacity; a negative value indicates overflow of
	// the int type on 32-bit platforms
	h.dictCap = int(uint32LE(data[1:]))
	if h.dictCap < 0 {
		return errors.New(
			"LZMA header: dictionary capacity exceeds maximum " +
				"integer")
	}

	// uncompressed size
	s := uint64LE(data[5:])
	if s == noHeaderSize {
		h.size = -1
	} else {
		h.size = int64(s)
		if h.size < 0 {
			return errors.New(
				"LZMA header: uncompressed size " +
					"out of int64 range")
		}
	}

	return nil
}
136
137// validDictCap checks whether the dictionary capacity is correct. This
138// is used to weed out wrong file headers.
139func validDictCap(dictcap int) bool {
140 if int64(dictcap) == MaxDictCap {
141 return true
142 }
143 for n := uint(10); n < 32; n++ {
144 if dictcap == 1<<n {
145 return true
146 }
147 if dictcap == 1<<n+1<<(n-1) {
148 return true
149 }
150 }
151 return false
152}
153
// ValidHeader checks for a valid LZMA file header. It allows only
// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
// there is an explicit size it must not exceed 256 GiB. The length of
// the data argument must be HeaderLen.
func ValidHeader(data []byte) bool {
	var h header
	if err := h.unmarshalBinary(data); err != nil {
		return false
	}
	if !validDictCap(h.dictCap) {
		return false
	}
	// 1<<38 == 256 GiB; a negative size means "unknown" and is valid
	return h.size < 0 || h.size <= 1<<38
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go
new file mode 100644
index 0000000..ac6a71a
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go
@@ -0,0 +1,398 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10 "io"
11)
12
const (
	// maximum size of compressed data in a chunk
	maxCompressed = 1 << 16
	// maximum size of uncompressed data in a chunk
	maxUncompressed = 1 << 21
)

// chunkType represents the type of an LZMA2 chunk. Note that this
// value is an internal representation and no actual encoding of a LZMA2
// chunk header.
type chunkType byte

// Possible values for the chunk type.
const (
	// end of stream
	cEOS chunkType = iota
	// uncompressed; reset dictionary
	cUD
	// uncompressed; no reset of dictionary
	cU
	// LZMA compressed; no reset
	cL
	// LZMA compressed; reset state
	cLR
	// LZMA compressed; reset state; new property value
	cLRN
	// LZMA compressed; reset state; new property value; reset dictionary
	cLRND
)

// chunkTypeStrings provide a string representation for the chunk types.
// The array is indexed by the chunkType values above.
var chunkTypeStrings = [...]string{
	cEOS:  "EOS",
	cU:    "U",
	cUD:   "UD",
	cL:    "L",
	cLR:   "LR",
	cLRN:  "LRN",
	cLRND: "LRND",
}
53
54// String returns a string representation of the chunk type.
55func (c chunkType) String() string {
56 if !(cEOS <= c && c <= cLRND) {
57 return "unknown"
58 }
59 return chunkTypeStrings[c]
60}
61
// Actual encodings for the chunk types in the value. Note that the high
// uncompressed size bits are stored in the header byte additionally.
const (
	hEOS  = 0
	hUD   = 1
	hU    = 2
	hL    = 1 << 7
	hLR   = 1<<7 | 1<<5
	hLRN  = 1<<7 | 1<<6
	hLRND = 1<<7 | 1<<6 | 1<<5
)

// errHeaderByte indicates an unsupported value for the chunk header
// byte. These bytes starts the variable-length chunk header.
var errHeaderByte = errors.New("lzma: unsupported chunk header byte")
77
78// headerChunkType converts the header byte into a chunk type. It
79// ignores the uncompressed size bits in the chunk header byte.
80func headerChunkType(h byte) (c chunkType, err error) {
81 if h&hL == 0 {
82 // no compression
83 switch h {
84 case hEOS:
85 c = cEOS
86 case hUD:
87 c = cUD
88 case hU:
89 c = cU
90 default:
91 return 0, errHeaderByte
92 }
93 return
94 }
95 switch h & hLRND {
96 case hL:
97 c = cL
98 case hLR:
99 c = cLR
100 case hLRN:
101 c = cLRN
102 case hLRND:
103 c = cLRND
104 default:
105 return 0, errHeaderByte
106 }
107 return
108}
109
110// uncompressedHeaderLen provides the length of an uncompressed header
111const uncompressedHeaderLen = 3
112
113// headerLen returns the length of the LZMA2 header for a given chunk
114// type.
115func headerLen(c chunkType) int {
116 switch c {
117 case cEOS:
118 return 1
119 case cU, cUD:
120 return uncompressedHeaderLen
121 case cL, cLR:
122 return 5
123 case cLRN, cLRND:
124 return 6
125 }
126 panic(fmt.Errorf("unsupported chunk type %d", c))
127}
128
// chunkHeader represents the contents of a chunk header.
type chunkHeader struct {
	// ctype is the internal chunk type, not the encoded header byte
	ctype chunkType
	// uncompressed size minus one is stored in the header; here the
	// field holds the raw value read from / written to the header
	uncompressed uint32
	compressed   uint16
	// props is only meaningful for chunk types carrying a new
	// property value (cLRN, cLRND)
	props Properties
}

// String returns a string representation of the chunk header.
func (h *chunkHeader) String() string {
	return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed,
		h.compressed, &h.props)
}
142
// UnmarshalBinary reads the content of the chunk header from the data
// slice. The slice must have the correct length for the chunk type
// encoded in its first byte.
func (h *chunkHeader) UnmarshalBinary(data []byte) error {
	if len(data) == 0 {
		return errors.New("no data")
	}
	c, err := headerChunkType(data[0])
	if err != nil {
		return err
	}

	n := headerLen(c)
	if len(data) < n {
		return errors.New("incomplete data")
	}
	if len(data) > n {
		return errors.New("invalid data length")
	}

	// reset all fields; only those relevant for the chunk type are
	// filled in below
	*h = chunkHeader{ctype: c}
	if c == cEOS {
		return nil
	}

	// bytes 1-2 hold the low 16 bits of the uncompressed size
	h.uncompressed = uint32(uint16BE(data[1:3]))
	if c <= cU {
		return nil
	}
	// for compressed chunks the header-byte bits outside the type
	// mask hLRND carry the high bits of the uncompressed size
	h.uncompressed |= uint32(data[0]&^hLRND) << 16

	// bytes 3-4 hold the compressed size
	h.compressed = uint16BE(data[3:5])
	if c <= cLR {
		return nil
	}

	// chunk types with a new property value carry a properties byte
	h.props, err = PropertiesForCode(data[5])
	return err
}
181
// MarshalBinary encodes the chunk header value. The function checks
// whether the content of the chunk header is correct.
func (h *chunkHeader) MarshalBinary() (data []byte, err error) {
	if h.ctype > cLRND {
		return nil, errors.New("invalid chunk type")
	}
	if err = h.props.verify(); err != nil {
		return nil, err
	}

	data = make([]byte, headerLen(h.ctype))

	// first byte encodes the chunk type
	switch h.ctype {
	case cEOS:
		// EOS is a single zero byte
		return data, nil
	case cUD:
		data[0] = hUD
	case cU:
		data[0] = hU
	case cL:
		data[0] = hL
	case cLR:
		data[0] = hLR
	case cLRN:
		data[0] = hLRN
	case cLRND:
		data[0] = hLRND
	}

	// bytes 1-2 hold the low 16 bits of the uncompressed size
	putUint16BE(data[1:3], uint16(h.uncompressed))
	if h.ctype <= cU {
		return data, nil
	}
	// for compressed chunks the high uncompressed size bits go into
	// the header byte, outside the type mask hLRND
	data[0] |= byte(h.uncompressed>>16) &^ hLRND

	// bytes 3-4 hold the compressed size
	putUint16BE(data[3:5], h.compressed)
	if h.ctype <= cLR {
		return data, nil
	}

	// chunk types with a new property value append a properties byte
	data[5] = h.props.Code()
	return data, nil
}
225
// readChunkHeader reads the chunk header from the IO reader. The first
// byte determines the chunk type and therefore how many further bytes
// must be read.
func readChunkHeader(r io.Reader) (h *chunkHeader, err error) {
	// length 1, capacity 6: the slice is later extended in place to
	// the full header length without reallocation
	p := make([]byte, 1, 6)
	if _, err = io.ReadFull(r, p); err != nil {
		return
	}
	c, err := headerChunkType(p[0])
	if err != nil {
		return
	}
	// extend within capacity and read the remaining header bytes
	p = p[:headerLen(c)]
	if _, err = io.ReadFull(r, p[1:]); err != nil {
		return
	}
	h = new(chunkHeader)
	if err = h.UnmarshalBinary(p); err != nil {
		return nil, err
	}
	return h, nil
}
246
// uint16BE interprets the first two bytes of p as a big-endian uint16
// and returns the value.
func uint16BE(p []byte) uint16 {
	hi, lo := p[0], p[1]
	return uint16(hi)<<8 + uint16(lo)
}

// putUint16BE stores x into the first two bytes of p using big-endian
// byte order.
func putUint16BE(p []byte, x uint16) {
	p[1] = byte(x & 0xff)
	p[0] = byte(x >> 8)
}
259
// chunkState is used to manage the state of the chunks. The states are
// single characters: 'S' start, 'T' terminated, plus the intermediate
// states used by the next method.
type chunkState byte

// start and stop define the initial and terminating state of the chunk
// state.
//
// Fix: stop previously omitted the type and was therefore an untyped
// rune constant rather than a chunkState; both constants are now
// explicitly typed. The values are unchanged.
const (
	start chunkState = 'S'
	stop  chunkState = 'T'
)

// errors for the chunk state handling
var (
	// errChunkType reports a chunk type not allowed in the current state
	errChunkType = errors.New("lzma: unexpected chunk type")
	// errState reports an invalid chunk state value
	errState = errors.New("lzma: wrong chunk state")
)
275
// next transitions the chunk state based on the chunk type input. The
// states are single letters: 'S' start, 'L' after an LZMA-compressed
// chunk, 'U' after an uncompressed chunk, 'R' reset required (entered
// after a dictionary reset by an uncompressed chunk), 'T' terminated by
// an EOS chunk. An error is returned for chunk types not allowed in
// the current state.
func (c *chunkState) next(ctype chunkType) error {
	switch *c {
	// start state: only EOS, a dictionary-resetting chunk or a fully
	// resetting compressed chunk is allowed
	case 'S':
		switch ctype {
		case cEOS:
			*c = 'T'
		case cUD:
			*c = 'R'
		case cLRND:
			*c = 'L'
		default:
			return errChunkType
		}
	// normal LZMA mode
	case 'L':
		switch ctype {
		case cEOS:
			*c = 'T'
		case cUD:
			*c = 'R'
		case cU:
			*c = 'U'
		case cL, cLR, cLRN, cLRND:
			break
		default:
			return errChunkType
		}
	// reset required: compressed chunks must reset state and
	// properties before LZMA mode may resume
	case 'R':
		switch ctype {
		case cEOS:
			*c = 'T'
		case cUD, cU:
			break
		case cLRN, cLRND:
			*c = 'L'
		default:
			return errChunkType
		}
	// uncompressed: any compressed chunk switches back to LZMA mode
	case 'U':
		switch ctype {
		case cEOS:
			*c = 'T'
		case cUD:
			*c = 'R'
		case cU:
			break
		case cL, cLR, cLRN, cLRND:
			*c = 'L'
		default:
			return errChunkType
		}
	// terminal state: no further chunks are accepted
	case 'T':
		return errChunkType
	default:
		return errState
	}
	return nil
}
339
340// defaultChunkType returns the default chunk type for each chunk state.
341func (c chunkState) defaultChunkType() chunkType {
342 switch c {
343 case 'S':
344 return cLRND
345 case 'L', 'U':
346 return cL
347 case 'R':
348 return cLRN
349 default:
350 // no error
351 return cEOS
352 }
353}
354
// maxDictCap defines the maximum dictionary capacity supported by the
// LZMA2 dictionary capacity encoding.
const maxDictCap = 1<<32 - 1

// maxDictCapCode defines the maximum dictionary capacity code.
const maxDictCapCode = 40

// decodeDictCap decodes the dictionary capacity byte. The byte is not
// range-checked; the result is a mantissa of 2 or 3 shifted by an
// exponent derived from the upper bits of the code.
func decodeDictCap(c byte) int64 {
	mantissa := 2 + int64(c&1)
	exponent := uint(11 + (c>>1)&0x1f)
	return mantissa << exponent
}

// DecodeDictCap decodes the encoded dictionary capacity. The function
// returns an error if the code is out of range.
func DecodeDictCap(c byte) (n int64, err error) {
	switch {
	case c < maxDictCapCode:
		return decodeDictCap(c), nil
	case c == maxDictCapCode:
		return maxDictCap, nil
	}
	return 0, errors.New("lzma: invalid dictionary size code")
}
379
380// EncodeDictCap encodes a dictionary capacity. The function returns the
381// code for the capacity that is greater or equal n. If n exceeds the
382// maximum support dictionary capacity, the maximum value is returned.
383func EncodeDictCap(n int64) byte {
384 a, b := byte(0), byte(40)
385 for a < b {
386 c := a + (b-a)>>1
387 m := decodeDictCap(c)
388 if n <= m {
389 if n == m {
390 return c
391 }
392 b = c
393 } else {
394 a = c + 1
395 }
396 }
397 return a
398}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
new file mode 100644
index 0000000..e517730
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go
@@ -0,0 +1,129 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import "errors"
8
// maxPosBits defines the number of bits of the position value that are used to
// to compute the posState value. The value is used to select the tree codec
// for length encoding and decoding.
const maxPosBits = 4

// minMatchLen and maxMatchLen give the minimum and maximum values for
// encoding and decoding length values. minMatchLen is also used as base
// for the encoded length values.
const (
	minMatchLen = 2
	maxMatchLen = minMatchLen + 16 + 256 - 1
)

// lengthCodec support the encoding of the length value. Lengths are
// split into three ranges: a 3-bit low tree per posState, a 3-bit mid
// tree per posState and a shared 8-bit high tree; the two choice
// probabilities select the range.
type lengthCodec struct {
	choice [2]prob
	low    [1 << maxPosBits]treeCodec
	mid    [1 << maxPosBits]treeCodec
	high   treeCodec
}
29
30// deepcopy initializes the lc value as deep copy of the source value.
31func (lc *lengthCodec) deepcopy(src *lengthCodec) {
32 if lc == src {
33 return
34 }
35 lc.choice = src.choice
36 for i := range lc.low {
37 lc.low[i].deepcopy(&src.low[i])
38 }
39 for i := range lc.mid {
40 lc.mid[i].deepcopy(&src.mid[i])
41 }
42 lc.high.deepcopy(&src.high)
43}
44
45// init initializes a new length codec.
46func (lc *lengthCodec) init() {
47 for i := range lc.choice {
48 lc.choice[i] = probInit
49 }
50 for i := range lc.low {
51 lc.low[i] = makeTreeCodec(3)
52 }
53 for i := range lc.mid {
54 lc.mid[i] = makeTreeCodec(3)
55 }
56 lc.high = makeTreeCodec(8)
57}
58
// lBits gives the number of bits used for the encoding of the l value
// provided to the range encoder.
func lBits(l uint32) int {
	if l < 8 {
		// choice bit + 3-bit low tree
		return 4
	}
	if l < 16 {
		// two choice bits + 3-bit mid tree
		return 5
	}
	// two choice bits + 8-bit high tree
	return 10
}
71
// Encode encodes the length offset. The length offset l can be compute by
// subtracting minMatchLen (2) from the actual length.
//
//	l = length - minMatchLen
//
// The offset is split over three trees: low (l < 8), mid (8 <= l < 16)
// and high (l >= 16), selected by the two choice bits.
func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32,
) (err error) {
	if l > maxMatchLen-minMatchLen {
		return errors.New("lengthCodec.Encode: l out of range")
	}
	// choice[0] bit 0 selects the 3-bit low tree for the posState
	if l < 8 {
		if err = lc.choice[0].Encode(e, 0); err != nil {
			return
		}
		return lc.low[posState].Encode(e, l)
	}
	if err = lc.choice[0].Encode(e, 1); err != nil {
		return
	}
	// choice[1] bit 0 selects the 3-bit mid tree for l-8
	if l < 16 {
		if err = lc.choice[1].Encode(e, 0); err != nil {
			return
		}
		return lc.mid[posState].Encode(e, l-8)
	}
	// otherwise the shared 8-bit high tree encodes l-16
	if err = lc.choice[1].Encode(e, 1); err != nil {
		return
	}
	if err = lc.high.Encode(e, l-16); err != nil {
		return
	}
	return nil
}
105
// Decode reads the length offset. Add minMatchLen to compute the actual length
// to the length offset l. The two choice bits select the low, mid or
// high tree, mirroring Encode.
func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32,
) (l uint32, err error) {
	var b uint32
	if b, err = lc.choice[0].Decode(d); err != nil {
		return
	}
	if b == 0 {
		// low tree: l in [0,8)
		l, err = lc.low[posState].Decode(d)
		return
	}
	if b, err = lc.choice[1].Decode(d); err != nil {
		return
	}
	if b == 0 {
		// mid tree: l in [8,16)
		l, err = lc.mid[posState].Decode(d)
		l += 8
		return
	}
	// high tree: l in [16,272)
	l, err = lc.high.Decode(d)
	l += 16
	return
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
new file mode 100644
index 0000000..c949d6e
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
@@ -0,0 +1,132 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
// literalCodec supports the encoding of literal. It provides 768 probability
// values per literal state. The upper 512 probabilities are used with the
// context of a match bit.
type literalCodec struct {
	// probs holds 0x300 probabilities per literal state; its length
	// is 0x300 << (lc+lp), see init
	probs []prob
}

// deepcopy initializes literal codec c as a deep copy of the source.
func (c *literalCodec) deepcopy(src *literalCodec) {
	if c == src {
		return
	}
	c.probs = make([]prob, len(src.probs))
	copy(c.probs, src.probs)
}
22
23// init initializes the literal codec.
24func (c *literalCodec) init(lc, lp int) {
25 switch {
26 case !(minLC <= lc && lc <= maxLC):
27 panic("lc out of range")
28 case !(minLP <= lp && lp <= maxLP):
29 panic("lp out of range")
30 }
31 c.probs = make([]prob, 0x300<<uint(lc+lp))
32 for i := range c.probs {
33 c.probs[i] = probInit
34 }
35}
36
// Encode encodes the byte s using a range encoder as well as the current LZMA
// encoder state, a match byte and the literal state. For states >= 7
// the match byte provides additional context until its bits diverge
// from the literal's bits.
func (c *literalCodec) Encode(e *rangeEncoder, s byte,
	state uint32, match byte, litState uint32,
) (err error) {
	// each literal state owns a 0x300-entry probability slice
	k := litState * 0x300
	probs := c.probs[k : k+0x300]
	symbol := uint32(1)
	r := uint32(s)
	if state >= 7 {
		// matched-literal mode: encode bits with the match byte
		// as context until a bit differs from the match bit
		m := uint32(match)
		for {
			matchBit := (m >> 7) & 1
			m <<= 1
			bit := (r >> 7) & 1
			r <<= 1
			// upper 512 probabilities, selected by matchBit
			i := ((1 + matchBit) << 8) | symbol
			if err = probs[i].Encode(e, bit); err != nil {
				return
			}
			symbol = (symbol << 1) | bit
			if matchBit != bit {
				break
			}
			if symbol >= 0x100 {
				break
			}
		}
	}
	// encode the remaining bits without match context
	for symbol < 0x100 {
		bit := (r >> 7) & 1
		r <<= 1
		if err = probs[symbol].Encode(e, bit); err != nil {
			return
		}
		symbol = (symbol << 1) | bit
	}
	return nil
}
76
// Decode decodes a literal byte using the range decoder as well as the LZMA
// state, a match byte, and the literal state. It mirrors Encode: for
// states >= 7 the match byte provides context until the decoded bit
// diverges from the match bit.
func (c *literalCodec) Decode(d *rangeDecoder,
	state uint32, match byte, litState uint32,
) (s byte, err error) {
	// each literal state owns a 0x300-entry probability slice
	k := litState * 0x300
	probs := c.probs[k : k+0x300]
	symbol := uint32(1)
	if state >= 7 {
		// matched-literal mode, using the upper 512 probabilities
		m := uint32(match)
		for {
			matchBit := (m >> 7) & 1
			m <<= 1
			i := ((1 + matchBit) << 8) | symbol
			bit, err := d.DecodeBit(&probs[i])
			if err != nil {
				return 0, err
			}
			symbol = (symbol << 1) | bit
			if matchBit != bit {
				break
			}
			if symbol >= 0x100 {
				break
			}
		}
	}
	// decode the remaining bits without match context
	for symbol < 0x100 {
		bit, err := d.DecodeBit(&probs[symbol])
		if err != nil {
			return 0, err
		}
		symbol = (symbol << 1) | bit
	}
	// drop the leading sentinel bit
	s = byte(symbol - 0x100)
	return s, nil
}
114
// minLC and maxLC define the range for LC values.
const (
	minLC = 0
	maxLC = 8
)

// minLP and maxLP define the range for LP values.
const (
	minLP = 0
	maxLP = 4
)

// minState and maxState define a range for the state values stored in
// the State values.
const (
	minState = 0
	maxState = 11
)
diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
new file mode 100644
index 0000000..4a244eb
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
@@ -0,0 +1,52 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import "errors"
8
// MatchAlgorithm identifies an algorithm to find matches in the
// dictionary.
type MatchAlgorithm byte

// Supported matcher algorithms.
const (
	HashTable4 MatchAlgorithm = iota
	BinaryTree
)

// maStrings are used by the String method. The map also serves as the
// set of supported values in verify.
var maStrings = map[MatchAlgorithm]string{
	HashTable4: "HashTable4",
	BinaryTree: "BinaryTree",
}

// String returns a string representation of the Matcher.
func (a MatchAlgorithm) String() string {
	if s, ok := maStrings[a]; ok {
		return s
	}
	return "unknown"
}
32
33var errUnsupportedMatchAlgorithm = errors.New(
34 "lzma: unsupported match algorithm value")
35
36// verify checks whether the matcher value is supported.
37func (a MatchAlgorithm) verify() error {
38 if _, ok := maStrings[a]; !ok {
39 return errUnsupportedMatchAlgorithm
40 }
41 return nil
42}
43
44func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) {
45 switch a {
46 case HashTable4:
47 return newHashTable(dictCap, 4)
48 case BinaryTree:
49 return newBinTree(dictCap)
50 }
51 return nil, errUnsupportedMatchAlgorithm
52}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go
new file mode 100644
index 0000000..733bb99
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go
@@ -0,0 +1,80 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10 "unicode"
11)
12
// operation represents an operation on the dictionary during encoding or
// decoding. Implementations are match and lit.
type operation interface {
	Len() int
}

// match represents a repetition at the given distance and the given length
type match struct {
	// supports all possible distance values, including the eos marker
	distance int64
	// length in bytes
	n int
}

// verify checks whether the match is valid. If that is not the case an
// error is returned.
func (m match) verify() error {
	if !(minDistance <= m.distance && m.distance <= maxDistance) {
		return errors.New("distance out of range")
	}
	if !(1 <= m.n && m.n <= maxMatchLen) {
		return errors.New("length out of range")
	}
	return nil
}
38
39// l return the l-value for the match, which is the difference of length
40// n and 2.
41func (m match) l() uint32 {
42 return uint32(m.n - minMatchLen)
43}
44
45// dist returns the dist value for the match, which is one less of the
46// distance stored in the match.
47func (m match) dist() uint32 {
48 return uint32(m.distance - minDistance)
49}
50
51// Len returns the number of bytes matched.
52func (m match) Len() int {
53 return m.n
54}
55
56// String returns a string representation for the repetition.
57func (m match) String() string {
58 return fmt.Sprintf("M{%d,%d}", m.distance, m.n)
59}
60
// lit represents a single byte literal.
type lit struct {
	b byte
}

// Len returns 1 for the single byte literal.
func (l lit) Len() int {
	return 1
}

// String returns a string representation for the literal. Unprintable
// bytes are shown as '.'; the hex value is always included.
func (l lit) String() string {
	c := byte('.')
	if unicode.IsPrint(rune(l.b)) {
		c = l.b
	}
	return fmt.Sprintf("L{%c/%02x}", c, l.b)
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go
new file mode 100644
index 0000000..24d50ec
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go
@@ -0,0 +1,53 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
// movebits defines the number of bits used for the updates of probability
// values.
const movebits = 5

// probbits defines the number of bits of a probability value.
const probbits = 11

// probInit defines 0.5 as initial value for prob values.
const probInit prob = 1 << (probbits - 1)

// prob represents probabilities. The type can also be used to encode and
// decode single bits.
type prob uint16

// dec decreases the probability. The decrease is proportional to the
// current probability value.
func (p *prob) dec() {
	v := *p
	*p = v - v>>movebits
}

// inc increases the probability. The increase is proportional to the
// difference of 1 and the probability value.
func (p *prob) inc() {
	v := *p
	*p = v + (1<<probbits-v)>>movebits
}

// bound computes the new bound for a given range using the probability
// value.
func (p prob) bound(r uint32) uint32 {
	return uint32(p) * (r >> probbits)
}

// Bits returns 1, the number of bits that can be encoded or decoded
// with a single prob value.
func (p prob) Bits() int {
	return 1
}
43
// Encode encodes the least-significant bit of v. Note that the p value will be
// changed. The call delegates to the range encoder, which updates p.
func (p *prob) Encode(e *rangeEncoder, v uint32) error {
	return e.EncodeBit(v, p)
}

// Decode decodes a single bit. Note that the p value will change. The
// call delegates to the range decoder, which updates p.
func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) {
	return d.DecodeBit(p)
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go
new file mode 100644
index 0000000..23418e2
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go
@@ -0,0 +1,69 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "fmt"
10)
11
// maximum and minimum values for the LZMA properties.
const (
	minPB = 0
	maxPB = 4
)

// maxPropertyCode is the possible maximum of a properties code byte.
const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1

// Properties contains the parameters LC, LP and PB. The parameter LC
// defines the number of literal context bits; parameter LP the number
// of literal position bits and PB the number of position bits.
type Properties struct {
	LC int
	LP int
	PB int
}
29
30// String returns the properties in a string representation.
31func (p *Properties) String() string {
32 return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB)
33}
34
35// PropertiesForCode converts a properties code byte into a Properties value.
36func PropertiesForCode(code byte) (p Properties, err error) {
37 if code > maxPropertyCode {
38 return p, errors.New("lzma: invalid properties code")
39 }
40 p.LC = int(code % 9)
41 code /= 9
42 p.LP = int(code % 5)
43 code /= 5
44 p.PB = int(code % 5)
45 return p, err
46}
47
48// verify checks the properties for correctness.
49func (p *Properties) verify() error {
50 if p == nil {
51 return errors.New("lzma: properties are nil")
52 }
53 if !(minLC <= p.LC && p.LC <= maxLC) {
54 return errors.New("lzma: lc out of range")
55 }
56 if !(minLP <= p.LP && p.LP <= maxLP) {
57 return errors.New("lzma: lp out of range")
58 }
59 if !(minPB <= p.PB && p.PB <= maxPB) {
60 return errors.New("lzma: pb out of range")
61 }
62 return nil
63}
64
65// Code converts the properties to a byte. The function assumes that
66// the properties components are all in range.
67func (p Properties) Code() byte {
68 return byte((p.PB*5+p.LP)*9 + p.LC)
69}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
new file mode 100644
index 0000000..6361c5e
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go
@@ -0,0 +1,248 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "io"
10)
11
// rangeEncoder implements range encoding of single bits. The low value can
// overflow therefore we need uint64. The cache value is used to handle
// overflows.
type rangeEncoder struct {
	lbw *LimitedByteWriter
	// nrange is the current width of the coding interval
	nrange uint32
	// low is the lower bound of the interval; 64 bits so a carry out
	// of the low 32 bits is visible in bit 32
	low uint64
	// cacheLen counts pending output bytes (cache plus a run of 0xff
	// bytes awaiting carry resolution), see shiftLow
	cacheLen int64
	cache    byte
}

// maxInt64 provides the maximal value of the int64 type
const maxInt64 = 1<<63 - 1

// newRangeEncoder creates a new range encoder. If bw is not already a
// LimitedByteWriter it is wrapped in one with an effectively unlimited
// byte budget.
func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) {
	lbw, ok := bw.(*LimitedByteWriter)
	if !ok {
		lbw = &LimitedByteWriter{BW: bw, N: maxInt64}
	}
	return &rangeEncoder{
		lbw:      lbw,
		nrange:   0xffffffff,
		cacheLen: 1}, nil
}
37
// Available returns the number of bytes that still can be written. The
// method takes the bytes that will be currently written by Close into
// account: the pending cacheLen bytes plus four more flush bytes.
func (e *rangeEncoder) Available() int64 {
	return e.lbw.N - (e.cacheLen + 4)
}

// writeByte writes a single byte to the underlying writer. An error is
// returned if the limit is reached. The written byte will be counted if
// the underlying writer doesn't return an error.
func (e *rangeEncoder) writeByte(c byte) error {
	if e.Available() < 1 {
		return ErrLimit
	}
	return e.lbw.WriteByte(c)
}
54
// DirectEncodeBit encodes the least-significant bit of b with probability 1/2.
func (e *rangeEncoder) DirectEncodeBit(b uint32) error {
	// halve the interval; add the upper half to low iff the bit is 1
	// (the mask is all-ones when b&1 == 1, zero otherwise)
	e.nrange >>= 1
	e.low += uint64(e.nrange) & (0 - (uint64(b) & 1))

	// normalize: keep the interval width at least 2^24 by emitting
	// one byte of low per renormalization step
	const top = 1 << 24
	if e.nrange >= top {
		return nil
	}
	e.nrange <<= 8
	return e.shiftLow()
}

// EncodeBit encodes the least significant bit of b. The p value will be
// updated by the function depending on the bit encoded.
func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error {
	bound := p.bound(e.nrange)
	if b&1 == 0 {
		// bit 0 takes the lower part of the interval and the
		// probability of 0 increases
		e.nrange = bound
		p.inc()
	} else {
		// bit 1 takes the upper part and the probability of 0
		// decreases
		e.low += uint64(bound)
		e.nrange -= bound
		p.dec()
	}

	// normalize
	const top = 1 << 24
	if e.nrange >= top {
		return nil
	}
	e.nrange <<= 8
	return e.shiftLow()
}
90
91// Close writes a complete copy of the low value.
92func (e *rangeEncoder) Close() error {
93 for i := 0; i < 5; i++ {
94 if err := e.shiftLow(); err != nil {
95 return err
96 }
97 }
98 return nil
99}
100
// shiftLow shifts the low value for 8 bit. The shifted byte is written into
// the byte writer. The cache value is used to handle overflows.
func (e *rangeEncoder) shiftLow() error {
	// Output can happen only when no further carry can reach the
	// pending bytes: either the low 32 bits are below 0xff000000 or a
	// carry has already occurred (bit 32 of low is set).
	if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 {
		// flush the cached byte plus any run of 0xff bytes,
		// propagating the carry (e.low>>32) into each of them
		tmp := e.cache
		for {
			err := e.writeByte(tmp + byte(e.low>>32))
			if err != nil {
				return err
			}
			tmp = 0xff
			e.cacheLen--
			if e.cacheLen <= 0 {
				if e.cacheLen < 0 {
					panic("negative cacheLen")
				}
				break
			}
		}
		// the next pending byte is the top byte of low
		e.cache = byte(uint32(e.low) >> 24)
	}
	e.cacheLen++
	// shift low left by one byte, discarding bits above 32
	e.low = uint64(uint32(e.low) << 8)
	return nil
}
126
// rangeDecoder decodes single bits of the range encoding stream.
type rangeDecoder struct {
	br io.ByteReader
	// nrange is the current width of the coding interval
	nrange uint32
	// code is the position inside the interval read from the stream
	code uint32
}

// init initializes the range decoder, by reading from the byte reader.
// The stream must start with a zero byte followed by four code bytes;
// newRangeDecoder performs the same sequence.
func (d *rangeDecoder) init() error {
	d.nrange = 0xffffffff
	d.code = 0

	b, err := d.br.ReadByte()
	if err != nil {
		return err
	}
	if b != 0 {
		return errors.New("newRangeDecoder: first byte not zero")
	}

	// read the initial 32-bit code value
	for i := 0; i < 4; i++ {
		if err = d.updateCode(); err != nil {
			return err
		}
	}

	if d.code >= d.nrange {
		return errors.New("newRangeDecoder: d.code >= d.nrange")
	}

	return nil
}
159
160// newRangeDecoder initializes a range decoder. It reads five bytes from the
161// reader and therefore may return an error.
162func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) {
163 d = &rangeDecoder{br: br, nrange: 0xffffffff}
164
165 b, err := d.br.ReadByte()
166 if err != nil {
167 return nil, err
168 }
169 if b != 0 {
170 return nil, errors.New("newRangeDecoder: first byte not zero")
171 }
172
173 for i := 0; i < 4; i++ {
174 if err = d.updateCode(); err != nil {
175 return nil, err
176 }
177 }
178
179 if d.code >= d.nrange {
180 return nil, errors.New("newRangeDecoder: d.code >= d.nrange")
181 }
182
183 return d, nil
184}
185
// possiblyAtEnd checks whether the decoder may be at the end of the stream.
// A fully flushed stream decodes to a zero code value.
func (d *rangeDecoder) possiblyAtEnd() bool {
	return d.code == 0
}

// DirectDecodeBit decodes a bit with probability 1/2. The return value b will
// contain the bit at the least-significant position. All other bits will be
// zero.
func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) {
	// branch-free selection: t is all-ones when code < nrange/2
	// (bit 0) and zero otherwise (bit 1)
	d.nrange >>= 1
	d.code -= d.nrange
	t := 0 - (d.code >> 31)
	d.code += d.nrange & t
	b = (t + 1) & 1

	// d.code will stay less then d.nrange

	// normalize
	// assume d.code < d.nrange
	const top = 1 << 24
	if d.nrange >= top {
		return b, nil
	}
	d.nrange <<= 8
	// d.code < d.nrange will be maintained
	return b, d.updateCode()
}
213
// DecodeBit decodes a single bit. The bit will be returned at the
// least-significant position. All other bits will be zero. The probability
// value will be updated.
func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) {
	bound := p.bound(d.nrange)
	if d.code < bound {
		// lower part of the interval: bit 0, probability of 0
		// increases
		d.nrange = bound
		p.inc()
		b = 0
	} else {
		// upper part: bit 1, probability of 0 decreases
		d.code -= bound
		d.nrange -= bound
		p.dec()
		b = 1
	}
	// normalize
	// assume d.code < d.nrange
	const top = 1 << 24
	if d.nrange >= top {
		return b, nil
	}
	d.nrange <<= 8
	// d.code < d.nrange will be maintained
	return b, d.updateCode()
}
239
240// updateCode reads a new byte into the code.
241func (d *rangeDecoder) updateCode() error {
242 b, err := d.br.ReadByte()
243 if err != nil {
244 return err
245 }
246 d.code = (d.code << 8) | uint32(b)
247 return nil
248}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go
new file mode 100644
index 0000000..2ef3dca
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go
@@ -0,0 +1,100 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package lzma supports the decoding and encoding of LZMA streams.
6// Reader and Writer support the classic LZMA format. Reader2 and
7// Writer2 support the decoding and encoding of LZMA2 streams.
8//
9// The package is written completely in Go and doesn't rely on any external
10// library.
11package lzma
12
13import (
14 "errors"
15 "io"
16)
17
// ReaderConfig stores the parameters for the reader of the classic LZMA
// format.
type ReaderConfig struct {
	// DictCap is the dictionary capacity in bytes. A zero value selects
	// the default of 8 MiB (see fill).
	DictCap int
}
23
24// fill converts the zero values of the configuration to the default values.
25func (c *ReaderConfig) fill() {
26 if c.DictCap == 0 {
27 c.DictCap = 8 * 1024 * 1024
28 }
29}
30
31// Verify checks the reader configuration for errors. Zero values will
32// be replaced by default values.
33func (c *ReaderConfig) Verify() error {
34 c.fill()
35 if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
36 return errors.New("lzma: dictionary capacity is out of range")
37 }
38 return nil
39}
40
// Reader provides a reader for LZMA files or streams.
type Reader struct {
	lzma io.Reader // underlying compressed stream
	h    header    // parsed classic LZMA header
	d    *decoder  // decoder producing the uncompressed data
}
47
// NewReader creates a new reader for an LZMA stream using the classic
// format. NewReader reads and checks the header of the LZMA stream.
// It is equivalent to using a zero-value ReaderConfig.
func NewReader(lzma io.Reader) (r *Reader, err error) {
	return ReaderConfig{}.NewReader(lzma)
}
53
// NewReader creates a new reader for an LZMA stream in the classic
// format. The function reads and verifies the header of the LZMA
// stream.
func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	// Read the fixed-size classic header (HeaderLen bytes).
	data := make([]byte, HeaderLen)
	if _, err := io.ReadFull(lzma, data); err != nil {
		if err == io.EOF {
			return nil, errors.New("lzma: unexpected EOF")
		}
		return nil, err
	}
	r = &Reader{lzma: lzma}
	if err = r.h.unmarshalBinary(data); err != nil {
		return nil, err
	}
	if r.h.dictCap < MinDictCap {
		return nil, errors.New("lzma: dictionary capacity too small")
	}
	// Use the larger of the header capacity and the configured capacity.
	dictCap := r.h.dictCap
	if c.DictCap > dictCap {
		dictCap = c.DictCap
	}

	state := newState(r.h.properties)
	dict, err := newDecoderDict(dictCap)
	if err != nil {
		return nil, err
	}
	// r.h.size may be -1, in which case the stream is terminated by an
	// EOS marker instead of an explicit size.
	r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size)
	if err != nil {
		return nil, err
	}
	return r, nil
}
91
// EOSMarker indicates that an EOS marker has been encountered while
// decoding the stream.
func (r *Reader) EOSMarker() bool {
	return r.d.eosMarker
}
96
// Read returns uncompressed data. It simply delegates to the underlying
// decoder.
func (r *Reader) Read(p []byte) (n int, err error) {
	return r.d.Read(p)
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go
new file mode 100644
index 0000000..a55cfaa
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go
@@ -0,0 +1,232 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "errors"
9 "io"
10
11 "github.com/ulikunitz/xz/internal/xlog"
12)
13
// Reader2Config stores the parameters for the LZMA2 reader.
type Reader2Config struct {
	// DictCap is the dictionary capacity in bytes. A zero value selects
	// the default of 8 MiB (see fill).
	DictCap int
}
19
20// fill converts the zero values of the configuration to the default values.
21func (c *Reader2Config) fill() {
22 if c.DictCap == 0 {
23 c.DictCap = 8 * 1024 * 1024
24 }
25}
26
27// Verify checks the reader configuration for errors. Zero configuration values
28// will be replaced by default values.
29func (c *Reader2Config) Verify() error {
30 c.fill()
31 if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
32 return errors.New("lzma: dictionary capacity is out of range")
33 }
34 return nil
35}
36
// Reader2 supports the reading of LZMA2 chunk sequences. Note that the
// first chunk should have a dictionary reset and the first compressed
// chunk a properties reset. The chunk sequence may not be terminated by
// an end-of-stream chunk.
type Reader2 struct {
	r   io.Reader // underlying LZMA2 stream
	err error     // sticky error returned by Read

	dict        *decoderDict        // dictionary shared across chunks
	ur          *uncompressedReader // reused reader for uncompressed chunks
	decoder     *decoder            // reused decoder for compressed chunks
	chunkReader io.Reader           // reader for the current chunk (ur or decoder)

	cstate chunkState // state machine validating chunk ordering
	ctype  chunkType  // type of the current chunk
}
53
// NewReader2 creates a reader for an LZMA2 chunk sequence with default
// parameters.
func NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
	return Reader2Config{}.NewReader2(lzma2)
}
58
// NewReader2 creates an LZMA2 reader using the given configuration.
func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	r = &Reader2{r: lzma2, cstate: start}
	r.dict, err = newDecoderDict(c.DictCap)
	if err != nil {
		return nil, err
	}
	// A failure to parse the first chunk is recorded in r.err rather
	// than returned, so it surfaces on the first Read call.
	if err = r.startChunk(); err != nil {
		r.err = err
	}
	return r, nil
}
74
75// uncompressed tests whether the chunk type specifies an uncompressed
76// chunk.
77func uncompressed(ctype chunkType) bool {
78 return ctype == cU || ctype == cUD
79}
80
// startChunk parses the next chunk header and prepares r.chunkReader for
// reading the chunk body. It returns io.EOF when a valid end-of-stream
// chunk has been reached.
func (r *Reader2) startChunk() error {
	r.chunkReader = nil
	header, err := readChunkHeader(r.r)
	if err != nil {
		// A truncated header is an error; EOF here means the stream
		// ended mid-sequence.
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return err
	}
	xlog.Debugf("chunk header %v", header)
	// Validate the chunk ordering against the chunk state machine.
	if err = r.cstate.next(header.ctype); err != nil {
		return err
	}
	if r.cstate == stop {
		return io.EOF
	}
	// Chunk types with a dictionary reset flush the dictionary first.
	if header.ctype == cUD || header.ctype == cLRND {
		r.dict.Reset()
	}
	// Sizes are stored minus one in the chunk header.
	size := int64(header.uncompressed) + 1
	if uncompressed(header.ctype) {
		// Reuse the uncompressed reader if one exists.
		if r.ur != nil {
			r.ur.Reopen(r.r, size)
		} else {
			r.ur = newUncompressedReader(r.r, r.dict, size)
		}
		r.chunkReader = r.ur
		return nil
	}
	// Compressed chunk: limit reads to the compressed payload size.
	br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1))
	if r.decoder == nil {
		// First compressed chunk: create the decoder.
		state := newState(header.props)
		r.decoder, err = newDecoder(br, state, r.dict, size)
		if err != nil {
			return err
		}
		r.chunkReader = r.decoder
		return nil
	}
	// Apply the state/properties reset requested by the chunk type.
	switch header.ctype {
	case cLR:
		r.decoder.State.Reset()
	case cLRN, cLRND:
		r.decoder.State = newState(header.props)
	}
	err = r.decoder.Reopen(br, size)
	if err != nil {
		return err
	}
	r.chunkReader = r.decoder
	return nil
}
134
// Read reads data from the LZMA2 chunk sequence. It transparently moves
// to the next chunk when the current chunk is exhausted. Errors are
// sticky: once set, all further calls return the same error.
func (r *Reader2) Read(p []byte) (n int, err error) {
	if r.err != nil {
		return 0, r.err
	}
	for n < len(p) {
		var k int
		k, err = r.chunkReader.Read(p[n:])
		n += k
		if err != nil {
			// io.EOF marks the end of the current chunk, not the
			// stream; try to start the next chunk.
			if err == io.EOF {
				err = r.startChunk()
				if err == nil {
					continue
				}
			}
			r.err = err
			return n, err
		}
		// A successful read of zero bytes would loop forever; treat
		// it as a stream error.
		if k == 0 {
			r.err = errors.New("lzma: Reader2 doesn't get data")
			return n, r.err
		}
	}
	return n, nil
}
161
// EOS returns whether the LZMA2 stream has been terminated by an
// end-of-stream chunk.
func (r *Reader2) EOS() bool {
	return r.cstate == stop
}
167
// uncompressedReader is used to read uncompressed chunks. Data is pushed
// through the dictionary so that later compressed chunks can reference it.
type uncompressedReader struct {
	lr   io.LimitedReader // limits reads to the chunk size
	Dict *decoderDict     // dictionary receiving the chunk data
	eof  bool             // true once the chunk data is fully consumed
	err  error            // sticky error returned by Read
}
175
176// newUncompressedReader initializes a new uncompressedReader.
177func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader {
178 ur := &uncompressedReader{
179 lr: io.LimitedReader{R: r, N: size},
180 Dict: dict,
181 }
182 return ur
183}
184
185// Reopen reinitializes an uncompressed reader.
186func (ur *uncompressedReader) Reopen(r io.Reader, size int64) {
187 ur.err = nil
188 ur.eof = false
189 ur.lr = io.LimitedReader{R: r, N: size}
190}
191
// fill reads uncompressed data into the dictionary. It returns io.EOF
// when the chunk has been fully consumed and io.ErrUnexpectedEOF when
// the underlying stream ended before the declared chunk size.
func (ur *uncompressedReader) fill() error {
	if !ur.eof {
		// Copy as much as the dictionary can currently hold.
		n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available()))
		if err != io.EOF {
			return err
		}
		// io.EOF from CopyN: the limited reader is exhausted.
		ur.eof = true
		if n > 0 {
			// Some data was copied; report success and let the
			// next fill call return EOF.
			return nil
		}
	}
	// If the limit was not reached the chunk was truncated.
	if ur.lr.N != 0 {
		return io.ErrUnexpectedEOF
	}
	return io.EOF
}
209
// Read reads uncompressed data from the limited reader. Data flows
// through the dictionary: fill copies chunk bytes in, Read copies them
// out. Errors are sticky.
func (ur *uncompressedReader) Read(p []byte) (n int, err error) {
	if ur.err != nil {
		return 0, ur.err
	}
	for {
		// Drain whatever the dictionary currently buffers.
		var k int
		k, err = ur.Dict.Read(p[n:])
		n += k
		if n >= len(p) {
			return n, nil
		}
		if err != nil {
			break
		}
		// Dictionary empty: pull more chunk data in.
		err = ur.fill()
		if err != nil {
			break
		}
	}
	ur.err = err
	return n, err
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go
new file mode 100644
index 0000000..5023510
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/state.go
@@ -0,0 +1,151 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
// states defines the overall state count of the LZMA operation state
// machine.
const states = 12
9
// state maintains the full state of the operation encoding or decoding
// process: the adaptive probability models, the sub-codecs and the
// current position in the 12-entry state machine.
type state struct {
	rep         [4]uint32                 // most recent match distances
	isMatch     [states << maxPosBits]prob // prob of a match vs. literal
	isRepG0Long [states << maxPosBits]prob
	isRep       [states]prob
	isRepG0     [states]prob
	isRepG1     [states]prob
	isRepG2     [states]prob
	litCodec    literalCodec // literal coder
	lenCodec    lengthCodec  // match length coder
	repLenCodec lengthCodec  // repeated-match length coder
	distCodec   distCodec    // distance coder
	state       uint32       // current state machine index [0,states)
	posBitMask  uint32       // mask derived from the PB property
	Properties  Properties   // LC/LP/PB parameters
}
28
29// initProbSlice initializes a slice of probabilities.
30func initProbSlice(p []prob) {
31 for i := range p {
32 p[i] = probInit
33 }
34}
35
// Reset sets all state information to the original values, keeping only
// the Properties field.
func (s *state) Reset() {
	p := s.Properties
	// Zero everything except the retained properties and the derived
	// position-bit mask.
	*s = state{
		Properties: p,
		// dict: s.dict,
		posBitMask: (uint32(1) << uint(p.PB)) - 1,
	}
	// Reinitialize all probability models to their neutral value.
	initProbSlice(s.isMatch[:])
	initProbSlice(s.isRep[:])
	initProbSlice(s.isRepG0[:])
	initProbSlice(s.isRepG1[:])
	initProbSlice(s.isRepG2[:])
	initProbSlice(s.isRepG0Long[:])
	s.litCodec.init(p.LC, p.LP)
	s.lenCodec.init()
	s.repLenCodec.init()
	s.distCodec.init()
}
55
// initState initializes the state in place with the given properties.
// The assignment clears all other fields before Reset rebuilds them.
func initState(s *state, p Properties) {
	*s = state{Properties: p}
	s.Reset()
}
61
62// newState creates a new state from the give Properties.
63func newState(p Properties) *state {
64 s := &state{Properties: p}
65 s.Reset()
66 return s
67}
68
// deepcopy initializes s as a deep copy of the source. Array fields copy
// by assignment; the sub-codecs are copied via their own deepcopy
// methods because they hold slices.
func (s *state) deepcopy(src *state) {
	if s == src {
		return
	}
	s.rep = src.rep
	s.isMatch = src.isMatch
	s.isRepG0Long = src.isRepG0Long
	s.isRep = src.isRep
	s.isRepG0 = src.isRepG0
	s.isRepG1 = src.isRepG1
	s.isRepG2 = src.isRepG2
	s.litCodec.deepcopy(&src.litCodec)
	s.lenCodec.deepcopy(&src.lenCodec)
	s.repLenCodec.deepcopy(&src.repLenCodec)
	s.distCodec.deepcopy(&src.distCodec)
	s.state = src.state
	s.posBitMask = src.posBitMask
	s.Properties = src.Properties
}
89
90// cloneState creates a new clone of the give state.
91func cloneState(src *state) *state {
92 s := new(state)
93 s.deepcopy(src)
94 return s
95}
96
97// updateStateLiteral updates the state for a literal.
98func (s *state) updateStateLiteral() {
99 switch {
100 case s.state < 4:
101 s.state = 0
102 return
103 case s.state < 10:
104 s.state -= 3
105 return
106 }
107 s.state -= 6
108}
109
110// updateStateMatch updates the state for a match.
111func (s *state) updateStateMatch() {
112 if s.state < 7 {
113 s.state = 7
114 } else {
115 s.state = 10
116 }
117}
118
119// updateStateRep updates the state for a repetition.
120func (s *state) updateStateRep() {
121 if s.state < 7 {
122 s.state = 8
123 } else {
124 s.state = 11
125 }
126}
127
128// updateStateShortRep updates the state for a short repetition.
129func (s *state) updateStateShortRep() {
130 if s.state < 7 {
131 s.state = 9
132 } else {
133 s.state = 11
134 }
135}
136
// states computes the states of the operation codec: the plain state
// index, the state combined with the position state, and the position
// state derived from the dictionary head and the PB mask.
func (s *state) states(dictHead int64) (state1, state2, posState uint32) {
	state1 = s.state
	posState = uint32(dictHead) & s.posBitMask
	state2 = (s.state << maxPosBits) | posState
	return
}
144
// litState computes the literal state from the previous byte and the
// dictionary head, using the LP (literal position bits) and LC (literal
// context bits) properties.
func (s *state) litState(prev byte, dictHead int64) uint32 {
	lp, lc := uint(s.Properties.LP), uint(s.Properties.LC)
	// Low lp bits of the position, followed by the high lc bits of the
	// previous byte.
	litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) |
		(uint32(prev) >> (8 - lc))
	return litState
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
new file mode 100644
index 0000000..504b3d7
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go
@@ -0,0 +1,133 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
// treeCodec encodes or decodes values with a fixed bit size. It is using a
// tree of probability values. The root of the tree is the most-significant
// bit.
type treeCodec struct {
	probTree
}
12
13// makeTreeCodec makes a tree codec. The bits value must be inside the range
14// [1,32].
15func makeTreeCodec(bits int) treeCodec {
16 return treeCodec{makeProbTree(bits)}
17}
18
// deepcopy initializes tc as a deep copy of the source, delegating to
// the embedded probTree.
func (tc *treeCodec) deepcopy(src *treeCodec) {
	tc.probTree.deepcopy(&src.probTree)
}
23
// Encode uses the range encoder to encode a fixed-bit-size value, most
// significant bit first. Errors come from the range encoder.
func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) {
	// m indexes the probability tree; it starts at the root (1) and
	// descends one level per encoded bit.
	m := uint32(1)
	for i := int(tc.bits) - 1; i >= 0; i-- {
		b := (v >> uint(i)) & 1
		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
			return err
		}
		m = (m << 1) | b
	}
	return nil
}
36
// Decode uses the range decoder to decode a fixed-bit-size value. Errors
// may be caused by the range decoder.
func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) {
	// m indexes the probability tree, starting at the root (1).
	m := uint32(1)
	for j := 0; j < int(tc.bits); j++ {
		b, err := d.DecodeBit(&tc.probs[m])
		if err != nil {
			return 0, err
		}
		m = (m << 1) | b
	}
	// Strip the leading sentinel bit accumulated in m.
	return m - (1 << uint(tc.bits)), nil
}
50
// treeReverseCodec is another tree codec, where the least-significant bit is
// the start of the probability tree.
type treeReverseCodec struct {
	probTree
}
56
// deepcopy initializes the treeReverseCodec as a deep copy of the
// source, delegating to the embedded probTree.
func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) {
	tc.probTree.deepcopy(&src.probTree)
}
62
63// makeTreeReverseCodec creates treeReverseCodec value. The bits argument must
64// be in the range [1,32].
65func makeTreeReverseCodec(bits int) treeReverseCodec {
66 return treeReverseCodec{makeProbTree(bits)}
67}
68
// Encode uses the range encoder to encode a fixed-bit-size value, least
// significant bit first. The range encoder may cause errors.
func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) {
	// m indexes the probability tree, starting at the root (1).
	m := uint32(1)
	for i := uint(0); i < uint(tc.bits); i++ {
		b := (v >> i) & 1
		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
			return err
		}
		m = (m << 1) | b
	}
	return nil
}
82
// Decode uses the range decoder to decode a fixed-bit-size value, least
// significant bit first. Errors returned by the range decoder will be
// returned.
func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
	// m indexes the probability tree; v accumulates the decoded bits in
	// reverse order.
	m := uint32(1)
	for j := uint(0); j < uint(tc.bits); j++ {
		b, err := d.DecodeBit(&tc.probs[m])
		if err != nil {
			return 0, err
		}
		m = (m << 1) | b
		v |= b << j
	}
	return v, nil
}
97
// probTree stores enough probability values to be used by the treeEncode and
// treeDecode methods of the range coder types.
type probTree struct {
	probs []prob // 1<<bits probability values indexed as a binary tree
	bits  byte   // number of bits of the coded values
}
104
105// deepcopy initializes the probTree value as a deep copy of the source.
106func (t *probTree) deepcopy(src *probTree) {
107 if t == src {
108 return
109 }
110 t.probs = make([]prob, len(src.probs))
111 copy(t.probs, src.probs)
112 t.bits = src.bits
113}
114
115// makeProbTree initializes a probTree structure.
116func makeProbTree(bits int) probTree {
117 if !(1 <= bits && bits <= 32) {
118 panic("bits outside of range [1,32]")
119 }
120 t := probTree{
121 bits: byte(bits),
122 probs: make([]prob, 1<<uint(bits)),
123 }
124 for i := range t.probs {
125 t.probs[i] = probInit
126 }
127 return t
128}
129
// Bits provides the number of bits for the values to de- or encode.
func (t *probTree) Bits() int {
	return int(t.bits)
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go
new file mode 100644
index 0000000..efe34fb
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go
@@ -0,0 +1,209 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "bufio"
9 "errors"
10 "io"
11)
12
// MinDictCap and MaxDictCap provide the range of supported dictionary
// capacities.
const (
	// MinDictCap is the smallest supported dictionary capacity: 4 KiB.
	MinDictCap = 1 << 12
	// MaxDictCap is the largest supported dictionary capacity: 4 GiB - 1.
	MaxDictCap = 1<<32 - 1
)
19
// WriterConfig defines the configuration parameter for a writer.
type WriterConfig struct {
	// Properties for the encoding. If it is nil the value
	// {LC: 3, LP: 0, PB: 2} will be chosen.
	Properties *Properties
	// The capacity of the dictionary. If DictCap is zero, the value
	// 8 MiB will be chosen.
	DictCap int
	// Size of the lookahead buffer; value 0 indicates default size
	// 4096.
	BufSize int
	// Match algorithm.
	Matcher MatchAlgorithm
	// SizeInHeader indicates that the header will contain an
	// explicit size.
	SizeInHeader bool
	// Size of the data to be encoded. A positive value will imply
	// that an explicit size will be set in the header.
	Size int64
	// EOSMarker requests whether the EOSMarker needs to be written.
	// If no explicit size has been given the EOSMarker will be
	// set automatically.
	EOSMarker bool
}
44
45// fill converts zero-value fields to their explicit default values.
46func (c *WriterConfig) fill() {
47 if c.Properties == nil {
48 c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
49 }
50 if c.DictCap == 0 {
51 c.DictCap = 8 * 1024 * 1024
52 }
53 if c.BufSize == 0 {
54 c.BufSize = 4096
55 }
56 if c.Size > 0 {
57 c.SizeInHeader = true
58 }
59 if !c.SizeInHeader {
60 c.EOSMarker = true
61 }
62}
63
64// Verify checks WriterConfig for errors. Verify will replace zero
65// values with default values.
66func (c *WriterConfig) Verify() error {
67 c.fill()
68 var err error
69 if c == nil {
70 return errors.New("lzma: WriterConfig is nil")
71 }
72 if c.Properties == nil {
73 return errors.New("lzma: WriterConfig has no Properties set")
74 }
75 if err = c.Properties.verify(); err != nil {
76 return err
77 }
78 if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
79 return errors.New("lzma: dictionary capacity is out of range")
80 }
81 if !(maxMatchLen <= c.BufSize) {
82 return errors.New("lzma: lookahead buffer size too small")
83 }
84 if c.SizeInHeader {
85 if c.Size < 0 {
86 return errors.New("lzma: negative size not supported")
87 }
88 } else if !c.EOSMarker {
89 return errors.New("lzma: EOS marker is required")
90 }
91 if err = c.Matcher.verify(); err != nil {
92 return err
93 }
94
95 return nil
96}
97
98// header returns the header structure for this configuration.
99func (c *WriterConfig) header() header {
100 h := header{
101 properties: *c.Properties,
102 dictCap: c.DictCap,
103 size: -1,
104 }
105 if c.SizeInHeader {
106 h.size = c.Size
107 }
108 return h
109}
110
// Writer writes an LZMA stream in the classic format.
type Writer struct {
	h   header        // header written at stream start
	bw  io.ByteWriter // byte-level sink used by the encoder
	buf *bufio.Writer // non-nil only if we had to wrap a plain io.Writer
	e   *encoder      // the LZMA encoder
}
118
// NewWriter creates a new LZMA writer for the classic format. The
// method will write the header to the underlying stream.
func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	w = &Writer{h: c.header()}

	// Use the sink directly when it supports byte writes; otherwise
	// wrap it in a bufio.Writer that must be flushed on Close.
	var ok bool
	w.bw, ok = lzma.(io.ByteWriter)
	if !ok {
		w.buf = bufio.NewWriter(lzma)
		w.bw = w.buf
	}
	state := newState(w.h.properties)
	m, err := c.Matcher.new(w.h.dictCap)
	if err != nil {
		return nil, err
	}
	dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m)
	if err != nil {
		return nil, err
	}
	var flags encoderFlags
	if c.EOSMarker {
		flags = eosMarker
	}
	if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil {
		return nil, err
	}

	if err = w.writeHeader(); err != nil {
		return nil, err
	}
	return w, nil
}
155
// NewWriter creates a new LZMA writer using the classic format with
// default parameters. The function writes the header to the underlying
// stream.
func NewWriter(lzma io.Writer) (w *Writer, err error) {
	return WriterConfig{}.NewWriter(lzma)
}
161
// writeHeader writes the LZMA header into the stream.
//
// NOTE(review): the type assertion below assumes w.bw also implements
// io.Writer. This holds for the bufio.Writer fallback, but a caller
// passing a sink that implements io.ByteWriter without io.Writer would
// panic here — confirm against NewWriter's contract.
func (w *Writer) writeHeader() error {
	data, err := w.h.marshalBinary()
	if err != nil {
		return err
	}
	_, err = w.bw.(io.Writer).Write(data)
	return err
}
171
// Write puts data into the Writer. If an explicit size is set in the
// header, writing beyond that size truncates p and returns ErrNoSpace.
func (w *Writer) Write(p []byte) (n int, err error) {
	if w.h.size >= 0 {
		// Remaining capacity: declared size minus what has already
		// been consumed (compressed output plus buffered input).
		m := w.h.size
		m -= w.e.Compressed() + int64(w.e.dict.Buffered())
		if m < 0 {
			m = 0
		}
		if m < int64(len(p)) {
			p = p[:m]
			err = ErrNoSpace
		}
	}
	var werr error
	// A write error takes precedence over ErrNoSpace.
	if n, werr = w.e.Write(p); werr != nil {
		err = werr
	}
	return n, err
}
191
// Close closes the writer stream. It ensures that all data from the
// buffer will be compressed and the LZMA stream will be finished.
// If a size was declared in the header, Close verifies that exactly
// that amount of data was written.
func (w *Writer) Close() error {
	if w.h.size >= 0 {
		n := w.e.Compressed() + int64(w.e.dict.Buffered())
		if n != w.h.size {
			return errSize
		}
	}
	err := w.e.Close()
	// Flush the bufio wrapper if one was installed in NewWriter,
	// preserving the first error encountered.
	if w.buf != nil {
		ferr := w.buf.Flush()
		if err == nil {
			err = ferr
		}
	}
	return err
}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go
new file mode 100644
index 0000000..7c1afe1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go
@@ -0,0 +1,305 @@
1// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package lzma
6
7import (
8 "bytes"
9 "errors"
10 "io"
11)
12
// Writer2Config is used to create a Writer2 using parameters.
type Writer2Config struct {
	// The properties for the encoding. If it is nil the value
	// {LC: 3, LP: 0, PB: 2} will be chosen.
	Properties *Properties
	// The capacity of the dictionary. If DictCap is zero, the value
	// 8 MiB will be chosen.
	DictCap int
	// Size of the lookahead buffer; value 0 indicates default size
	// 4096.
	BufSize int
	// Match algorithm.
	Matcher MatchAlgorithm
}
27
28// fill replaces zero values with default values.
29func (c *Writer2Config) fill() {
30 if c.Properties == nil {
31 c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
32 }
33 if c.DictCap == 0 {
34 c.DictCap = 8 * 1024 * 1024
35 }
36 if c.BufSize == 0 {
37 c.BufSize = 4096
38 }
39}
40
41// Verify checks the Writer2Config for correctness. Zero values will be
42// replaced by default values.
43func (c *Writer2Config) Verify() error {
44 c.fill()
45 var err error
46 if c == nil {
47 return errors.New("lzma: WriterConfig is nil")
48 }
49 if c.Properties == nil {
50 return errors.New("lzma: WriterConfig has no Properties set")
51 }
52 if err = c.Properties.verify(); err != nil {
53 return err
54 }
55 if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
56 return errors.New("lzma: dictionary capacity is out of range")
57 }
58 if !(maxMatchLen <= c.BufSize) {
59 return errors.New("lzma: lookahead buffer size too small")
60 }
61 if c.Properties.LC+c.Properties.LP > 4 {
62 return errors.New("lzma: sum of lc and lp exceeds 4")
63 }
64 if err = c.Matcher.verify(); err != nil {
65 return err
66 }
67 return nil
68}
69
// Writer2 supports the creation of an LZMA2 stream. But note that
// written data is buffered, so call Flush or Close to write data to the
// underlying writer. The Close method writes the end-of-stream marker
// to the stream. So you may be able to concatenate the output of two
// writers as long the output of the first writer has only been flushed
// but not closed.
//
// Any change to the fields Properties, DictCap must be done before the
// first call to Write, Flush or Close.
type Writer2 struct {
	w io.Writer // underlying LZMA2 stream

	start   *state   // encoder state at the start of the current chunk
	encoder *encoder // encoder producing the chunk payload

	cstate chunkState // state machine validating chunk ordering
	ctype  chunkType  // type of the chunk being built

	buf bytes.Buffer      // compressed payload of the current chunk
	lbw LimitedByteWriter // caps the payload at maxCompressed bytes
}
91
// NewWriter2 creates an LZMA2 chunk sequence writer with the default
// parameters and options.
func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
	return Writer2Config{}.NewWriter2(lzma2)
}
97
// NewWriter2 creates a new LZMA2 writer using the given configuration.
func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) {
	if err = c.Verify(); err != nil {
		return nil, err
	}
	w = &Writer2{
		w:      lzma2,
		start:  newState(*c.Properties),
		cstate: start,
		ctype:  start.defaultChunkType(),
	}
	// Chunk payloads are buffered in memory and capped at maxCompressed.
	w.buf.Grow(maxCompressed)
	w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed}
	m, err := c.Matcher.new(c.DictCap)
	if err != nil {
		return nil, err
	}
	d, err := newEncoderDict(c.DictCap, c.BufSize, m)
	if err != nil {
		return nil, err
	}
	// The encoder starts from a clone so w.start stays pristine for
	// chunk boundaries.
	w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0)
	if err != nil {
		return nil, err
	}
	return w, nil
}
125
126// written returns the number of bytes written to the current chunk
127func (w *Writer2) written() int {
128 if w.encoder == nil {
129 return 0
130 }
131 return int(w.encoder.Compressed()) + w.encoder.dict.Buffered()
132}
133
// errClosed indicates that the writer is closed; returned by Write,
// Flush and Close after Close has completed.
var errClosed = errors.New("lzma: writer closed")
136
// Write writes data to the LZMA2 stream. Note that written data will be
// buffered. Use Flush or Close to ensure that data is written to the
// underlying writer.
func (w *Writer2) Write(p []byte) (n int, err error) {
	if w.cstate == stop {
		return 0, errClosed
	}
	for n < len(p) {
		// m is the room left in the current chunk.
		m := maxUncompressed - w.written()
		if m <= 0 {
			panic("lzma: maxUncompressed reached")
		}
		// Feed at most m bytes to the encoder.
		var q []byte
		if n+m < len(p) {
			q = p[n : n+m]
		} else {
			q = p[n:]
		}
		k, err := w.encoder.Write(q)
		n += k
		if err != nil && err != ErrLimit {
			return n, err
		}
		// Chunk full (either the compressed-side limit was hit or the
		// uncompressed budget m is exhausted): emit it and start anew.
		if err == ErrLimit || k == m {
			if err = w.flushChunk(); err != nil {
				return n, err
			}
		}
	}
	return n, nil
}
168
// writeUncompressedChunk writes an uncompressed chunk to the LZMA2
// stream. It is used when the compressed form would be larger than the
// raw data (see writeChunk).
func (w *Writer2) writeUncompressedChunk() error {
	// u is the number of uncompressed bytes in the chunk.
	u := w.encoder.Compressed()
	if u <= 0 {
		return errors.New("lzma: can't write empty uncompressed chunk")
	}
	if u > maxUncompressed {
		panic("overrun of uncompressed data limit")
	}
	// Preserve a pending dictionary reset when converting the chunk
	// type to its uncompressed counterpart.
	switch w.ctype {
	case cLRND:
		w.ctype = cUD
	default:
		w.ctype = cU
	}
	// Roll the encoder state back to the chunk start, since the
	// compressed output for this chunk is being discarded.
	w.encoder.state = w.start

	header := chunkHeader{
		ctype:        w.ctype,
		uncompressed: uint32(u - 1), // stored minus one
	}
	hdata, err := header.MarshalBinary()
	if err != nil {
		return err
	}
	if _, err = w.w.Write(hdata); err != nil {
		return err
	}
	// Emit the raw bytes straight from the dictionary.
	_, err = w.encoder.dict.CopyN(w.w, int(u))
	return err
}
201
// writeCompressedChunk writes a compressed chunk to the underlying
// writer: the chunk header followed by the buffered compressed payload.
func (w *Writer2) writeCompressedChunk() error {
	if w.ctype == cU || w.ctype == cUD {
		panic("chunk type uncompressed")
	}

	// u: uncompressed byte count; c: compressed byte count.
	u := w.encoder.Compressed()
	if u <= 0 {
		return errors.New("writeCompressedChunk: empty chunk")
	}
	if u > maxUncompressed {
		panic("overrun of uncompressed data limit")
	}
	c := w.buf.Len()
	if c <= 0 {
		panic("no compressed data")
	}
	if c > maxCompressed {
		panic("overrun of compressed data limit")
	}
	header := chunkHeader{
		ctype:        w.ctype,
		uncompressed: uint32(u - 1), // stored minus one
		compressed:   uint16(c - 1), // stored minus one
		props:        w.encoder.state.Properties,
	}
	hdata, err := header.MarshalBinary()
	if err != nil {
		return err
	}
	if _, err = w.w.Write(hdata); err != nil {
		return err
	}
	// Drain the chunk buffer into the underlying writer.
	_, err = io.Copy(w.w, &w.buf)
	return err
}
239
// writeChunk writes a single chunk to the underlying writer, choosing
// the representation (uncompressed or compressed) that produces the
// smaller total output including the chunk header.
func (w *Writer2) writeChunk() error {
	u := int(uncompressedHeaderLen + w.encoder.Compressed())
	c := headerLen(w.ctype) + w.buf.Len()
	if u < c {
		return w.writeUncompressedChunk()
	}
	return w.writeCompressedChunk()
}
249
// flushChunk terminates the current chunk. The encoder will be reset
// to support the next chunk.
func (w *Writer2) flushChunk() error {
	// Nothing buffered: nothing to flush.
	if w.written() == 0 {
		return nil
	}
	var err error
	if err = w.encoder.Close(); err != nil {
		return err
	}
	if err = w.writeChunk(); err != nil {
		return err
	}
	// Reset the payload buffer and its byte budget for the next chunk.
	w.buf.Reset()
	w.lbw.N = maxCompressed
	if err = w.encoder.Reopen(&w.lbw); err != nil {
		return err
	}
	// Advance the chunk state machine and pick the next chunk type.
	if err = w.cstate.next(w.ctype); err != nil {
		return err
	}
	w.ctype = w.cstate.defaultChunkType()
	// Remember the encoder state at this chunk boundary.
	w.start = cloneState(w.encoder.state)
	return nil
}
275
// Flush writes all buffered data out to the underlying stream. This
// could result in multiple chunks to be created.
func (w *Writer2) Flush() error {
	if w.cstate == stop {
		return errClosed
	}
	// flushChunk caps each chunk, so loop until nothing is buffered.
	for w.written() > 0 {
		if err := w.flushChunk(); err != nil {
			return err
		}
	}
	return nil
}
289
290// Close terminates the LZMA2 stream with an EOS chunk.
291func (w *Writer2) Close() error {
292 if w.cstate == stop {
293 return errClosed
294 }
295 if err := w.Flush(); err != nil {
296 return nil
297 }
298 // write zero byte EOS chunk
299 _, err := w.w.Write([]byte{0})
300 if err != nil {
301 return err
302 }
303 w.cstate = stop
304 return nil
305}