// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Modified for deflate by Klaus Post (c) 2015.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
    "encoding/binary"
    "fmt"
    "math/bits"
)

type fastEnc interface {
    Encode(dst *tokens, src []byte)
    Reset()
}

// newFastEnc returns a fast encoder for the given compression level.
// Levels 1 through 6 are supported; any other level panics.
func newFastEnc(level int) fastEnc {
    switch level {
    case 1:
        return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
    case 2:
        return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
    case 3:
        return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
    case 4:
        return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
    case 5:
        return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
    case 6:
        return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
    default:
        panic("invalid level specified")
    }
}
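
// As a rough usage sketch (the actual wiring lives elsewhere in this
// package), an encoder is created once per level and reused across blocks,
// where block is any []byte chunk to be compressed:
//
//	var t tokens
//	enc := newFastEnc(3)
//	enc.Encode(&t, block) // turn block into literal/match tokens
//	enc.Reset()           // drop history before encoding an unrelated stream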

const (
    tableBits       = 15             // Bits used in the table
    tableSize       = 1 << tableBits // Size of the table
    tableShift      = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
    baseMatchOffset = 1              // The smallest match offset
    baseMatchLength = 3              // The smallest match length per the RFC section 3.2.5
    maxMatchOffset  = 1 << 15        // The largest match offset

    bTableBits   = 17                                               // Bits used in the big tables
    bTableSize   = 1 << bTableBits                                  // Size of the table
    allocHistory = maxStoreBlockSize * 5                            // Size to preallocate for history.
    bufferReset  = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
)

const (
    prime3bytes = 506832829
    prime4bytes = 2654435761
    prime5bytes = 889523592379
    prime6bytes = 227718039650203
    prime7bytes = 58295818150454627
    prime8bytes = 0xcf1bbcdcb7a56463
)
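
// These primeNbytes values are large odd multipliers for the multiplicative
// hashes below: the (masked) input is multiplied by a constant and only the
// top h bits of the product are kept, which spreads similar inputs across the
// table. A minimal sketch of the same idea for 4 bytes and a tableBits-sized
// table (for some src and position s):
//
//	idx := (load32(src, s) * prime4bytes) >> (32 - tableBits)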

func load32(b []byte, i int) uint32 {
    // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
    b = b[i:]
    b = b[:4]
    return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func load64(b []byte, i int) uint64 {
    return binary.LittleEndian.Uint64(b[i:])
}

func load3232(b []byte, i int32) uint32 {
    return binary.LittleEndian.Uint32(b[i:])
}

func load6432(b []byte, i int32) uint64 {
    return binary.LittleEndian.Uint64(b[i:])
}
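
// The binary.LittleEndian loads above typically compile down to a single
// bounds check plus one machine-word load on little-endian platforms, which
// keeps these helpers cheap in the hot path.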

func hash(u uint32) uint32 {
    return (u * 0x1e35a7bd) >> tableShift
}
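
// For illustration, indexing a [tableSize]tableEntry table with hash looks
// roughly like this (for some src and position s; the leveled encoders keep
// their own table layouts):
//
//	var table [tableSize]tableEntry
//	idx := hash(load32(src, s)) // always < tableSize because of tableShift
//	candidate := table[idx]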

type tableEntry struct {
    offset int32
}

// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastGen struct {
    hist []byte
    cur  int32
}

// addBlock appends src to the history buffer and returns the offset at which
// src starts. When the buffer would overflow, the most recent maxMatchOffset
// bytes are first moved to the front and e.cur is advanced accordingly.
func (e *fastGen) addBlock(src []byte) int32 {
    // check if we have space already
    if len(e.hist)+len(src) > cap(e.hist) {
        if cap(e.hist) == 0 {
            e.hist = make([]byte, 0, allocHistory)
        } else {
            if cap(e.hist) < maxMatchOffset*2 {
                panic("unexpected buffer size")
            }
            // Move down
            offset := int32(len(e.hist)) - maxMatchOffset
            copy(e.hist[0:maxMatchOffset], e.hist[offset:])
            e.cur += offset
            e.hist = e.hist[:maxMatchOffset]
        }
    }
    s := int32(len(e.hist))
    e.hist = append(e.hist, src...)
    return s
}
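
// As a rough worked example of the move-down above: if the history holds
// 160 KiB when it runs out of room, the newest 32 KiB (maxMatchOffset) are
// copied to the front, e.cur grows by the 128 KiB that were discarded, and
// src is appended after the retained window, so offsets recorded earlier keep
// referring to the same absolute positions.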

// hash4u returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4u(u uint32, h uint8) uint32 {
    return (u * prime4bytes) >> ((32 - h) & reg8SizeMask32)
}

type tableEntryPrev struct {
    Cur  tableEntry
    Prev tableEntry
}

// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4x64(u uint64, h uint8) uint32 {
    return (uint32(u) * prime4bytes) >> ((32 - h) & reg8SizeMask32)
}

// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
    return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
}

// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
    return uint32((u * prime8bytes) >> ((64 - h) & reg8SizeMask64))
}

// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash6(u uint64, h uint8) uint32 {
    return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & reg8SizeMask64))
}
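
// In hash6 and hash7 the initial shift left by (64 - 48) and (64 - 56) bits
// discards everything above the lowest 6 or 7 bytes of u before the multiply,
// so the result depends only on those bytes; the final shift by (64 - h) then
// keeps the top h bits of the product as the table index.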

// matchlen will return the match length between offsets s and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >= 0 and that s < len(src).
func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
    if debugDecode {
        if t >= s {
            panic(fmt.Sprint("t >= s:", t, s))
        }
        if int(s) >= len(src) {
            panic(fmt.Sprint("s >= len(src):", s, len(src)))
        }
        if t < 0 {
            panic(fmt.Sprint("t < 0:", t))
        }
        if s-t > maxMatchOffset {
            panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", maxMatchOffset, ")"))
        }
    }
    s1 := int(s) + maxMatchLength - 4
    if s1 > len(src) {
        s1 = len(src)
    }

    // Extend the match to be as long as possible.
    return int32(matchLen(src[s:s1], src[t:]))
}

// matchlenLong will return the match length between offsets s and t in src.
// It is assumed that s > t, that t >= 0 and that s < len(src).
func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
    if debugDecode {
        if t >= s {
            panic(fmt.Sprint("t >= s:", t, s))
        }
        if int(s) >= len(src) {
            panic(fmt.Sprint("s >= len(src):", s, len(src)))
        }
        if t < 0 {
            panic(fmt.Sprint("t < 0:", t))
        }
        if s-t > maxMatchOffset {
            panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", maxMatchOffset, ")"))
        }
    }
    // Extend the match to be as long as possible.
    return int32(matchLen(src[s:], src[t:]))
}

// Reset the encoding table.
func (e *fastGen) Reset() {
    if cap(e.hist) < allocHistory {
        e.hist = make([]byte, 0, allocHistory)
    }
    // We offset current position so everything will be out of reach.
    // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
    if e.cur <= bufferReset {
        e.cur += maxMatchOffset + int32(len(e.hist))
    }
    e.hist = e.hist[:0]
}
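
// Advancing e.cur by maxMatchOffset plus the discarded history length means
// any offset still sitting in a hash table is at least maxMatchOffset behind
// the new current position, i.e. out of reach for future matches.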

// matchLen returns the number of bytes that match at the start of a and b.
// 'a' must be the shorter of the two.
func matchLen(a, b []byte) int {
    b = b[:len(a)]
    var checked int
    if len(a) >= 4 {
        // Try 4 bytes first
        if diff := binary.LittleEndian.Uint32(a) ^ binary.LittleEndian.Uint32(b); diff != 0 {
            return bits.TrailingZeros32(diff) >> 3
        }
        // Switch to 8 byte matching.
        checked = 4
        a = a[4:]
        b = b[4:]
        for len(a) >= 8 {
            b = b[:len(a)]
            if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
                return checked + (bits.TrailingZeros64(diff) >> 3)
            }
            checked += 8
            a = a[8:]
            b = b[8:]
        }
    }
    b = b[:len(a)]
    for i := range a {
        if a[i] != b[i] {
            return i + checked
        }
    }
    return len(a) + checked
}
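
// matchLen works in three stages: a 4-byte probe, 8-byte strides, and a
// byte-by-byte tail. XORing two little-endian loads yields a value whose
// trailing zero count, divided by 8, is the number of leading bytes that
// still matched, which is why bits.TrailingZeros is applied to the diff.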