third_party/cargo: add image
parent
2ae07a4679
commit
661bc12dbd
|
@ -16,6 +16,10 @@ alias(
|
|||
name = "env_logger",
|
||||
actual = "//third_party/cargo/vendor/env_logger-0.6.2:env_logger",
|
||||
)
|
||||
alias(
|
||||
name = "image",
|
||||
actual = "//third_party/cargo/vendor/image-0.23.1:image",
|
||||
)
|
||||
alias(
|
||||
name = "log",
|
||||
actual = "//third_party/cargo/vendor/log-0.4.8:log",
|
||||
|
|
|
@ -1,5 +1,11 @@
|
|||
# This file is automatically @generated by Cargo.
|
||||
# It is not intended for manual editing.
|
||||
[[package]]
|
||||
name = "adler32"
|
||||
version = "1.0.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2"
|
||||
|
||||
[[package]]
|
||||
name = "aho-corasick"
|
||||
version = "0.7.10"
|
||||
|
@ -79,6 +85,12 @@ version = "0.1.6"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"
|
||||
|
||||
[[package]]
|
||||
name = "bytemuck"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "37fa13df2292ecb479ec23aa06f4507928bef07839be9ef15281411076629431"
|
||||
|
||||
[[package]]
|
||||
name = "byteorder"
|
||||
version = "1.3.4"
|
||||
|
@ -167,12 +179,19 @@ dependencies = [
|
|||
"objc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "color_quant"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0dbbb57365263e881e805dc77d94697c9118fd94d8da011240555aa7b23445bd"
|
||||
|
||||
[[package]]
|
||||
name = "compile_with_bazel"
|
||||
version = "1.33.7"
|
||||
dependencies = [
|
||||
"cgmath",
|
||||
"env_logger",
|
||||
"image",
|
||||
"log",
|
||||
"openvr",
|
||||
"vulkano",
|
||||
|
@ -249,6 +268,15 @@ dependencies = [
|
|||
"objc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crc32fast"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam"
|
||||
version = "0.7.3"
|
||||
|
@ -320,6 +348,16 @@ dependencies = [
|
|||
"lazy_static",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "deflate"
|
||||
version = "0.8.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "050ef6de42a33903b30a7497b76b40d3d58691d4d3eec355348c122444a388f0"
|
||||
dependencies = [
|
||||
"adler32",
|
||||
"byteorder",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dispatch"
|
||||
version = "0.2.0"
|
||||
|
@ -341,6 +379,12 @@ version = "1.1.1"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "52ba6eb47c2131e784a38b726eb54c1e1484904f013e576a25354d0124161af6"
|
||||
|
||||
[[package]]
|
||||
name = "either"
|
||||
version = "1.5.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"
|
||||
|
||||
[[package]]
|
||||
name = "env_logger"
|
||||
version = "0.6.2"
|
||||
|
@ -397,6 +441,16 @@ version = "0.3.3"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
|
||||
|
||||
[[package]]
|
||||
name = "gif"
|
||||
version = "0.10.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "471d90201b3b223f3451cd4ad53e34295f16a1df17b1edf3736d47761c3981af"
|
||||
dependencies = [
|
||||
"color_quant",
|
||||
"lzw",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "half"
|
||||
version = "1.5.0"
|
||||
|
@ -421,6 +475,33 @@ dependencies = [
|
|||
"quick-error",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "image"
|
||||
version = "0.23.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "403f0c29211dc50a78eb53eeeae87945d44cb63071881834212f36677fccb2b4"
|
||||
dependencies = [
|
||||
"bytemuck",
|
||||
"byteorder",
|
||||
"gif",
|
||||
"jpeg-decoder",
|
||||
"num-iter",
|
||||
"num-rational",
|
||||
"num-traits",
|
||||
"png",
|
||||
"scoped_threadpool",
|
||||
"tiff",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "inflate"
|
||||
version = "0.4.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1cdb29978cc5797bd8dcc8e5bf7de604891df2a8dc576973d71a281e916db2ff"
|
||||
dependencies = [
|
||||
"adler32",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "instant"
|
||||
version = "0.1.2"
|
||||
|
@ -436,6 +517,16 @@ dependencies = [
|
|||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jpeg-decoder"
|
||||
version = "0.1.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0256f0aec7352539102a9efbcb75543227b7ab1117e0f95450023af730128451"
|
||||
dependencies = [
|
||||
"byteorder",
|
||||
"rayon",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "kernel32-sys"
|
||||
version = "0.2.2"
|
||||
|
@ -501,6 +592,12 @@ dependencies = [
|
|||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lzw"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7d947cbb889ed21c2a84be6ffbaebf5b4e0f4340638cba0444907e38b56be084"
|
||||
|
||||
[[package]]
|
||||
name = "malloc_buf"
|
||||
version = "0.0.6"
|
||||
|
@ -556,6 +653,15 @@ dependencies = [
|
|||
"objc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.3.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "aa679ff6578b1cddee93d7e82e263b94a575e0bfced07284eb0c037c1d2416a5"
|
||||
dependencies = [
|
||||
"adler32",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "0.6.21"
|
||||
|
@ -623,6 +729,38 @@ dependencies = [
|
|||
"void",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-integer"
|
||||
version = "0.1.42"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba"
|
||||
dependencies = [
|
||||
"autocfg 1.0.0",
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-iter"
|
||||
version = "0.1.40"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dfb0800a0291891dd9f4fe7bd9c19384f98f7fbe0cd0f39a2c6b88b9868bbc00"
|
||||
dependencies = [
|
||||
"autocfg 1.0.0",
|
||||
"num-integer",
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-rational"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "da4dc79f9e6c81bef96148c8f6b8e72ad4541caa4a24373e900a36da07de03a3"
|
||||
dependencies = [
|
||||
"autocfg 1.0.0",
|
||||
"num-integer",
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-traits"
|
||||
version = "0.2.11"
|
||||
|
@ -632,6 +770,16 @@ dependencies = [
|
|||
"autocfg 1.0.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num_cpus"
|
||||
version = "1.12.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
|
||||
dependencies = [
|
||||
"hermit-abi",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "objc"
|
||||
version = "0.2.7"
|
||||
|
@ -715,6 +863,18 @@ version = "0.3.17"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677"
|
||||
|
||||
[[package]]
|
||||
name = "png"
|
||||
version = "0.16.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "46060468187c21c00ffa2a920690b29997d7fd543f5a4d400461e4a7d4fccde8"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"crc32fast",
|
||||
"deflate",
|
||||
"inflate",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "0.4.30"
|
||||
|
@ -854,6 +1014,30 @@ dependencies = [
|
|||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rayon"
|
||||
version = "1.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098"
|
||||
dependencies = [
|
||||
"crossbeam-deque",
|
||||
"either",
|
||||
"rayon-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rayon-core"
|
||||
version = "1.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9"
|
||||
dependencies = [
|
||||
"crossbeam-deque",
|
||||
"crossbeam-queue",
|
||||
"crossbeam-utils",
|
||||
"lazy_static",
|
||||
"num_cpus",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rdrand"
|
||||
version = "0.4.0"
|
||||
|
@ -917,6 +1101,12 @@ dependencies = [
|
|||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "scoped_threadpool"
|
||||
version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1d51f5df5af43ab3f1360b429fa5e0152ac5ce8c0bd6485cae490332e96846a8"
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
version = "1.1.0"
|
||||
|
@ -988,6 +1178,17 @@ dependencies = [
|
|||
"lazy_static",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tiff"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "002351e428db1eb1d8656d4ca61947c3519ac3191e1c804d4600cd32093b77ad"
|
||||
dependencies = [
|
||||
"byteorder",
|
||||
"lzw",
|
||||
"miniz_oxide",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-xid"
|
||||
version = "0.1.0"
|
||||
|
|
|
@ -8,6 +8,7 @@ path = "fake_lib.rs"
|
|||
|
||||
[dependencies]
|
||||
env_logger = "0.6.1"
|
||||
image = "0.23.1"
|
||||
log = "0.4.6"
|
||||
vulkano = "0.18.0"
|
||||
vulkano-win = "0.18.0"
|
||||
|
@ -42,3 +43,6 @@ additional_deps = ['//third_party/cargo/patches:libloading_global_static']
|
|||
|
||||
[raze.crates.cgmath.'0.17.0']
|
||||
gen_buildrs = true
|
||||
|
||||
[raze.crates.image.'0.23.1']
|
||||
gen_buildrs = true
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
{"files":{"Cargo.toml":"3dfd0367a0af86dd57c4faf9f8a5b1ce8179c38e28d470d3c46ce2d2b45ef20f","LICENSE":"9efeecf73f68ed91830f71c69a53de1328d1f8c6968a68ca6e6b2d6f3a92a088","README.md":"77c9e2080e5ae700403343c27fe08bb616f1df92a8b42b0e7808a7b7d32eb7a2","appveyor.yml":"4873092bae0713890497e5ceae761af359d680e6cce5ce003bf38bc5c45cde44","src/lib.rs":"596ac0c2bbdfa759fb79eb7b7d9e18d6c51be0849f22204a85c4906fe2ae8bde"},"package":"5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2"}
|
|
@ -0,0 +1,42 @@
|
|||
"""
|
||||
cargo-raze crate build file.
|
||||
|
||||
DO NOT EDIT! Replaced on runs of cargo-raze
|
||||
"""
|
||||
package(default_visibility = [
|
||||
# Public for visibility by "@raze__crate__version//" targets.
|
||||
#
|
||||
# Prefer access through "//third_party/cargo", which limits external
|
||||
# visibility to explicit Cargo.toml dependencies.
|
||||
"//visibility:public",
|
||||
])
|
||||
|
||||
licenses([
|
||||
"notice", # "Zlib"
|
||||
])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_rust//rust:rust.bzl",
|
||||
"rust_library",
|
||||
"rust_binary",
|
||||
"rust_test",
|
||||
)
|
||||
|
||||
|
||||
|
||||
rust_library(
|
||||
name = "adler32",
|
||||
crate_root = "src/lib.rs",
|
||||
crate_type = "lib",
|
||||
edition = "2015",
|
||||
srcs = glob(["**/*.rs"]),
|
||||
deps = [
|
||||
],
|
||||
rustc_flags = [
|
||||
"--cap-lints=allow",
|
||||
],
|
||||
version = "1.0.4",
|
||||
crate_features = [
|
||||
],
|
||||
)
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "adler32"
|
||||
version = "1.0.4"
|
||||
authors = ["Remi Rampin <remirampin@gmail.com>"]
|
||||
description = "Minimal Adler32 implementation for Rust."
|
||||
documentation = "https://remram44.github.io/adler32-rs/index.html"
|
||||
readme = "README.md"
|
||||
keywords = ["adler32", "hash", "rolling"]
|
||||
license = "Zlib"
|
||||
repository = "https://github.com/remram44/adler32-rs"
|
||||
[dev-dependencies.rand]
|
||||
version = "0.4"
|
|
@ -0,0 +1,43 @@
|
|||
Copyright notice for the Rust port:
|
||||
|
||||
(C) 2016 Remi Rampin
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
|
||||
Copyright notice for the original C code from the zlib project:
|
||||
|
||||
(C) 1995-2017 Jean-loup Gailly and Mark Adler
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Jean-loup Gailly Mark Adler
|
||||
jloup@gzip.org madler@alumni.caltech.edu
|
|
@ -0,0 +1,13 @@
|
|||
[![Build Status](https://travis-ci.org/remram44/adler32-rs.svg?branch=master)](https://travis-ci.org/remram44/adler32-rs/builds)
|
||||
[![Win Build](https://ci.appveyor.com/api/projects/status/ekyg20rd6rwrus64/branch/master?svg=true)](https://ci.appveyor.com/project/remram44/adler32-rs)
|
||||
[![Crates.io](https://img.shields.io/crates/v/adler32.svg)](https://crates.io/crates/adler32)
|
||||
[![Say Thanks!](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/remram44)
|
||||
|
||||
What is this?
|
||||
=============
|
||||
|
||||
It is an implementation of the [Adler32 rolling hash algorithm](https://en.wikipedia.org/wiki/Adler-32) in the [Rust programming language](https://www.rust-lang.org/).
|
||||
|
||||
It is adapted from Jean-Loup Gailly's and Mark Adler's [original implementation in zlib](https://github.com/madler/zlib/blob/2fa463bacfff79181df1a5270fb67cc679a53e71/adler32.c). A copy of the zlib copyright and license can be found in LICENSE-ZLIB.
|
||||
|
||||
[Generated documentation](https://remram44.github.io/adler32-rs/index.html)
|
|
@ -0,0 +1,12 @@
|
|||
install:
|
||||
- ps: Start-FileDownload 'https://static.rust-lang.org/dist/rust-nightly-i686-pc-windows-gnu.exe'
|
||||
- rust-nightly-i686-pc-windows-gnu.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust"
|
||||
- set PATH=%PATH%;C:\Program Files (x86)\Rust\bin
|
||||
- rustc -V
|
||||
- cargo -V
|
||||
|
||||
build: false
|
||||
|
||||
test_script:
|
||||
- cargo build --verbose
|
||||
- cargo test --verbose
|
|
@ -0,0 +1,307 @@
|
|||
//! A minimal implementation of Adler32 for Rust.
|
||||
//!
|
||||
//! This provides the simple method adler32(), that exhausts a Read and
|
||||
//! computes the Adler32 hash, as well as the RollingAdler32 struct, that can
|
||||
//! build a hash byte-by-byte, allowing to 'forget' past bytes in a rolling
|
||||
//! fashion.
|
||||
//!
|
||||
//! The adler32 code has been translated (as accurately as I could manage) from
|
||||
//! the zlib implementation.
|
||||
|
||||
#[cfg(test)]
|
||||
extern crate rand;
|
||||
|
||||
use std::io;
|
||||
|
||||
// adler32 algorithm and implementation taken from zlib; http://www.zlib.net/
|
||||
// It was translated into Rust as accurately as I could manage
|
||||
// The (slow) reference was taken from Wikipedia; https://en.wikipedia.org/
|
||||
|
||||
/* zlib.h -- interface of the 'zlib' general purpose compression library
|
||||
version 1.2.8, April 28th, 2013
|
||||
|
||||
Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
Jean-loup Gailly Mark Adler
|
||||
jloup@gzip.org madler@alumni.caltech.edu
|
||||
|
||||
*/
|
||||
|
||||
// largest prime smaller than 65536
|
||||
const BASE: u32 = 65521;
|
||||
|
||||
// NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
|
||||
const NMAX: usize = 5552;
|
||||
|
||||
#[inline(always)]
|
||||
fn do1(adler: &mut u32, sum2: &mut u32, buf: &[u8]) {
|
||||
*adler += u32::from(buf[0]);
|
||||
*sum2 += *adler;
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn do2(adler: &mut u32, sum2: &mut u32, buf: &[u8]) {
|
||||
do1(adler, sum2, &buf[0..1]);
|
||||
do1(adler, sum2, &buf[1..2]);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn do4(adler: &mut u32, sum2: &mut u32, buf: &[u8]) {
|
||||
do2(adler, sum2, &buf[0..2]);
|
||||
do2(adler, sum2, &buf[2..4]);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn do8(adler: &mut u32, sum2: &mut u32, buf: &[u8]) {
|
||||
do4(adler, sum2, &buf[0..4]);
|
||||
do4(adler, sum2, &buf[4..8]);
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn do16(adler: &mut u32, sum2: &mut u32, buf: &[u8]) {
|
||||
do8(adler, sum2, &buf[0..8]);
|
||||
do8(adler, sum2, &buf[8..16]);
|
||||
}
|
||||
|
||||
/// A rolling version of the Adler32 hash, which can 'forget' past bytes.
|
||||
///
|
||||
/// Calling remove() will update the hash to the value it would have if that
|
||||
/// past byte had never been fed to the algorithm. This allows you to get the
|
||||
/// hash of a rolling window very efficiently.
|
||||
pub struct RollingAdler32 {
|
||||
a: u32,
|
||||
b: u32,
|
||||
}
|
||||
|
||||
impl Default for RollingAdler32 {
|
||||
fn default() -> RollingAdler32 {
|
||||
RollingAdler32::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl RollingAdler32 {
|
||||
/// Creates an empty Adler32 context (with hash 1).
|
||||
pub fn new() -> RollingAdler32 {
|
||||
Self::from_value(1)
|
||||
}
|
||||
|
||||
/// Creates an Adler32 context with the given initial value.
|
||||
pub fn from_value(adler32: u32) -> RollingAdler32 {
|
||||
let a = adler32 & 0xFFFF;
|
||||
let b = adler32 >> 16;
|
||||
RollingAdler32 { a, b }
|
||||
}
|
||||
|
||||
/// Convenience function initializing a context from the hash of a buffer.
|
||||
pub fn from_buffer(buffer: &[u8]) -> RollingAdler32 {
|
||||
let mut hash = RollingAdler32::new();
|
||||
hash.update_buffer(buffer);
|
||||
hash
|
||||
}
|
||||
|
||||
/// Returns the current hash.
|
||||
pub fn hash(&self) -> u32 {
|
||||
(self.b << 16) | self.a
|
||||
}
|
||||
|
||||
/// Removes the given `byte` that was fed to the algorithm `size` bytes ago.
|
||||
pub fn remove(&mut self, size: usize, byte: u8) {
|
||||
let byte = u32::from(byte);
|
||||
self.a = (self.a + BASE - byte) % BASE;
|
||||
self.b = ((self.b + BASE - 1)
|
||||
.wrapping_add(BASE.wrapping_sub(size as u32)
|
||||
.wrapping_mul(byte))) % BASE;
|
||||
}
|
||||
|
||||
/// Feeds a new `byte` to the algorithm to update the hash.
|
||||
pub fn update(&mut self, byte: u8) {
|
||||
let byte = u32::from(byte);
|
||||
self.a = (self.a + byte) % BASE;
|
||||
self.b = (self.b + self.a) % BASE;
|
||||
}
|
||||
|
||||
/// Feeds a vector of bytes to the algorithm to update the hash.
|
||||
pub fn update_buffer(&mut self, buffer: &[u8]) {
|
||||
let len = buffer.len();
|
||||
|
||||
// in case user likes doing a byte at a time, keep it fast
|
||||
if len == 1 {
|
||||
self.update(buffer[0]);
|
||||
return;
|
||||
}
|
||||
|
||||
// in case short lengths are provided, keep it somewhat fast
|
||||
if len < 16 {
|
||||
for byte in buffer.iter().take(len) {
|
||||
self.a += u32::from(*byte);
|
||||
self.b += self.a;
|
||||
}
|
||||
if self.a >= BASE {
|
||||
self.a -= BASE;
|
||||
}
|
||||
self.b %= BASE;
|
||||
return;
|
||||
}
|
||||
|
||||
let mut pos = 0;
|
||||
|
||||
// do length NMAX blocks -- requires just one modulo operation;
|
||||
while pos + NMAX <= len {
|
||||
let end = pos + NMAX;
|
||||
while pos < end {
|
||||
// 16 sums unrolled
|
||||
do16(&mut self.a, &mut self.b, &buffer[pos..pos + 16]);
|
||||
pos += 16;
|
||||
}
|
||||
self.a %= BASE;
|
||||
self.b %= BASE;
|
||||
}
|
||||
|
||||
// do remaining bytes (less than NMAX, still just one modulo)
|
||||
if pos < len { // avoid modulos if none remaining
|
||||
while len - pos >= 16 {
|
||||
do16(&mut self.a, &mut self.b, &buffer[pos..pos + 16]);
|
||||
pos += 16;
|
||||
}
|
||||
while len - pos > 0 {
|
||||
self.a += u32::from(buffer[pos]);
|
||||
self.b += self.a;
|
||||
pos += 1;
|
||||
}
|
||||
self.a %= BASE;
|
||||
self.b %= BASE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Consume a Read object and returns the Adler32 hash.
|
||||
pub fn adler32<R: io::Read>(mut reader: R) -> io::Result<u32> {
|
||||
let mut hash = RollingAdler32::new();
|
||||
let mut buffer = [0u8; NMAX];
|
||||
let mut read = try!(reader.read(&mut buffer));
|
||||
while read > 0 {
|
||||
hash.update_buffer(&buffer[..read]);
|
||||
read = try!(reader.read(&mut buffer));
|
||||
}
|
||||
Ok(hash.hash())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use rand;
|
||||
use rand::Rng;
|
||||
use std::io;
|
||||
|
||||
use super::{BASE, adler32, RollingAdler32};
|
||||
|
||||
fn adler32_slow<R: io::Read>(reader: R) -> io::Result<u32> {
|
||||
let mut a: u32 = 1;
|
||||
let mut b: u32 = 0;
|
||||
|
||||
for byte in reader.bytes() {
|
||||
let byte = try!(byte) as u32;
|
||||
a = (a + byte) % BASE;
|
||||
b = (b + a) % BASE;
|
||||
}
|
||||
|
||||
Ok((b << 16) | a)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn testvectors() {
|
||||
fn do_test(v: u32, bytes: &[u8]) {
|
||||
let mut hash = RollingAdler32::new();
|
||||
hash.update_buffer(&bytes);
|
||||
assert_eq!(hash.hash(), v);
|
||||
|
||||
let r = io::Cursor::new(bytes);
|
||||
assert_eq!(adler32(r).unwrap(), v);
|
||||
}
|
||||
do_test(0x00000001, b"");
|
||||
do_test(0x00620062, b"a");
|
||||
do_test(0x024d0127, b"abc");
|
||||
do_test(0x29750586, b"message digest");
|
||||
do_test(0x90860b20, b"abcdefghijklmnopqrstuvwxyz");
|
||||
do_test(0x8adb150c, b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
|
||||
abcdefghijklmnopqrstuvwxyz\
|
||||
0123456789");
|
||||
do_test(0x97b61069, b"1234567890123456789012345678901234567890\
|
||||
1234567890123456789012345678901234567890");
|
||||
do_test(0xD6251498, &[255; 64000]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn compare() {
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut data = vec![0u8; 5589];
|
||||
for size in [0, 1, 3, 4, 5, 31, 32, 33, 67,
|
||||
5550, 5552, 5553, 5568, 5584, 5589].iter().cloned() {
|
||||
rng.fill_bytes(&mut data[..size]);
|
||||
let r1 = io::Cursor::new(&data[..size]);
|
||||
let r2 = r1.clone();
|
||||
if adler32_slow(r1).unwrap() != adler32(r2).unwrap() {
|
||||
panic!("Comparison failed, size={}", size);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rolling() {
|
||||
assert_eq!(RollingAdler32::from_value(0x01020304).hash(), 0x01020304);
|
||||
|
||||
fn do_test(a: &[u8], b: &[u8]) {
|
||||
let mut total = Vec::with_capacity(a.len() + b.len());
|
||||
total.extend(a);
|
||||
total.extend(b);
|
||||
let mut h = RollingAdler32::from_buffer(&total[..(b.len())]);
|
||||
for i in 0..(a.len()) {
|
||||
h.remove(b.len(), a[i]);
|
||||
h.update(total[b.len() + i]);
|
||||
}
|
||||
assert_eq!(h.hash(), adler32(b).unwrap());
|
||||
}
|
||||
do_test(b"a", b"b");
|
||||
do_test(b"", b"this a test");
|
||||
do_test(b"th", b"is a test");
|
||||
do_test(b"this a ", b"test");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn long_window_remove() {
|
||||
let mut hash = RollingAdler32::new();
|
||||
let w = 65536;
|
||||
assert!(w as u32 > BASE);
|
||||
|
||||
let mut bytes = vec![0; w*3];
|
||||
for (i, b) in bytes.iter_mut().enumerate() {
|
||||
*b = i as u8;
|
||||
}
|
||||
|
||||
for (i, b) in bytes.iter().enumerate() {
|
||||
if i >= w {
|
||||
hash.remove(w, bytes[i - w]);
|
||||
}
|
||||
hash.update(*b);
|
||||
if i > 0 && i % w == 0 {
|
||||
assert_eq!(hash.hash(), 0x433a8772);
|
||||
}
|
||||
}
|
||||
assert_eq!(hash.hash(), 0xbbba8772);
|
||||
}
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
{"files":{"Cargo.toml":"671c72410a736a19fa60d743233baa254c1588a443d9847e25edb3c7e04f2829","LICENSE-ZLIB.md":"84b34dd7608f7fb9b17bd588a6bf392bf7de504e2716f024a77d89f1b145a151","README.md":"4a27f4dcc0e3fbd2b6c4495d310e9179fb7fa4b77a3504821442f769ff4841ba","appveyor.yml":"09c69d96f1d6298a909d514e5a4fcdf0562be65619bdd2cdb966041ade217ef3","bors.toml":"1d8a7a56c5c76925a3daa8c50a40cc82cbfc638f521f864106bd60b1e8a219a2","changelog.md":"cff011496dbe7d4ca419b079e990f568354c0ffc230101486a08261d57c2da8a","pedantic.bat":"afd79f32caf7dc86e0390838992030decc5024c1348c86eb1c519c9c832bfe5e","rustfmt.toml":"1717bca34bc413693c82b6c50d633be8023545fa9a387b2da817ae848e2f1fc1","scripts/travis.sh":"a57fcf5ece149dd6da26481ebb429f359ccebd733a73de2e87f451371302142b","src/allocation.rs":"5bee031d7a2e4e7201543b0a4181c4f95e461049b045d3a3ab489819677847d9","src/contiguous.rs":"288aa77eca807f47d28c4372f6eb3fd87d885dcaf886fb725c10fdbaf1fd27d0","src/lib.rs":"32baa9a75add0916856e25fa37b3f5082c319d20f523e09d747b0b46333f3e0a","src/offset_of.rs":"aa89eb88ab3acd5694936e9bc922de5d0923e991afe732803946e4b66d7f2ef2","src/pod.rs":"b64399dac0d0dcc6179b4da48c02a15dee881afe858d27aed58253775016f4da","src/transparent.rs":"7d72eaa199c8b8656df324e7a846eb5589cb848080ecb4a75cbbef3b284ee46b","src/zeroable.rs":"c1ab8a5b9af7094fa710338529ee31588e616e2f954db1df0c98b15bbd1a18f6","tests/cast_slice_tests.rs":"de4a5879b0ef74df96ffe04412d7da49364725812e8ba1770e43867d58d8952c","tests/doc_tests.rs":"0008789fc7281f581c8c91eac13ea4683f82cdeadadc4119c7b21b38f7d41577","tests/std_tests.rs":"69661f26dc385c38d6c2bd37a62ba476e81ef88b4ed6565f3a47dd173133365c"},"package":"37fa13df2292ecb479ec23aa06f4507928bef07839be9ef15281411076629431"}
|
|
@ -0,0 +1,45 @@
|
|||
"""
|
||||
cargo-raze crate build file.
|
||||
|
||||
DO NOT EDIT! Replaced on runs of cargo-raze
|
||||
"""
|
||||
package(default_visibility = [
|
||||
# Public for visibility by "@raze__crate__version//" targets.
|
||||
#
|
||||
# Prefer access through "//third_party/cargo", which limits external
|
||||
# visibility to explicit Cargo.toml dependencies.
|
||||
"//visibility:public",
|
||||
])
|
||||
|
||||
licenses([
|
||||
"notice", # "Zlib"
|
||||
])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_rust//rust:rust.bzl",
|
||||
"rust_library",
|
||||
"rust_binary",
|
||||
"rust_test",
|
||||
)
|
||||
|
||||
|
||||
|
||||
rust_library(
|
||||
name = "bytemuck",
|
||||
crate_root = "src/lib.rs",
|
||||
crate_type = "lib",
|
||||
edition = "2018",
|
||||
srcs = glob(["**/*.rs"]),
|
||||
deps = [
|
||||
],
|
||||
rustc_flags = [
|
||||
"--cap-lints=allow",
|
||||
],
|
||||
version = "1.2.0",
|
||||
crate_features = [
|
||||
],
|
||||
)
|
||||
|
||||
# Unsupported target "cast_slice_tests" with type "test" omitted
|
||||
# Unsupported target "doc_tests" with type "test" omitted
|
||||
# Unsupported target "std_tests" with type "test" omitted
|
|
@ -0,0 +1,33 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
edition = "2018"
|
||||
name = "bytemuck"
|
||||
version = "1.2.0"
|
||||
authors = ["Lokathor <zefria@gmail.com>"]
|
||||
description = "A crate for mucking around with piles of bytes."
|
||||
readme = "README.md"
|
||||
keywords = ["transmute", "bytes", "casting"]
|
||||
categories = ["encoding", "no-std"]
|
||||
license = "Zlib"
|
||||
repository = "https://github.com/Lokathor/bytemuck"
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
|
||||
[features]
|
||||
extern_crate_alloc = []
|
||||
[badges.appveyor]
|
||||
repository = "Lokathor/bytemuck"
|
||||
|
||||
[badges.travis-ci]
|
||||
repository = "Lokathor/bytemuck"
|
|
@ -0,0 +1,11 @@
|
|||
Copyright (c) 2019 Daniel "Lokathor" Gee.
|
||||
|
||||
This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
|
||||
|
||||
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
|
||||
|
||||
3. This notice may not be removed or altered from any source distribution.
|
|
@ -0,0 +1,26 @@
|
|||
[![License:Zlib](https://img.shields.io/badge/License-Zlib-brightgreen.svg)](https://opensource.org/licenses/Zlib)
|
||||
![Minimum Rust Version](https://img.shields.io/badge/Min%20Rust-1.34-green.svg)
|
||||
[![travis.ci](https://travis-ci.org/Lokathor/bytemuck.svg?branch=master)](https://travis-ci.org/Lokathor/bytemuck)
|
||||
[![AppVeyor](https://ci.appveyor.com/api/projects/status/hgr4if0snmkmqj88/branch/master?svg=true)](https://ci.appveyor.com/project/Lokathor/bytemuck/branch/master)
|
||||
[![crates.io](https://img.shields.io/crates/v/bytemuck.svg)](https://crates.io/crates/bytemuck)
|
||||
[![docs.rs](https://docs.rs/bytemuck/badge.svg)](https://docs.rs/bytemuck/)
|
||||
|
||||
# bytemuck
|
||||
|
||||
A crate for mucking around with piles of bytes.
|
||||
|
||||
## Extensions
|
||||
|
||||
There is experimental support for the `Zeroable` trait being derived through a
|
||||
proc-macro. I'm not the author of that crate, please file bugs with that crate
|
||||
in the other repo.
|
||||
|
||||
* https://github.com/rodrimati1992/zeroable_crates
|
||||
|
||||
## Stability
|
||||
|
||||
The goal is to stay at 1.y.z until _at least_ the next edition of Rust.
|
||||
|
||||
I consider any increase of the Minimum Rust Version to be a semver breaking change,
|
||||
so `rustc-1.34` will continue to be supported for at least the rest of the
|
||||
`bytemuck-1.y.z` series of the crate.
|
|
@ -0,0 +1,45 @@
|
|||
|
||||
os: Visual Studio 2015
|
||||
|
||||
branches:
|
||||
only:
|
||||
- staging
|
||||
- trying
|
||||
- master
|
||||
- dev
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
|
||||
environment:
|
||||
matrix:
|
||||
# Stable
|
||||
- channel: 1.34.0
|
||||
target: i686-pc-windows-msvc
|
||||
- channel: 1.34.0
|
||||
target: i686-pc-windows-gnu
|
||||
- channel: 1.34.0
|
||||
target: x86_64-pc-windows-msvc
|
||||
- channel: 1.34.0
|
||||
target: x86_64-pc-windows-gnu
|
||||
# Beta and Nightly are checked by TravisCI since builds there run in
|
||||
# parallel.
|
||||
|
||||
install:
|
||||
- appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
|
||||
- rustup-init -y --default-toolchain %channel% --default-host %target%
|
||||
- set PATH=%PATH%;%USERPROFILE%\.cargo\bin
|
||||
- rustup component add rustfmt
|
||||
- rustup component add clippy
|
||||
- rustc -vV
|
||||
- cargo -vV
|
||||
|
||||
# On advice of retep we skip the "build" script phase
|
||||
build: false
|
||||
|
||||
test_script:
|
||||
- cargo fmt -- --check
|
||||
- cargo clippy
|
||||
- cargo test --no-default-features
|
||||
- cargo test
|
||||
#- cargo test --all-features
|
|
@ -0,0 +1 @@
|
|||
status = ["continuous-integration/travis-ci/push"]
|
|
@ -0,0 +1,25 @@
|
|||
# `bytemuck` changelog
|
||||
|
||||
## 1.2.0
|
||||
|
||||
* [thomcc](https://github.com/thomcc) added many things:
|
||||
* A fully sound `offset_of!` macro [#10](https://github.com/Lokathor/bytemuck/pull/10)
|
||||
* A `Contiguous` trait for when you've got enums with declared values
|
||||
all in a row [#12](https://github.com/Lokathor/bytemuck/pull/12)
|
||||
* A `TransparentWrapper` marker trait for when you want to more clearly
|
||||
enable adding and removing a wrapper struct to its inner value
|
||||
[#15](https://github.com/Lokathor/bytemuck/pull/15)
|
||||
  * Now MIRI is run on CI in every single push!
|
||||
[#16](https://github.com/Lokathor/bytemuck/pull/16)
|
||||
|
||||
## 1.1.0
|
||||
|
||||
* [SimonSapin](https://github.com/SimonSapin) added `from_bytes`,
|
||||
`from_bytes_mut`, `try_from_bytes`, and `try_from_bytes_mut` ([PR
|
||||
Link](https://github.com/Lokathor/bytemuck/pull/8))
|
||||
|
||||
## 1.0.1
|
||||
|
||||
* Changed to the [zlib](https://opensource.org/licenses/Zlib) license.
|
||||
* Added much more proper documentation.
|
||||
* Reduced the minimum Rust version to 1.34
|
|
@ -0,0 +1 @@
|
|||
cargo clippy -- -W clippy::pedantic
|
|
@ -0,0 +1,8 @@
|
|||
error_on_line_overflow = false
|
||||
merge_imports = true
|
||||
reorder_imports = true
|
||||
use_try_shorthand = true
|
||||
tab_spaces = 2
|
||||
max_width = 80
|
||||
color = "Never"
|
||||
use_small_heuristics = "Max"
|
|
@ -0,0 +1,77 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
if [[ "$RUN_MIRI" != "" ]]; then
|
||||
|
||||
cargo clean
|
||||
|
||||
# Install and run the latest version of nightly where miri built successfully.
|
||||
# Taken from: https://github.com/rust-lang/miri#running-miri-on-ci
|
||||
|
||||
MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)
|
||||
echo "Installing latest nightly with Miri: $MIRI_NIGHTLY"
|
||||
rustup set profile minimal
|
||||
rustup default "$MIRI_NIGHTLY"
|
||||
|
||||
rustup component add miri
|
||||
cargo miri setup
|
||||
|
||||
cargo miri test --verbose
|
||||
cargo miri test --verbose --no-default-features
|
||||
cargo miri test --verbose --all-features
|
||||
|
||||
else
|
||||
|
||||
rustup component add clippy
|
||||
cargo clippy
|
||||
|
||||
if [[ "$TARGET" != "" ]]; then rustup target install $TARGET; fi
|
||||
|
||||
if [[ "$TARGET" == "wasm32-"* && "$TARGET" != "wasm32-wasi" ]]; then
|
||||
cargo-web --version || cargo install cargo-web
|
||||
cargo web test --no-default-features $FLAGS --target=$TARGET
|
||||
cargo web test $FLAGS --target=$TARGET
|
||||
#cargo web test --all-features $FLAGS --target=$TARGET
|
||||
|
||||
elif [[ "$TARGET" == *"-linux-android"* ]]; then
|
||||
export PATH=/usr/local/android-sdk/ndk-bundle/toolchains/llvm/prebuilt/linux-x86_64/bin:$PATH
|
||||
pushd linux-android
|
||||
cargo build --no-default-features --target=$TARGET $FLAGS
|
||||
cargo build --target=$TARGET $FLAGS
|
||||
#cargo build --all-features --target=$TARGET $FLAGS
|
||||
# Don't test, can't run android emulators successfully on travis currently
|
||||
popd
|
||||
|
||||
elif [[ "$TARGET" == *"-apple-ios" || "$TARGET" == "wasm32-wasi" ]]; then
|
||||
cargo build --no-default-features --target=$TARGET $FLAGS
|
||||
cargo build --target=$TARGET $FLAGS
|
||||
#cargo build --all-features --target=$TARGET $FLAGS
|
||||
# Don't test
|
||||
# iOS simulator setup/teardown is complicated
|
||||
# cargo-web doesn't support wasm32-wasi yet, nor can wasm-pack test specify a target
|
||||
|
||||
elif [[ "$TARGET" == *"-unknown-linux-gnueabihf" ]]; then
|
||||
#sudo apt-get update
|
||||
#sudo apt-get install -y gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf
|
||||
pushd generic-cross
|
||||
cargo build --no-default-features --target=$TARGET $FLAGS
|
||||
cargo build --target=$TARGET $FLAGS
|
||||
#cargo build --all-features --target=$TARGET $FLAGS
|
||||
# Don't test
|
||||
popd
|
||||
|
||||
elif [[ "$TARGET" != "" ]]; then
|
||||
pushd generic-cross
|
||||
cargo test --no-default-features --target=$TARGET $FLAGS
|
||||
cargo test --target=$TARGET $FLAGS
|
||||
#cargo test --all-features --target=$TARGET $FLAGS
|
||||
popd
|
||||
|
||||
else
|
||||
# Push nothing, target host CPU architecture
|
||||
cargo test --no-default-features $FLAGS
|
||||
cargo test $FLAGS
|
||||
fi
|
||||
|
||||
fi
|
|
@ -0,0 +1,119 @@
|
|||
//! Stuff to boost things in the `alloc` crate.
|
||||
//!
|
||||
//! * You must enable the `extern_crate_alloc` feature of `bytemuck` or you will
|
||||
//! not be able to use this module!
|
||||
|
||||
use super::*;
|
||||
use alloc::{
|
||||
alloc::{alloc_zeroed, Layout},
|
||||
boxed::Box,
|
||||
vec::Vec,
|
||||
};
|
||||
|
||||
/// As [`try_cast_box`](try_cast_box), but unwraps for you.
|
||||
#[inline]
|
||||
pub fn cast_box<A: Pod, B: Pod>(input: Box<A>) -> Box<B> {
|
||||
try_cast_box(input).map_err(|(e, _v)| e).unwrap()
|
||||
}
|
||||
|
||||
/// Attempts to cast the content type of a [`Box`](alloc::boxed::Box).
///
/// On failure you get back an error along with the starting `Box`.
///
/// ## Failure
///
/// * The start and end content type of the `Box` must have the exact same
///   alignment.
/// * The start and end size of the `Box` must have the exact same size.
#[inline]
pub fn try_cast_box<A: Pod, B: Pod>(
  input: Box<A>,
) -> Result<Box<B>, (PodCastError, Box<A>)> {
  if align_of::<A>() != align_of::<B>() {
    // The global allocator requires that a Box be freed with the same layout
    // it was allocated with, so the alignments must match exactly.
    Err((PodCastError::AlignmentMismatch, input))
  } else if size_of::<A>() != size_of::<B>() {
    // Same reasoning: the layout's size must also match exactly.
    Err((PodCastError::SizeMismatch, input))
  } else {
    // Note(Lokathor): This is much simpler than with the Vec casting!
    // SAFETY: size and alignment are identical, so the allocation's layout is
    // unchanged, and `A: Pod` + `B: Pod` means any bit pattern of an `A` is a
    // valid `B`.
    let ptr: *mut B = Box::into_raw(input) as *mut B;
    Ok(unsafe { Box::from_raw(ptr) })
  }
}
|
||||
|
||||
/// Allocates a `Box<T>` with all of the contents being zeroed out.
///
/// This uses the global allocator to create a zeroed allocation and _then_
/// turns it into a Box. In other words, it's 100% assured that the zeroed data
/// won't be put temporarily on the stack. You can make a box of any size
/// without fear of a stack overflow.
///
/// ## Failure
///
/// This fails if the allocation fails.
#[inline]
pub fn try_zeroed_box<T: Zeroable>() -> Result<Box<T>, ()> {
  if size_of::<T>() == 0 {
    // ZSTs never actually allocate, so `Box::new` is free here and cannot
    // blow the stack.
    return Ok(Box::new(T::zeroed()));
  }
  let layout =
    Layout::from_size_align(size_of::<T>(), align_of::<T>()).unwrap();
  // SAFETY: `layout` has non-zero size; the ZST case returned above.
  let ptr = unsafe { alloc_zeroed(layout) };
  if ptr.is_null() {
    // we don't know what the error is because `alloc_zeroed` is a dumb API
    Err(())
  } else {
    // SAFETY: `ptr` is non-null and was allocated with exactly `T`'s layout,
    // and the all-zero bit pattern is valid for `T` because `T: Zeroable`.
    Ok(unsafe { Box::<T>::from_raw(ptr as *mut T) })
  }
}
|
||||
|
||||
/// As [`try_zeroed_box`], but unwraps for you.
|
||||
#[inline]
|
||||
pub fn zeroed_box<T: Zeroable>() -> Box<T> {
|
||||
try_zeroed_box().unwrap()
|
||||
}
|
||||
|
||||
/// As [`try_cast_vec`](try_cast_vec), but unwraps for you.
|
||||
#[inline]
|
||||
pub fn cast_vec<A: Pod, B: Pod>(input: Vec<A>) -> Vec<B> {
|
||||
try_cast_vec(input).map_err(|(e, _v)| e).unwrap()
|
||||
}
|
||||
|
||||
/// Attempts to cast the content type of a [`Vec`](alloc::vec::Vec).
///
/// On failure you get back an error along with the starting `Vec`.
///
/// ## Failure
///
/// * The start and end content type of the `Vec` must have the exact same
///   alignment.
/// * The start and end size of the `Vec` must have the exact same size.
/// * In the future this second restriction might be lessened by having the
///   capacity and length get adjusted during transmutation, but for now it's
///   absolute.
#[inline]
pub fn try_cast_vec<A: Pod, B: Pod>(
  input: Vec<A>,
) -> Result<Vec<B>, (PodCastError, Vec<A>)> {
  if align_of::<A>() != align_of::<B>() {
    // The global allocator requires deallocation with the original layout, so
    // the element alignments must match exactly.
    Err((PodCastError::AlignmentMismatch, input))
  } else if size_of::<A>() != size_of::<B>() {
    // Note(Lokathor): Under some conditions it would be possible to cast
    // between Vec content types of the same alignment but different sizes by
    // changing the capacity and len values in the output Vec. However, we will
    // not attempt that for now.
    Err((PodCastError::SizeMismatch, input))
  } else {
    // Note(Lokathor): First we record the length and capacity, which don't have
    // any secret provenance metadata.
    let length: usize = input.len();
    let capacity: usize = input.capacity();
    // Note(Lokathor): Next we "pre-forget" the old Vec by wrapping with
    // ManuallyDrop, because if we used `core::mem::forget` after taking the
    // pointer then that would invalidate our pointer. In nightly there's a
    // "into raw parts" method, which we can switch this too eventually.
    let mut manual_drop_vec = ManuallyDrop::new(input);
    let vec_ptr: *mut A = manual_drop_vec.as_mut_ptr();
    let ptr: *mut B = vec_ptr as *mut B;
    // SAFETY: we now exclusively own the buffer (the old Vec is wrapped in
    // `ManuallyDrop` and will never drop it), the allocation's layout is
    // unchanged because size and alignment match, and `A: Pod` bytes are
    // valid `B`s.
    Ok(unsafe { Vec::from_raw_parts(ptr, length, capacity) })
  }
}
|
|
@ -0,0 +1,203 @@
|
|||
use super::*;
|
||||
use core::mem::{size_of, transmute_copy};
|
||||
|
||||
/// A trait indicating that:
///
/// 1. A type has an equivalent representation to some known integral type.
/// 2. All instances of this type fall in a fixed range of values.
/// 3. Within that range, there are no gaps.
///
/// This is generally useful for fieldless enums (aka "c-style" enums), however
/// it's important that it only be used for those with an explicit `#[repr]`, as
/// `#[repr(Rust)]` fieldless enums have an unspecified layout.
///
/// Additionally, you shouldn't assume that all implementations are enums. Any
/// type which meets the requirements above while following the rules under
/// "Safety" below is valid.
///
/// # Example
///
/// ```
/// # use bytemuck::Contiguous;
/// #[repr(u8)]
/// #[derive(Debug, Copy, Clone, PartialEq)]
/// enum Foo {
///   A = 0,
///   B = 1,
///   C = 2,
///   D = 3,
///   E = 4,
/// }
/// unsafe impl Contiguous for Foo {
///   type Int = u8;
///   const MIN_VALUE: u8 = Foo::A as u8;
///   const MAX_VALUE: u8 = Foo::E as u8;
/// }
/// assert_eq!(Foo::from_integer(3).unwrap(), Foo::D);
/// assert_eq!(Foo::from_integer(8), None);
/// assert_eq!(Foo::C.into_integer(), 2);
/// ```
/// # Safety
///
/// This is an unsafe trait, and incorrectly implementing it is undefined
/// behavior.
///
/// Informally, by implementing it, you're asserting that `C` is identical to
/// the integral type `C::Int`, and that every `C` falls between `C::MIN_VALUE`
/// and `C::MAX_VALUE` exactly once, without any gaps.
///
/// Precisely, the guarantees you must uphold when implementing `Contiguous` for
/// some type `C` are:
///
/// 1. The size of `C` and `C::Int` must be the same, and neither may be a ZST.
///    (Note: alignment is explicitly allowed to differ)
///
/// 2. `C::Int` must be a primitive integer, and not a wrapper type. In the
///    future, this may be lifted to include cases where the behavior is
///    identical for a relevant set of traits (Ord, arithmetic, ...).
///
/// 3. All `C::Int`s which are in the *inclusive* range between `C::MIN_VALUE`
///    and `C::MAX_VALUE` are bitwise identical to unique valid instances of
///    `C`.
///
/// 4. There exist no instances of `C` such that their bitpatterns, when
///    interpreted as instances of `C::Int`, fall outside of the `MAX_VALUE` /
///    `MIN_VALUE` range -- It is legal for unsafe code to assume that if it
///    gets a `C` that implements `Contiguous`, it is in the appropriate range.
///
/// 5. Finally, you promise not to provide overridden implementations of
///    `Contiguous::from_integer` and `Contiguous::into_integer`.
///
/// For clarity, the following rules could be derived from the above, but are
/// listed explicitly:
///
/// - `C::MAX_VALUE` must be greater or equal to `C::MIN_VALUE` (therefore, `C`
///   must be an inhabited type).
///
/// - There exist no two values between `MIN_VALUE` and `MAX_VALUE` such that
///   when interpreted as a `C` they are considered identical (by, say, match).
pub unsafe trait Contiguous: Copy + 'static {
  /// The primitive integer type with an identical representation to this
  /// type.
  ///
  /// Contiguous is broadly intended for use with fieldless enums, and for
  /// these the correct integer type is easy: The enum should have a
  /// `#[repr(Int)]` or `#[repr(C)]` attribute, (if it does not, it is
  /// *unsound* to implement `Contiguous`!).
  ///
  /// - For `#[repr(Int)]`, use the listed `Int`. e.g. `#[repr(u8)]` should
  ///   use `type Int = u8`.
  ///
  /// - For `#[repr(C)]`, use whichever type the C compiler will use to
  ///   represent the given enum. This is usually `c_int` (from `std::os::raw`
  ///   or `libc`), but it's up to you to make the determination as the
  ///   implementer of the unsafe trait.
  ///
  /// For precise rules, see the list under "Safety" above.
  type Int: Copy + Ord;

  /// The upper *inclusive* bound for valid instances of this type.
  const MAX_VALUE: Self::Int;

  /// The lower *inclusive* bound for valid instances of this type.
  const MIN_VALUE: Self::Int;

  /// If `value` is within the range for valid instances of this type,
  /// returns `Some(converted_value)`, otherwise, returns `None`.
  ///
  /// This is a trait method so that you can write `T::from_integer(value)` in
  /// your code. It is a contract of this trait that if you implement
  /// `Contiguous` on your type you **must not** override this method.
  ///
  /// # Panics
  ///
  /// We will not panic for any correct implementation of `Contiguous`, but
  /// *may* panic if we detect an incorrect one.
  ///
  /// This is undefined behavior regardless, so it could have been the nasal
  /// demons at that point anyway ;).
  #[inline]
  fn from_integer(value: Self::Int) -> Option<Self> {
    // Guard against an illegal implementation of Contiguous. Annoyingly we
    // can't rely on `transmute` to do this for us (see below), but
    // whatever, this gets compiled into nothing in release.
    assert!(size_of::<Self>() == size_of::<Self::Int>());
    if Self::MIN_VALUE <= value && value <= Self::MAX_VALUE {
      // SAFETY: We've checked their bounds (and their size, even though
      // they've sworn under the Oath Of Unsafe Rust that that already
      // matched) so this is allowed by `Contiguous`'s unsafe contract.
      //
      // So, the `transmute_copy`. ideally we'd use transmute here, which
      // is more obviously safe. Sadly, we can't, as these types still
      // have unspecified sizes.
      Some(unsafe { transmute_copy::<Self::Int, Self>(&value) })
    } else {
      None
    }
  }

  /// Perform the conversion from `C` into the underlying integral type. This
  /// mostly exists otherwise generic code would need unsafe for the `value as
  /// integer`
  ///
  /// This is a trait method so that you can write `value.into_integer()` in
  /// your code. It is a contract of this trait that if you implement
  /// `Contiguous` on your type you **must not** override this method.
  ///
  /// # Panics
  ///
  /// We will not panic for any correct implementation of `Contiguous`, but
  /// *may* panic if we detect an incorrect one.
  ///
  /// This is undefined behavior regardless, so it could have been the nasal
  /// demons at that point anyway ;).
  #[inline]
  fn into_integer(self) -> Self::Int {
    // Guard against an illegal implementation of Contiguous. Annoyingly we
    // can't rely on `transmute` to do the size check for us (see
    // `from_integer's comment`), but whatever, this gets compiled into
    // nothing in release.
    assert!(size_of::<Self>() == size_of::<Self::Int>());

    // SAFETY: The unsafe contract requires that these have identical
    // representations, and that the range be entirely valid. Using
    // transmute_copy instead of transmute here is annoying, but is required
    // as `Self` and `Self::Int` have unspecified sizes still.
    unsafe { transmute_copy::<Self, Self::Int>(&self) }
  }
}
|
||||
|
||||
// Implements `Contiguous` for each `$src as $repr in [$min, $max];` entry.
// The call site promises (per the trait's unsafe contract) that each `$src`
// is representation-identical to `$repr` and is valid exactly over the
// inclusive `[$min, $max]` range.
macro_rules! impl_contiguous {
  ($($src:ty as $repr:ident in [$min:expr, $max:expr];)*) => {$(
    unsafe impl Contiguous for $src {
      type Int = $repr;
      const MAX_VALUE: $repr = $max;
      const MIN_VALUE: $repr = $min;
    }
  )*};
}
|
||||
|
||||
impl_contiguous! {
  // `bool` is a single byte whose only valid values are 0 and 1.
  bool as u8 in [0, 1];

  // Primitive unsigned integers are trivially contiguous over their full
  // range, with themselves as the representation type.
  u8 as u8 in [0, u8::max_value()];
  u16 as u16 in [0, u16::max_value()];
  u32 as u32 in [0, u32::max_value()];
  u64 as u64 in [0, u64::max_value()];
  u128 as u128 in [0, u128::max_value()];
  usize as usize in [0, usize::max_value()];

  // Likewise for the signed integers.
  i8 as i8 in [i8::min_value(), i8::max_value()];
  i16 as i16 in [i16::min_value(), i16::max_value()];
  i32 as i32 in [i32::min_value(), i32::max_value()];
  i64 as i64 in [i64::min_value(), i64::max_value()];
  i128 as i128 in [i128::min_value(), i128::max_value()];
  isize as isize in [isize::min_value(), isize::max_value()];

  // The `NonZero*` types share their integer's representation, with zero as
  // the single excluded value, so their valid range starts at 1.
  NonZeroU8 as u8 in [1, u8::max_value()];
  NonZeroU16 as u16 in [1, u16::max_value()];
  NonZeroU32 as u32 in [1, u32::max_value()];
  NonZeroU64 as u64 in [1, u64::max_value()];
  NonZeroU128 as u128 in [1, u128::max_value()];
  NonZeroUsize as usize in [1, usize::max_value()];
}
|
|
@ -0,0 +1,433 @@
|
|||
#![no_std]
|
||||
#![warn(missing_docs)]
|
||||
|
||||
//! This crate gives small utilities for casting between plain data types.
|
||||
//!
|
||||
//! ## Basics
|
||||
//!
|
||||
//! Data comes in five basic forms in Rust, so we have five basic casting
|
||||
//! functions:
|
||||
//!
|
||||
//! * `T` uses [`cast`]
|
||||
//! * `&T` uses [`cast_ref`]
|
||||
//! * `&mut T` uses [`cast_mut`]
|
||||
//! * `&[T]` uses [`cast_slice`]
|
||||
//! * `&mut [T]` uses [`cast_slice_mut`]
|
||||
//!
|
||||
//! Some casts will never fail (eg: `cast::<u32, f32>` always works), other
|
||||
//! casts might fail (eg: `cast_ref::<[u8; 4], u32>` will fail if the reference
|
||||
//! isn't already aligned to 4). Each casting function has a "try" version which
|
||||
//! will return a `Result`, and the "normal" version which will simply panic on
|
||||
//! invalid input.
|
||||
//!
|
||||
//! ## Using Your Own Types
|
||||
//!
|
||||
//! All the functions here are guarded by the [`Pod`] trait, which is a
|
||||
//! sub-trait of the [`Zeroable`] trait.
|
||||
//!
|
||||
//! If you're very sure that your type is eligible, you can implement those
|
||||
//! traits for your type and then they'll have full casting support. However,
|
||||
//! these traits are `unsafe`, and you should carefully read the requirements
|
||||
//! before adding them to your own types.
|
||||
//!
|
||||
//! ## Features
|
||||
//!
|
||||
//! * This crate is core only by default, but if you're using Rust 1.36 or later
|
||||
//! you can enable the `extern_crate_alloc` cargo feature for some additional
|
||||
//! methods related to `Box` and `Vec`. Note that the `docs.rs` documentation
|
||||
//! is always built with `extern_crate_alloc` cargo feature enabled.
|
||||
|
||||
#[cfg(target_arch = "x86")]
|
||||
use core::arch::x86;
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
use core::arch::x86_64;
|
||||
//
|
||||
use core::{marker::*, mem::*, num::*, ptr::*};
|
||||
|
||||
// Used from macros to ensure we aren't using some locally defined name and
|
||||
// actually are referencing libcore. This also would allow pre-2018 edition
|
||||
// crates to use our macros, but I'm not sure how important that is.
|
||||
#[doc(hidden)]
|
||||
pub use ::core as __core;
|
||||
|
||||
macro_rules! impl_unsafe_marker_for_array {
|
||||
( $marker:ident , $( $n:expr ),* ) => {
|
||||
$(unsafe impl<T> $marker for [T; $n] where T: $marker {})*
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "extern_crate_alloc")]
|
||||
extern crate alloc;
|
||||
#[cfg(feature = "extern_crate_alloc")]
|
||||
pub mod allocation;
|
||||
#[cfg(feature = "extern_crate_alloc")]
|
||||
pub use allocation::*;
|
||||
|
||||
mod zeroable;
|
||||
pub use zeroable::*;
|
||||
|
||||
mod pod;
|
||||
pub use pod::*;
|
||||
|
||||
mod contiguous;
|
||||
pub use contiguous::*;
|
||||
|
||||
mod offset_of;
|
||||
pub use offset_of::*;
|
||||
|
||||
mod transparent;
|
||||
pub use transparent::*;
|
||||
|
||||
/*
|
||||
|
||||
Note(Lokathor): We've switched all of the `unwrap` to `match` because there is
|
||||
apparently a bug: https://github.com/rust-lang/rust/issues/68667
|
||||
and it doesn't seem to show up in simple godbolt examples but has been reported
|
||||
as having an impact when there's a cast mixed in with other more complicated
|
||||
code around it. Rustc/LLVM ends up missing that the `Err` can't ever happen for
|
||||
particular type combinations, and then it doesn't fully eliminate the panic
|
||||
possibility code branch.
|
||||
|
||||
*/
|
||||
|
||||
/// Immediately panics.
///
/// Shared failure path for all of the panicking cast helpers: `src` names the
/// calling function and `err` is the cast error being reported. Deliberately
/// `#[cold]` + `#[inline(never)]` so that each caller's hot path stays small.
#[cold]
#[inline(never)]
fn something_went_wrong(src: &str, err: PodCastError) -> ! {
  // Note(Lokathor): Keeping the panic here makes the panic _formatting_ go
  // here too, which helps assembly readability and also helps keep down
  // the inline pressure.
  panic!("{src}>{err:?}", src = src, err = err)
}
|
||||
|
||||
/// Re-interprets `&T` as `&[u8]`.
///
/// Any ZST becomes an empty slice, and in that case the pointer value of that
/// empty slice might not match the pointer value of the input reference.
#[inline]
pub fn bytes_of<T: Pod>(t: &T) -> &[u8] {
  match try_cast_slice::<T, u8>(core::slice::from_ref(t)) {
    Ok(s) => s,
    // Casting any `Pod` to `u8` bytes should never fail, so this arm should
    // be unreachable. `match` instead of `unwrap` per the module-level note
    // about rust-lang/rust#68667 codegen.
    Err(_) => unreachable!(),
  }
}
|
||||
|
||||
/// Re-interprets `&mut T` as `&mut [u8]`.
///
/// Any ZST becomes an empty slice, and in that case the pointer value of that
/// empty slice might not match the pointer value of the input reference.
#[inline]
pub fn bytes_of_mut<T: Pod>(t: &mut T) -> &mut [u8] {
  match try_cast_slice_mut::<T, u8>(core::slice::from_mut(t)) {
    Ok(s) => s,
    // Casting any `Pod` to `u8` bytes should never fail, so this arm should
    // be unreachable. `match` instead of `unwrap` per the module-level note
    // about rust-lang/rust#68667 codegen.
    Err(_) => unreachable!(),
  }
}
|
||||
|
||||
/// Re-interprets `&[u8]` as `&T`.
///
/// ## Panics
///
/// This is [`try_from_bytes`] but will panic on error.
#[inline]
pub fn from_bytes<T: Pod>(s: &[u8]) -> &T {
  match try_from_bytes(s) {
    Ok(t) => t,
    // Route the failure through the shared `#[cold]` panic helper; `match`
    // rather than `unwrap` per the module-level codegen note.
    Err(e) => something_went_wrong("from_bytes", e),
  }
}
|
||||
|
||||
/// Re-interprets `&mut [u8]` as `&mut T`.
///
/// ## Panics
///
/// This is [`try_from_bytes_mut`] but will panic on error.
#[inline]
pub fn from_bytes_mut<T: Pod>(s: &mut [u8]) -> &mut T {
  match try_from_bytes_mut(s) {
    Ok(t) => t,
    // Route the failure through the shared `#[cold]` panic helper; `match`
    // rather than `unwrap` per the module-level codegen note.
    Err(e) => something_went_wrong("from_bytes_mut", e),
  }
}
|
||||
|
||||
/// Re-interprets `&[u8]` as `&T`.
|
||||
///
|
||||
/// ## Failure
|
||||
///
|
||||
/// * If the slice isn't aligned for the new type
|
||||
/// * If the slice's length isn’t exactly the size of the new type
|
||||
#[inline]
|
||||
pub fn try_from_bytes<T: Pod>(s: &[u8]) -> Result<&T, PodCastError> {
|
||||
if s.len() != size_of::<T>() {
|
||||
Err(PodCastError::SizeMismatch)
|
||||
} else if (s.as_ptr() as usize) % align_of::<T>() != 0 {
|
||||
Err(PodCastError::AlignmentMismatch)
|
||||
} else {
|
||||
Ok(unsafe { &*(s.as_ptr() as *const T) })
|
||||
}
|
||||
}
|
||||
|
||||
/// Re-interprets `&mut [u8]` as `&mut T`.
|
||||
///
|
||||
/// ## Failure
|
||||
///
|
||||
/// * If the slice isn't aligned for the new type
|
||||
/// * If the slice's length isn’t exactly the size of the new type
|
||||
#[inline]
|
||||
pub fn try_from_bytes_mut<T: Pod>(
|
||||
s: &mut [u8],
|
||||
) -> Result<&mut T, PodCastError> {
|
||||
if s.len() != size_of::<T>() {
|
||||
Err(PodCastError::SizeMismatch)
|
||||
} else if (s.as_ptr() as usize) % align_of::<T>() != 0 {
|
||||
Err(PodCastError::AlignmentMismatch)
|
||||
} else {
|
||||
Ok(unsafe { &mut *(s.as_mut_ptr() as *mut T) })
|
||||
}
|
||||
}
|
||||
|
||||
/// The things that can go wrong when casting between [`Pod`] data forms.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PodCastError {
  /// You tried to cast a slice to an element type with a higher alignment
  /// requirement but the slice wasn't aligned.
  TargetAlignmentGreaterAndInputNotAligned,
  /// If the element size changes then the output slice changes length
  /// accordingly. If the output slice wouldn't be a whole number of elements
  /// then the conversion fails.
  OutputSliceWouldHaveSlop,
  /// When casting a slice you can't convert between ZST elements and non-ZST
  /// elements. When casting an individual `T`, `&T`, or `&mut T` value the
  /// source size and destination size must be an exact match.
  SizeMismatch,
  /// For this type of cast the alignments must be exactly the same and they
  /// were not so now you're sad.
  ///
  /// Produced by the `Box`/`Vec` casts, which must preserve the allocation's
  /// layout exactly.
  AlignmentMismatch,
}
|
||||
|
||||
/// Cast `T` into `U`
///
/// ## Panics
///
/// This is [`try_cast`] but will panic on error.
#[inline]
pub fn cast<A: Pod, B: Pod>(a: A) -> B {
  // The size comparison is compile-time constant after monomorphization, so
  // only one branch survives in the generated code.
  if size_of::<A>() == size_of::<B>() {
    // Plz mr compiler, just notice that we can't ever hit Err in this case.
    // (`match` + `unreachable!` instead of `unwrap` per the module-level note
    // about rust-lang/rust#68667.)
    match try_cast(a) {
      Ok(b) => b,
      Err(_) => unreachable!(),
    }
  } else {
    // Sizes differ, so this always fails; funnel the panic through the shared
    // `#[cold]` helper.
    match try_cast(a) {
      Ok(b) => b,
      Err(e) => something_went_wrong("cast", e),
    }
  }
}
|
||||
|
||||
/// Cast `&mut T` into `&mut B`.
///
/// ## Panics
///
/// This is [`try_cast_mut`] but will panic on error.
#[inline]
pub fn cast_mut<A: Pod, B: Pod>(a: &mut A) -> &mut B {
  // Both comparisons are compile-time constant after monomorphization, so
  // only one branch survives in the generated code.
  if size_of::<A>() == size_of::<B>() && align_of::<A>() >= align_of::<B>() {
    // Plz mr compiler, just notice that we can't ever hit Err in this case.
    // (`match` + `unreachable!` instead of `unwrap` per the module-level note
    // about rust-lang/rust#68667.)
    match try_cast_mut(a) {
      Ok(b) => b,
      Err(_) => unreachable!(),
    }
  } else {
    // The cast may fail at runtime (size mismatch, or a stricter target
    // alignment with an unaligned input).
    match try_cast_mut(a) {
      Ok(b) => b,
      Err(e) => something_went_wrong("cast_mut", e),
    }
  }
}
|
||||
|
||||
/// Cast `&T` into `&U`.
///
/// ## Panics
///
/// This is [`try_cast_ref`] but will panic on error.
#[inline]
pub fn cast_ref<A: Pod, B: Pod>(a: &A) -> &B {
  // Both comparisons are compile-time constant after monomorphization, so
  // only one branch survives in the generated code.
  if size_of::<A>() == size_of::<B>() && align_of::<A>() >= align_of::<B>() {
    // Plz mr compiler, just notice that we can't ever hit Err in this case.
    // (`match` + `unreachable!` instead of `unwrap` per the module-level note
    // about rust-lang/rust#68667.)
    match try_cast_ref(a) {
      Ok(b) => b,
      Err(_) => unreachable!(),
    }
  } else {
    // The cast may fail at runtime (size mismatch, or a stricter target
    // alignment with an unaligned input).
    match try_cast_ref(a) {
      Ok(b) => b,
      Err(e) => something_went_wrong("cast_ref", e),
    }
  }
}
|
||||
|
||||
/// Cast `&[T]` into `&[U]`.
|
||||
///
|
||||
/// ## Panics
|
||||
///
|
||||
/// This is [`try_cast_slice`] but will panic on error.
|
||||
#[inline]
|
||||
pub fn cast_slice<A: Pod, B: Pod>(a: &[A]) -> &[B] {
|
||||
match try_cast_slice(a) {
|
||||
Ok(b) => b,
|
||||
Err(e) => something_went_wrong("cast_slice", e),
|
||||
}
|
||||
}
|
||||
|
||||
/// Cast `&mut [T]` into `&mut [U]`.
|
||||
///
|
||||
/// ## Panics
|
||||
///
|
||||
/// This is [`try_cast_slice_mut`] but will panic on error.
|
||||
#[inline]
|
||||
pub fn cast_slice_mut<A: Pod, B: Pod>(a: &mut [A]) -> &mut [B] {
|
||||
match try_cast_slice_mut(a) {
|
||||
Ok(b) => b,
|
||||
Err(e) => something_went_wrong("cast_slice_mut", e),
|
||||
}
|
||||
}
|
||||
|
||||
/// As `align_to`, but safe because of the [`Pod`] bound.
#[inline]
pub fn pod_align_to<T: Pod, U: Pod>(vals: &[T]) -> (&[T], &[U], &[T]) {
  // SAFETY: `align_to`'s soundness requirement is that the middle slice's
  // element type is valid for the underlying bytes, which `U: Pod`
  // (any bit pattern is valid) guarantees.
  unsafe { vals.align_to::<U>() }
}
|
||||
|
||||
/// As `align_to_mut`, but safe because of the [`Pod`] bound.
#[inline]
pub fn pod_align_to_mut<T: Pod, U: Pod>(
  vals: &mut [T],
) -> (&mut [T], &mut [U], &mut [T]) {
  // SAFETY: `align_to_mut`'s soundness requirement is that the middle slice's
  // element type is valid for the underlying bytes, which `T: Pod` and
  // `U: Pod` (any bit pattern is valid either way) guarantee.
  unsafe { vals.align_to_mut::<U>() }
}
|
||||
|
||||
/// Try to cast `T` into `U`.
///
/// ## Failure
///
/// * If the types don't have the same size this fails.
#[inline]
pub fn try_cast<A: Pod, B: Pod>(a: A) -> Result<B, PodCastError> {
  if size_of::<A>() == size_of::<B>() {
    // Start from an all-zeroes `B` (valid for any Pod type) and overwrite it
    // with the bytes of `a`.
    let mut b = B::zeroed();
    // Note(Lokathor): We copy in terms of `u8` because that allows us to bypass
    // any potential alignment difficulties.
    let ap = &a as *const A as *const u8;
    let bp = &mut b as *mut B as *mut u8;
    // SAFETY: both pointers are valid for `size_of::<A>()` bytes (the sizes
    // were just checked equal) and point at distinct locals, so the regions
    // cannot overlap.
    unsafe { ap.copy_to_nonoverlapping(bp, size_of::<A>()) };
    Ok(b)
  } else {
    Err(PodCastError::SizeMismatch)
  }
}
|
||||
|
||||
/// Try to convert a `&T` into `&U`.
///
/// ## Failure
///
/// * If the reference isn't aligned in the new type
/// * If the source type and target type aren't the same size.
#[inline]
pub fn try_cast_ref<A: Pod, B: Pod>(a: &A) -> Result<&B, PodCastError> {
  // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
  // after monomorphization.
  if align_of::<B>() > align_of::<A>()
    && (a as *const A as usize) % align_of::<B>() != 0
  {
    Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
  } else if size_of::<B>() == size_of::<A>() {
    // SAFETY: the pointer is aligned for `B` (checked above, or `B`'s
    // requirement is no stricter than `A`'s), the sizes match, and `Pod`
    // makes any bit pattern valid for `B`, so reborrowing as `&B` is sound.
    Ok(unsafe { &*(a as *const A as *const B) })
  } else {
    Err(PodCastError::SizeMismatch)
  }
}
|
||||
|
||||
/// Try to convert a `&mut T` into `&mut U`.
///
/// As [`try_cast_ref`], but `mut`.
#[inline]
pub fn try_cast_mut<A: Pod, B: Pod>(a: &mut A) -> Result<&mut B, PodCastError> {
  // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
  // after monomorphization.
  if align_of::<B>() > align_of::<A>()
    && (a as *mut A as usize) % align_of::<B>() != 0
  {
    Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
  } else if size_of::<B>() == size_of::<A>() {
    // SAFETY: same argument as `try_cast_ref` — alignment and size have been
    // verified, and `Pod` permits any bit pattern, so the exclusive reborrow
    // as `&mut B` is sound.
    Ok(unsafe { &mut *(a as *mut A as *mut B) })
  } else {
    Err(PodCastError::SizeMismatch)
  }
}
|
||||
|
||||
/// Try to convert `&[T]` into `&[U]` (possibly with a change in length).
///
/// * `input.as_ptr() as usize == output.as_ptr() as usize`
/// * `input.len() * size_of::<A>() == output.len() * size_of::<B>()`
///
/// ## Failure
///
/// * If the target type has a greater alignment requirement and the input slice
///   isn't aligned.
/// * If the target element type is a different size from the current element
///   type, and the output slice wouldn't be a whole number of elements when
///   accounting for the size change (eg: 3 `u16` values is 1.5 `u32` values, so
///   that's a failure).
/// * Similarly, you can't convert between a
///   [ZST](https://doc.rust-lang.org/nomicon/exotic-sizes.html#zero-sized-types-zsts)
///   and a non-ZST.
#[inline]
pub fn try_cast_slice<A: Pod, B: Pod>(a: &[A]) -> Result<&[B], PodCastError> {
  // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
  // after monomorphization.
  if align_of::<B>() > align_of::<A>()
    && (a.as_ptr() as usize) % align_of::<B>() != 0
  {
    Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
  } else if size_of::<B>() == size_of::<A>() {
    // Same element size: same length, same address.
    // SAFETY: alignment checked above, sizes equal, Pod permits any bits.
    Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, a.len()) })
  } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
    // ZST <-> non-ZST: no sensible length exists. Note this branch must come
    // before the modulo check below, which would divide by zero for a ZST `B`.
    Err(PodCastError::SizeMismatch)
  } else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
    // Different element sizes, but the total byte count divides evenly.
    let new_len = core::mem::size_of_val(a) / size_of::<B>();
    // SAFETY: the byte region is exactly `new_len * size_of::<B>()` bytes,
    // aligned for `B` (checked above), and Pod permits any bit pattern.
    Ok(unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) })
  } else {
    Err(PodCastError::OutputSliceWouldHaveSlop)
  }
}
|
||||
|
||||
/// Try to convert `&mut [T]` into `&mut [U]` (possibly with a change in length).
///
/// As [`try_cast_slice`], but `&mut`.
#[inline]
pub fn try_cast_slice_mut<A: Pod, B: Pod>(
  a: &mut [A],
) -> Result<&mut [B], PodCastError> {
  // Note(Lokathor): everything with `align_of` and `size_of` will optimize away
  // after monomorphization.
  if align_of::<B>() > align_of::<A>()
    && (a.as_mut_ptr() as usize) % align_of::<B>() != 0
  {
    Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
  } else if size_of::<B>() == size_of::<A>() {
    // Same element size: same length, same address.
    // SAFETY: alignment checked above, sizes equal, Pod permits any bits; the
    // input borrow is consumed, so the new exclusive slice is unique.
    Ok(unsafe {
      core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, a.len())
    })
  } else if size_of::<A>() == 0 || size_of::<B>() == 0 {
    // ZST <-> non-ZST: no sensible length exists. This branch must precede the
    // modulo check below, which would divide by zero for a ZST `B`.
    Err(PodCastError::SizeMismatch)
  } else if core::mem::size_of_val(a) % size_of::<B>() == 0 {
    // Different element sizes, but the total byte count divides evenly.
    let new_len = core::mem::size_of_val(a) / size_of::<B>();
    // SAFETY: the byte region is exactly `new_len * size_of::<B>()` bytes,
    // aligned for `B` (checked above), Pod permits any bit pattern, and the
    // input borrow is consumed.
    Ok(unsafe {
      core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len)
    })
  } else {
    Err(PodCastError::OutputSliceWouldHaveSlop)
  }
}
|
|
@ -0,0 +1,103 @@
|
|||
#![forbid(unsafe_code)]
|
||||
|
||||
/// Find the offset in bytes of the given `$field` of `$Type`, using `$instance`
/// as an already-initialized value to work with.
///
/// This is similar to the macro from `memoffset`, however it's fully well
/// defined even in current versions of Rust (and uses no unsafe code).
///
/// It does this by using the `$instance` argument to have an already-initialized
/// instance of `$Type` rather than trying to find a way to access the fields of
/// an uninitialized one without hitting soundness problems. The value passed to
/// the macro is referenced but not moved.
///
/// This means the API is more limited, but it's also sound even in rather
/// extreme cases, like some of the examples.
///
/// ## Caveats
///
/// 1. The offset is in bytes, and so you will likely have to cast your base
///    pointers to `*const u8`/`*mut u8` before getting field addresses.
///
/// 2. The offset values of repr(Rust) types are not stable, and may change
///    wildly between releases of the compiler. Use repr(C) if you can.
///
/// 3. The value of the `$instance` parameter has no bearing on the output of
///    this macro. It is just used to avoid soundness problems. The only
///    requirement is that it be initialized. In particular, the value returned
///    is not a field pointer, or anything like that.
///
/// ## Examples
///
/// ### Use with zeroable types
/// A common requirement in GPU apis is to specify the layout of vertices. These
/// will generally be [`Zeroable`] (if not [`Pod`]), and are a good fit for
/// `offset_of!`.
/// ```
/// # use bytemuck::{Zeroable, offset_of};
/// #[repr(C)]
/// struct Vertex {
///   pos: [f32; 2],
///   uv: [u16; 2],
///   color: [u8; 4],
/// }
/// unsafe impl Zeroable for Vertex {}
///
/// let pos = offset_of!(Zeroable::zeroed(), Vertex, pos);
/// let uv = offset_of!(Zeroable::zeroed(), Vertex, uv);
/// let color = offset_of!(Zeroable::zeroed(), Vertex, color);
///
/// assert_eq!(pos, 0);
/// assert_eq!(uv, 8);
/// assert_eq!(color, 12);
/// ```
///
/// ### Use with other types
///
/// More esoteric uses are possible too, including with types generally not safe
/// to otherwise use with bytemuck. `Strings`, `Vec`s, etc.
///
/// ```
/// #[derive(Default)]
/// struct Foo {
///   a: u8,
///   b: &'static str,
///   c: i32,
/// }
///
/// let a_offset = bytemuck::offset_of!(Default::default(), Foo, a);
/// let b_offset = bytemuck::offset_of!(Default::default(), Foo, b);
/// let c_offset = bytemuck::offset_of!(Default::default(), Foo, c);
///
/// assert_ne!(a_offset, b_offset);
/// assert_ne!(b_offset, c_offset);
/// // We can't check against hardcoded values for a repr(Rust) type,
/// // but prove to ourself this way.
///
/// let foo = Foo::default();
/// // Note: offsets are in bytes.
/// let as_bytes = &foo as *const _ as *const u8;
///
/// // we're using wrapping_add here because it's not worth
/// // the unsafe block, but it would be valid to use `add` instead,
/// // as it cannot overflow.
/// assert_eq!(&foo.a as *const _ as usize, as_bytes.wrapping_add(a_offset) as usize);
/// assert_eq!(&foo.b as *const _ as usize, as_bytes.wrapping_add(b_offset) as usize);
/// assert_eq!(&foo.c as *const _ as usize, as_bytes.wrapping_add(c_offset) as usize);
/// ```
#[macro_export]
macro_rules! offset_of {
  ($instance:expr, $Type:path, $field:tt) => {{
    // This helps us guard against field access going through a Deref impl.
    #[allow(clippy::unneeded_field_pattern)]
    let $Type { $field: _, .. };
    let reference: &$Type = &$instance;
    let address = reference as *const _ as usize;
    let field_pointer = &reference.$field as *const _ as usize;
    // These asserts/unwraps are compiled away at release, and defend against
    // the case where somehow a deref impl is still invoked.
    let result = field_pointer.checked_sub(address).unwrap();
    assert!(result <= $crate::__core::mem::size_of::<$Type>());
    result
  }};
}
|
|
@ -0,0 +1,99 @@
|
|||
use super::*;
|
||||
|
||||
/// Marker trait for "plain old data".
///
/// The point of this trait is that once something is marked "plain old data"
/// you can really go to town with the bit fiddling and bit casting. Therefore,
/// it's a relatively strong claim to make about a type. Do not add this to your
/// type casually.
///
/// **Reminder:** The results of casting around bytes between data types are
/// _endian-dependent_. Little-endian machines are the most common, but
/// big-endian machines do exist (and big-endian is also used for "network
/// order" bytes).
///
/// ## Safety
///
/// * The type must be inhabited (eg: no
///   [Infallible](core::convert::Infallible)).
/// * The type must allow any bit pattern (eg: no `bool` or `char`, which have
///   illegal bit patterns).
/// * The type must not contain any padding bytes, either in the middle or on
///   the end (eg: no `#[repr(C)] struct Foo(u8, u16)`, which has padding in the
///   middle, and also no `#[repr(C)] struct Foo(u16, u8)`, which has padding on
///   the end).
/// * The type needs to have all fields also be `Pod`.
/// * The type needs to be `repr(C)` or `repr(transparent)`. In the case of
///   `repr(C)`, the `packed` and `align` repr modifiers can be used as long as
///   all other rules end up being followed.
pub unsafe trait Pod: Zeroable + Copy + 'static {}
|
||||
|
||||
// All fixed-width primitive integers and floats have no padding and accept
// any bit pattern, so they are all Pod. (`bool` and `char` are deliberately
// absent: they have illegal bit patterns.)
unsafe impl Pod for () {}
unsafe impl Pod for u8 {}
unsafe impl Pod for i8 {}
unsafe impl Pod for u16 {}
unsafe impl Pod for i16 {}
unsafe impl Pod for u32 {}
unsafe impl Pod for i32 {}
unsafe impl Pod for u64 {}
unsafe impl Pod for i64 {}
unsafe impl Pod for usize {}
unsafe impl Pod for isize {}
unsafe impl Pod for u128 {}
unsafe impl Pod for i128 {}
unsafe impl Pod for f32 {}
unsafe impl Pod for f64 {}
unsafe impl<T: Pod> Pod for Wrapping<T> {}

// `Option<NonZero*>` has a guaranteed niche layout identical to the plain
// integer, so every bit pattern (including all-zeroes, which is `None`) is
// valid.
unsafe impl Pod for Option<NonZeroI8> {}
unsafe impl Pod for Option<NonZeroI16> {}
unsafe impl Pod for Option<NonZeroI32> {}
unsafe impl Pod for Option<NonZeroI64> {}
unsafe impl Pod for Option<NonZeroI128> {}
unsafe impl Pod for Option<NonZeroIsize> {}
unsafe impl Pod for Option<NonZeroU8> {}
unsafe impl Pod for Option<NonZeroU16> {}
unsafe impl Pod for Option<NonZeroU32> {}
unsafe impl Pod for Option<NonZeroU64> {}
unsafe impl Pod for Option<NonZeroU128> {}
unsafe impl Pod for Option<NonZeroUsize> {}

// Raw pointers are just addresses; any address value is a valid (if not
// necessarily dereferenceable) pointer.
unsafe impl<T: 'static> Pod for *mut T {}
unsafe impl<T: 'static> Pod for *const T {}
unsafe impl<T: 'static> Pod for Option<NonNull<T>> {}
unsafe impl<T: Pod> Pod for PhantomData<T> {}
unsafe impl<T: Pod> Pod for ManuallyDrop<T> {}

// Note(Lokathor): MaybeUninit can NEVER be Pod.

impl_unsafe_marker_for_array!(
  Pod, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
  20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
  512, 1024, 2048, 4096
);

// SIMD register types are opaque bags of bits with no padding, so they are
// Pod on the architectures where they exist.
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::__m128i {}
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::__m128 {}
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::__m128d {}
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::__m256i {}
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::__m256 {}
#[cfg(target_arch = "x86")]
unsafe impl Pod for x86::__m256d {}

#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::__m128i {}
#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::__m128 {}
#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::__m128d {}
#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::__m256i {}
#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::__m256 {}
#[cfg(target_arch = "x86_64")]
unsafe impl Pod for x86_64::__m256d {}
|
|
@ -0,0 +1,133 @@
|
|||
use super::*;
|
||||
|
||||
/// A trait which indicates that a type is a `repr(transparent)` wrapper around
/// the `Wrapped` value.
///
/// This allows safely creating references to `T` from those to the `Wrapped`
/// type, using the `wrap_ref` and `wrap_mut` functions.
///
/// # Safety
///
/// The safety contract of `TransparentWrapper` is relatively simple:
///
/// For a given `Wrapper` which implements `TransparentWrapper<Wrapped>`:
///
/// 1. Wrapper must be a `#[repr(transparent)]` wrapper around `Wrapped`. This
///    either means that it must be a `#[repr(transparent)]` struct which
///    contains either a field of type `Wrapped` (or a field of some other
///    transparent wrapper for `Wrapped`) as the only non-ZST field.
///
/// 2. Any fields *other* than the `Wrapped` field must be trivially
///    constructable ZSTs, for example `PhantomData`, `PhantomPinned`, etc.
///
/// 3. The `Wrapper` may not impose additional alignment requirements over
///    `Wrapped`.
///     - Note: this is currently guaranteed by repr(transparent), but there
///       have been discussions of lifting it, so it's stated here explicitly.
///
/// 4. The `wrap_ref` and `wrap_mut` functions on `TransparentWrapper` may not
///    be overridden.
///
/// ## Caveats
///
/// If the wrapper imposes additional constraints upon the wrapped type which
/// are required for safety, it's responsible for ensuring those still hold --
/// this generally requires preventing access to instances of the wrapped type,
/// as implementing `TransparentWrapper<U> for T` means anybody can call
/// `T::cast_ref(any_instance_of_u)`.
///
/// For example, because of this it would be invalid for `str` to implement
/// `TransparentWrapper<[u8]>`.
///
/// # Examples
///
/// ## Basic
///
/// ```
/// use bytemuck::TransparentWrapper;
/// # #[derive(Default)]
/// # struct SomeStruct(u32);
///
/// #[repr(transparent)]
/// struct MyWrapper(SomeStruct);
///
/// unsafe impl TransparentWrapper<SomeStruct> for MyWrapper {}
///
/// // interpret a reference to &SomeStruct as a &MyWrapper
/// let thing = SomeStruct::default();
/// let wrapped_ref: &MyWrapper = MyWrapper::wrap_ref(&thing);
///
/// // Works with &mut too.
/// let mut mut_thing = SomeStruct::default();
/// let wrapped_mut: &mut MyWrapper = MyWrapper::wrap_mut(&mut mut_thing);
///
/// # let _ = (wrapped_ref, wrapped_mut); // silence warnings
/// ```
///
/// ## Use with dynamically sized types
///
/// ```
/// use bytemuck::TransparentWrapper;
///
/// #[repr(transparent)]
/// struct Slice<T>([T]);
///
/// unsafe impl<T> TransparentWrapper<[T]> for Slice<T> {}
///
/// let s = Slice::wrap_ref(&[1u32, 2, 3]);
/// assert_eq!(&s.0, &[1, 2, 3]);
///
/// let mut buf = [1, 2, 3u8];
/// let sm = Slice::wrap_mut(&mut buf);
/// ```
pub unsafe trait TransparentWrapper<Wrapped: ?Sized> {
  /// Convert a reference to a wrapped type into a reference to the wrapper.
  ///
  /// This is a trait method so that you can write `MyType::wrap_ref(...)` in
  /// your code. It is part of the safety contract for this trait that if you
  /// implement `TransparentWrapper<_>` for your type you **must not** override
  /// this method.
  #[inline]
  fn wrap_ref(s: &Wrapped) -> &Self {
    unsafe {
      assert!(size_of::<*const Wrapped>() == size_of::<*const Self>());
      // Using a pointer cast doesn't work here because rustc can't tell that the
      // vtables match (if we lifted the ?Sized restriction, this would go away),
      // and transmute doesn't work for the usual reasons it doesn't work inside
      // generic functions.
      //
      // SAFETY: The unsafe contract requires that these have identical
      // representations. Using this transmute_copy instead of transmute here is
      // annoying, but is required as `Self` and `Wrapped` have unspecified
      // sizes still.
      let wrapped_ptr = s as *const Wrapped;
      let wrapper_ptr: *const Self = transmute_copy(&wrapped_ptr);
      &*wrapper_ptr
    }
  }

  /// Convert a mut reference to a wrapped type into a mut reference to the
  /// wrapper.
  ///
  /// This is a trait method so that you can write `MyType::wrap_mut(...)` in
  /// your code. It is part of the safety contract for this trait that if you
  /// implement `TransparentWrapper<_>` for your type you **must not** override
  /// this method.
  #[inline]
  fn wrap_mut(s: &mut Wrapped) -> &mut Self {
    unsafe {
      assert!(size_of::<*mut Wrapped>() == size_of::<*mut Self>());
      // Using a pointer cast doesn't work here because rustc can't tell that the
      // vtables match (if we lifted the ?Sized restriction, this would go away),
      // and transmute doesn't work for the usual reasons it doesn't work inside
      // generic functions.
      //
      // SAFETY: The unsafe contract requires that these have identical
      // representations. Using this transmute_copy instead of transmute here is
      // annoying, but is required as `Self` and `Wrapped` have unspecified
      // sizes still.
      let wrapped_ptr = s as *mut Wrapped;
      let wrapper_ptr: *mut Self = transmute_copy(&wrapped_ptr);
      &mut *wrapper_ptr
    }
  }
}
|
|
@ -0,0 +1,142 @@
|
|||
use super::*;
|
||||
|
||||
/// Trait for types that can be safely created with
/// [`zeroed`](core::mem::zeroed).
///
/// An all-zeroes value may or may not be the same value as the
/// [Default](core::default::Default) value of the type.
///
/// ## Safety
///
/// * Your type must be inhabited (eg: no
///   [Infallible](core::convert::Infallible)).
/// * Your type must be allowed to be an "all zeroes" bit pattern (eg: no
///   [`NonNull<T>`](core::ptr::NonNull)).
pub unsafe trait Zeroable: Sized {
  /// Calls [`zeroed`](core::mem::zeroed).
  ///
  /// This is a trait method so that you can write `MyType::zeroed()` in your
  /// code. It is a contract of this trait that if you implement it on your type
  /// you **must not** override this method.
  #[inline]
  fn zeroed() -> Self {
    // SAFETY: the trait's contract is precisely that all-zeroes is a valid
    // value of `Self`.
    unsafe { core::mem::zeroed() }
  }
}
|
||||
// All-zeroes is a valid value for every primitive: 0 for the integers and
// floats, `false` for `bool`, and `'\0'` for `char`. (Note `bool`/`char` are
// Zeroable but NOT Pod — not every bit pattern is valid for them.)
unsafe impl Zeroable for () {}
unsafe impl Zeroable for bool {}
unsafe impl Zeroable for char {}
unsafe impl Zeroable for u8 {}
unsafe impl Zeroable for i8 {}
unsafe impl Zeroable for u16 {}
unsafe impl Zeroable for i16 {}
unsafe impl Zeroable for u32 {}
unsafe impl Zeroable for i32 {}
unsafe impl Zeroable for u64 {}
unsafe impl Zeroable for i64 {}
unsafe impl Zeroable for usize {}
unsafe impl Zeroable for isize {}
unsafe impl Zeroable for u128 {}
unsafe impl Zeroable for i128 {}
unsafe impl Zeroable for f32 {}
unsafe impl Zeroable for f64 {}
unsafe impl<T: Zeroable> Zeroable for Wrapping<T> {}

// The all-zeroes bit pattern of `Option<NonZero*>` is the guaranteed niche
// representation of `None`, so zeroing these is valid.
unsafe impl Zeroable for Option<NonZeroI8> {}
unsafe impl Zeroable for Option<NonZeroI16> {}
unsafe impl Zeroable for Option<NonZeroI32> {}
unsafe impl Zeroable for Option<NonZeroI64> {}
unsafe impl Zeroable for Option<NonZeroI128> {}
unsafe impl Zeroable for Option<NonZeroIsize> {}
unsafe impl Zeroable for Option<NonZeroU8> {}
unsafe impl Zeroable for Option<NonZeroU16> {}
unsafe impl Zeroable for Option<NonZeroU32> {}
unsafe impl Zeroable for Option<NonZeroU64> {}
unsafe impl Zeroable for Option<NonZeroU128> {}
unsafe impl Zeroable for Option<NonZeroUsize> {}

// A zeroed raw pointer is just the null pointer — a valid pointer value.
unsafe impl<T> Zeroable for *mut T {}
unsafe impl<T> Zeroable for *const T {}
unsafe impl<T> Zeroable for Option<NonNull<T>> {}
unsafe impl<T: Zeroable> Zeroable for PhantomData<T> {}
unsafe impl<T: Zeroable> Zeroable for ManuallyDrop<T> {}

// 2.0: add MaybeUninit
//unsafe impl<T> Zeroable for MaybeUninit<T> {}

// Tuples of Zeroable fields are Zeroable (any padding may also be zeroed).
unsafe impl<A: Zeroable> Zeroable for (A,) {}
unsafe impl<A: Zeroable, B: Zeroable> Zeroable for (A, B) {}
unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable> Zeroable for (A, B, C) {}
unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable> Zeroable
  for (A, B, C, D)
{
}
unsafe impl<A: Zeroable, B: Zeroable, C: Zeroable, D: Zeroable, E: Zeroable>
  Zeroable for (A, B, C, D, E)
{
}
unsafe impl<
    A: Zeroable,
    B: Zeroable,
    C: Zeroable,
    D: Zeroable,
    E: Zeroable,
    F: Zeroable,
  > Zeroable for (A, B, C, D, E, F)
{
}
unsafe impl<
    A: Zeroable,
    B: Zeroable,
    C: Zeroable,
    D: Zeroable,
    E: Zeroable,
    F: Zeroable,
    G: Zeroable,
  > Zeroable for (A, B, C, D, E, F, G)
{
}
unsafe impl<
    A: Zeroable,
    B: Zeroable,
    C: Zeroable,
    D: Zeroable,
    E: Zeroable,
    F: Zeroable,
    G: Zeroable,
    H: Zeroable,
  > Zeroable for (A, B, C, D, E, F, G, H)
{
}

impl_unsafe_marker_for_array!(
  Zeroable, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
  19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 48, 64, 96, 128, 256,
  512, 1024, 2048, 4096
);

// SIMD register types are plain bags of bits, so the zero vector is valid.
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::__m128i {}
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::__m128 {}
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::__m128d {}
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::__m256i {}
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::__m256 {}
#[cfg(target_arch = "x86")]
unsafe impl Zeroable for x86::__m256d {}

#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::__m128i {}
#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::__m128 {}
#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::__m128d {}
#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::__m256i {}
#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::__m256 {}
#[cfg(target_arch = "x86_64")]
unsafe impl Zeroable for x86_64::__m256d {}
|
|
@ -0,0 +1,90 @@
|
|||
use core::mem::size_of;
|
||||
|
||||
use bytemuck::*;
|
||||
|
||||
// Round-trips a `u32` slice through a `u8` view and probes every failure mode
// of `try_cast_slice`.
#[test]
fn test_try_cast_slice() {
  // Start with align-4 data...
  let words: &[u32] = &[4, 5, 6];
  // ...and view the identical storage as align-1 data.
  let bytes: &[u8] = try_cast_slice(words).unwrap();

  // Same starting address, same total byte count.
  assert_eq!(words.as_ptr() as usize, bytes.as_ptr() as usize);
  assert_eq!(words.len() * size_of::<u32>(), bytes.len() * size_of::<u8>());

  // Dropping the first byte guarantees misalignment for `u32`.
  assert_eq!(
    try_cast_slice::<u8, u32>(&bytes[1..]),
    Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
  );

  // Dropping the last byte keeps alignment but leaves slop bytes for `u32`.
  let all_but_last = &bytes[..bytes.len() - 1];
  assert_eq!(
    try_cast_slice::<u8, u32>(all_but_last),
    Err(PodCastError::OutputSliceWouldHaveSlop)
  );

  // Untouched, the byte view casts back up to `u32` just fine.
  try_cast_slice::<u8, u32>(bytes).unwrap();
}
|
||||
|
||||
// Mutable-slice version of the `try_cast_slice` round-trip and failure checks.
#[test]
fn test_try_cast_slice_mut() {
  // Start with align-4 data, remembering its address and length for later.
  let words: &mut [u32] = &mut [4, 5, 6];
  let word_count = words.len();
  let word_addr = words.as_ptr() as usize;

  // The identical storage viewed as align-1 data.
  let bytes: &mut [u8] = try_cast_slice_mut(words).unwrap();
  let byte_count = bytes.len();

  // Same starting address, same total byte count.
  assert_eq!(word_addr, bytes.as_ptr() as usize);
  assert_eq!(word_count * size_of::<u32>(), byte_count * size_of::<u8>());

  // One byte off the front: misaligned for `u32`.
  assert_eq!(
    try_cast_slice_mut::<u8, u32>(&mut bytes[1..]),
    Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned)
  );

  // One byte off the back: aligned, but the length no longer divides evenly.
  assert_eq!(
    try_cast_slice_mut::<u8, u32>(&mut bytes[..byte_count - 1]),
    Err(PodCastError::OutputSliceWouldHaveSlop)
  );

  // Untouched, the byte view casts back up to `u32` just fine.
  try_cast_slice_mut::<u8, u32>(bytes).unwrap();
}
|
||||
|
||||
// Interface smoke test: each cast entry point accepts an `f32`-to-`i32`
// conversion in every value/reference/slice flavor. Values are discarded;
// this only checks that the signatures line up.
#[test]
fn test_types() {
  let _: i32 = cast(1.0_f32);
  let _: &mut i32 = cast_mut(&mut 1.0_f32);
  let _: &i32 = cast_ref(&1.0_f32);
  let _: &[i32] = cast_slice(&[1.0_f32]);
  let _: &mut [i32] = cast_slice_mut(&mut [1.0_f32]);
  // Fallible variants of the same five entry points.
  let _: Result<i32, PodCastError> = try_cast(1.0_f32);
  let _: Result<&mut i32, PodCastError> = try_cast_mut(&mut 1.0_f32);
  let _: Result<&i32, PodCastError> = try_cast_ref(&1.0_f32);
  let _: Result<&[i32], PodCastError> = try_cast_slice(&[1.0_f32]);
  let _: Result<&mut [i32], PodCastError> = try_cast_slice_mut(&mut [1.0_f32]);
}
|
|
@ -0,0 +1,121 @@
|
|||
//! Cargo miri doesn't run doctests yet, so we duplicate these here. It's
|
||||
//! probably not that important to sweat keeping these perfectly up to date, but
|
||||
//! we should try to catch the cases where the primary tests are doctests.
|
||||
use bytemuck::*;
|
||||
|
||||
// Miri doesn't run on doctests, so... copypaste to the rescue.
|
||||
#[test]
fn test_transparent_slice() {
  // Copy of the `TransparentWrapper` DST doctest so it runs under miri.
  #[repr(transparent)]
  struct Slice<T>([T]);

  unsafe impl<T> TransparentWrapper<[T]> for Slice<T> {}

  let s = Slice::wrap_ref(&[1u32, 2, 3]);
  assert_eq!(&s.0, &[1, 2, 3]);

  let mut buf = [1, 2, 3u8];
  let _sm = Slice::wrap_mut(&mut buf);
}
|
||||
|
||||
#[test]
fn test_transparent_basic() {
  // Copy of the basic `TransparentWrapper` doctest so it runs under miri.
  #[derive(Default)]
  struct SomeStruct(u32);

  #[repr(transparent)]
  struct MyWrapper(SomeStruct);

  unsafe impl TransparentWrapper<SomeStruct> for MyWrapper {}

  // interpret a reference to &SomeStruct as a &MyWrapper
  let thing = SomeStruct::default();
  let wrapped_ref: &MyWrapper = MyWrapper::wrap_ref(&thing);

  // Works with &mut too.
  let mut mut_thing = SomeStruct::default();
  let wrapped_mut: &mut MyWrapper = MyWrapper::wrap_mut(&mut mut_thing);
  let _ = (wrapped_ref, wrapped_mut);
}
|
||||
|
||||
// Work around miri not running doctests
#[test]
fn test_contiguous_doc() {
  // Copy of the `Contiguous` doctest: a dense `u8`-repr enum round-trips
  // through its integer form, and out-of-range integers map to `None`.
  #[repr(u8)]
  #[derive(Debug, Copy, Clone, PartialEq)]
  enum Foo {
    A = 0,
    B = 1,
    C = 2,
    D = 3,
    E = 4,
  }
  unsafe impl Contiguous for Foo {
    type Int = u8;
    const MIN_VALUE: u8 = Foo::A as u8;
    const MAX_VALUE: u8 = Foo::E as u8;
  }

  assert_eq!(Foo::from_integer(3).unwrap(), Foo::D);
  // 8 is outside MIN_VALUE..=MAX_VALUE, so conversion fails.
  assert_eq!(Foo::from_integer(8), None);
  assert_eq!(Foo::C.into_integer(), 2);
  assert_eq!(Foo::B.into_integer(), Foo::B as u8);
}
|
||||
|
||||
#[test]
fn test_offsetof_vertex() {
  // Copy of the `offset_of!` zeroable-type doctest: a repr(C) struct has
  // stable, predictable field offsets.
  #[repr(C)]
  struct Vertex {
    pos: [f32; 2],
    uv: [u16; 2],
    color: [u8; 4],
  }
  unsafe impl Zeroable for Vertex {}

  let pos = offset_of!(Zeroable::zeroed(), Vertex, pos);
  let uv = offset_of!(Zeroable::zeroed(), Vertex, uv);
  let color = offset_of!(Zeroable::zeroed(), Vertex, color);

  // repr(C) lays fields out in declaration order: 8 bytes of pos, then
  // 4 bytes of uv, then color.
  assert_eq!(pos, 0);
  assert_eq!(uv, 8);
  assert_eq!(color, 12);
}
|
||||
|
||||
#[test]
fn test_offsetof_nonpod() {
  // Copy of the `offset_of!` "other types" doctest: the macro is sound even
  // for repr(Rust) types containing non-Pod fields like `&'static str`.
  #[derive(Default)]
  struct Foo {
    a: u8,
    b: &'static str,
    c: i32,
  }

  let a_offset = offset_of!(Default::default(), Foo, a);
  let b_offset = offset_of!(Default::default(), Foo, b);
  let c_offset = offset_of!(Default::default(), Foo, c);

  assert_ne!(a_offset, b_offset);
  assert_ne!(b_offset, c_offset);
  // We can't check against hardcoded values for a repr(Rust) type,
  // but prove to ourself this way.

  let foo = Foo::default();
  // Note: offsets are in bytes.
  let as_bytes = &foo as *const _ as *const u8;

  // we're using wrapping_add here because it's not worth
  // the unsafe block, but it would be valid to use `add` instead,
  // as it cannot overflow.
  assert_eq!(
    &foo.a as *const _ as usize,
    as_bytes.wrapping_add(a_offset) as usize
  );
  assert_eq!(
    &foo.b as *const _ as usize,
    as_bytes.wrapping_add(b_offset) as usize
  );
  assert_eq!(
    &foo.c as *const _ as usize,
    as_bytes.wrapping_add(c_offset) as usize
  );
}
|
|
@ -0,0 +1,29 @@
|
|||
//! The integration tests seem to always have `std` linked, so things that would
|
||||
//! depend on that can go here.
|
||||
|
||||
use bytemuck::*;
|
||||
|
||||
#[test]
fn test_transparent_vtabled() {
    use core::fmt::Display;

    // A repr(transparent) wrapper around an *unsized* trait object:
    // wrapping must preserve the fat pointer's vtable.
    #[repr(transparent)]
    struct DisplayTraitObj(dyn Display);

    unsafe impl TransparentWrapper<dyn Display> for DisplayTraitObj {}

    impl Display for DisplayTraitObj {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            self.0.fmt(f)
        }
    }

    // Shared reference: formatting goes through the wrapped vtable.
    let wrapped = DisplayTraitObj::wrap_ref(&5i32);
    assert_eq!(format!("{}", wrapped), "5");

    // Mutable reference behaves the same way.
    let mut n = 100i32;
    let wrapped_mut = DisplayTraitObj::wrap_mut(&mut n);
    assert_eq!(format!("{}", wrapped_mut), "100");
}
|
|
@ -39,6 +39,8 @@ rust_library(
|
|||
],
|
||||
version = "1.3.4",
|
||||
crate_features = [
|
||||
"default",
|
||||
"std",
|
||||
],
|
||||
)
|
||||
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
{"files":{"Cargo.toml":"688bd8abbac3aa77f7576eb16fb9d893d6ea46ae73815fce3e93289e0388db39","LICENSE":"592dc80f1a865d20d61a2006a2d29ce34a2bc28cd7e868ab300fdeed6da154ca","README.md":"f185944aa94e2457b094d9c3946d743d36e1381c6a8bb9beca07a7e21ce2137f","src/lib.rs":"49b5bde7cf83947e0fbdfcd403e0e8e7f1825b5be50c1d0523a7b798c886c4f7"},"package":"0dbbb57365263e881e805dc77d94697c9118fd94d8da011240555aa7b23445bd"}
|
|
@ -0,0 +1,42 @@
|
|||
"""
cargo-raze crate build file.

DO NOT EDIT! Replaced on runs of cargo-raze
"""
package(default_visibility = [
  # Public for visibility by "@raze__crate__version//" targets.
  #
  # Prefer access through "//third_party/cargo", which limits external
  # visibility to explicit Cargo.toml dependencies.
  "//visibility:public",
])

licenses([
  "notice", # "MIT"
])

load(
    "@io_bazel_rules_rust//rust:rust.bzl",
    "rust_library",
    "rust_binary",
    "rust_test",
)

# color_quant 1.0.1 (NeuQuant color quantization). Vendored as a transitive
# dependency of the `image` crate; it has no dependencies and no features.
rust_library(
    name = "color_quant",
    crate_root = "src/lib.rs",
    crate_type = "lib",
    edition = "2015",
    srcs = glob(["**/*.rs"]),
    deps = [
    ],
    rustc_flags = [
        # Vendored third-party code: silence its lints rather than fix them.
        "--cap-lints=allow",
    ],
    version = "1.0.1",
    crate_features = [
    ],
)
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g. crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "color_quant"
|
||||
version = "1.0.1"
|
||||
authors = ["nwin <nwin@users.noreply.github.com>"]
|
||||
description = "Color quantization library to reduce n colors to 256 colors."
|
||||
readme = "README.md"
|
||||
license = "MIT"
|
||||
repository = "https://github.com/PistonDevelopers/color_quant.git"
|
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 PistonDevelopers
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,11 @@
|
|||
# Color quantization library
|
||||
This library provides a color quantizer based on the [NEUQUANT](http://members.ozemail.com.au/~dekker/NEUQUANT.HTML)
|
||||
quantization algorithm by Anthony Dekker.
|
||||
|
||||
### Usage
|
||||
|
||||
let data = vec![0; 40];
|
||||
let nq = color_quant::NeuQuant::new(10, 256, &data);
|
||||
let indices: Vec<u8> = data.chunks(4).map(|pix| nq.index_of(pix) as u8).collect();
|
||||
let color_map = nq.color_map_rgba();
|
||||
|
|
@ -0,0 +1,440 @@
|
|||
/*
|
||||
NeuQuant Neural-Net Quantization Algorithm by Anthony Dekker, 1994.
|
||||
See "Kohonen neural networks for optimal colour quantization"
|
||||
in "Network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
|
||||
for a discussion of the algorithm.
|
||||
See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
|
||||
|
||||
Incorporated bugfixes and alpha channel handling from pngnq
|
||||
http://pngnq.sourceforge.net
|
||||
|
||||
Copyright (c) 2014 The Piston Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
NeuQuant Neural-Net Quantization Algorithm
|
||||
------------------------------------------
|
||||
|
||||
Copyright (c) 1994 Anthony Dekker
|
||||
|
||||
NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
|
||||
See "Kohonen neural networks for optimal colour quantization"
|
||||
in "Network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
|
||||
for a discussion of the algorithm.
|
||||
See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
|
||||
|
||||
Any party obtaining a copy of these files from the author, directly or
|
||||
indirectly, is granted, free of charge, a full and unrestricted irrevocable,
|
||||
world-wide, paid up, royalty-free, nonexclusive right and license to deal
|
||||
in this software and documentation files (the "Software"), including without
|
||||
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
and/or sell copies of the Software, and to permit persons who receive
|
||||
copies from any such party to do so, with the only requirement being
|
||||
that this copyright notice remain intact.
|
||||
|
||||
*/
|
||||
//! # Color quantization library
|
||||
//! This library provides a color quantizer based on the [NEUQUANT](http://members.ozemail.com.au/~dekker/NEUQUANT.HTML)
|
||||
//! quantization algorithm by Anthony Dekker.
|
||||
//! ### Usage
|
||||
//! ```
|
||||
//! let data = vec![0; 40];
|
||||
//! let nq = color_quant::NeuQuant::new(10, 256, &data);
|
||||
//! let indices: Vec<u8> = data.chunks(4).map(|pix| nq.index_of(pix) as u8).collect();
|
||||
//! let color_map = nq.color_map_rgba();
|
||||
//! ```
|
||||
//!
|
||||
use std::cmp::{
|
||||
max,
|
||||
min
|
||||
};
|
||||
|
||||
// Clamps an integer expression into the valid channel range 0..=255.
// The argument is evaluated exactly once.
macro_rules! clamp(
    ($x:expr) => ({
        let v = $x;
        if v < 0 { 0 } else if v > 255 { 255 } else { v }
    })
);
|
||||
|
||||
// Number of channels per pixel (RGBA).
const CHANNELS: usize = 4;

// The neighborhood radius shrinks by a factor of 1/30 each learning cycle.
const RADIUS_DEC: i32 = 30; // factor of 1/30 each cycle

// Alpha (the learning rate) is kept as a fixed-point integer biased by
// 10 bits, i.e. INIT_ALPHA represents 1.0.
const ALPHA_BIASSHIFT: i32 = 10; // alpha starts at 1
const INIT_ALPHA: i32 = 1 << ALPHA_BIASSHIFT; // biased by 10 bits

// Frequency/bias learning constants; note BETA * GAMMA == 1.0 by design.
const GAMMA: f64 = 1024.0;
const BETA: f64 = 1.0 / GAMMA;
const BETAGAMMA: f64 = BETA * GAMMA;

// four primes near 500 - assume no image has a length so large
// that it is divisible by all four primes.
// The step size must be prime (and coprime to the pixel count) so the
// pseudo-random sampling walk eventually visits every pixel.
// Fixed: this array previously contained 478 (= 2 * 239), which is not
// prime and breaks that guarantee; the correct value is 487.
const PRIMES: [usize; 4] = [499, 491, 487, 503];
|
||||
|
||||
// A generic 4-component value; one field per RGBA channel.
#[derive(Clone, Copy)]
struct Quad<T> {
    r: T,
    g: T,
    b: T,
    a: T,
}

// Network neurons store continuous channel values during training...
type Neuron = Quad<f64>;
// ...which are rounded and clamped into integer palette entries afterwards.
type Color = Quad<i32>;
|
||||
|
||||
/// Neural network based color quantizer.
///
/// Trains a one-dimensional Kohonen network on a sample of the input
/// pixels and exposes the resulting palette plus nearest-color lookup.
pub struct NeuQuant {
    network: Vec<Neuron>,   // the neurons being trained (continuous RGBA)
    colormap: Vec<Color>,   // quantized palette built from the network
    netindex: Vec<usize>,   // for each green value 0..=255, a start index into colormap
    bias: Vec<f64>, // bias and freq arrays for learning
    freq: Vec<f64>,
    samplefac: i32,         // sampling factor in [1, 30]; 1 = use most pixels
    netsize: usize,         // number of palette entries to produce
}
|
||||
|
||||
impl NeuQuant {
    /// Creates a new neural network and trains it with the supplied data.
    ///
    /// Pixels are assumed to be in RGBA format.
    /// `colors` should be >= 64. `samplefac` determines the fraction of
    /// the sample that will be used to train the network. Its value must be
    /// in the range [1, 30]. A value of 1 thus produces the best result but
    /// is also slowest. 10 is a good compromise between speed and quality.
    pub fn new(samplefac: i32, colors: usize, pixels: &[u8]) -> Self {
        let netsize = colors;
        let mut this = NeuQuant {
            network: Vec::with_capacity(netsize),
            colormap: Vec::with_capacity(netsize),
            // One index slot per possible green value (0..=255).
            netindex: vec![0; 256],
            bias: Vec::with_capacity(netsize),
            freq: Vec::with_capacity(netsize),
            samplefac: samplefac,
            netsize: colors
        };
        this.init(pixels);
        this
    }

    /// Initializes the neural network and trains it with the supplied data.
    ///
    /// This method gets called by `Self::new`; calling it again retrains
    /// the quantizer from scratch on new pixel data.
    pub fn init(&mut self, pixels: &[u8]) {
        self.network.clear();
        self.colormap.clear();
        self.bias.clear();
        self.freq.clear();
        // Every neuron starts with the same selection frequency 1/netsize.
        let freq = (self.netsize as f64).recip();
        for i in 0..self.netsize {
            // Spread the initial neurons evenly along the grey diagonal.
            let tmp = (i as f64) * 256.0 / (self.netsize as f64);
            // Ramp alpha from 0 over the first 16 neurons so dark entries
            // start (mostly) transparent.
            let a = if i < 16 { i as f64 * 16.0 } else { 255.0 };
            self.network.push(Neuron { r: tmp, g: tmp, b: tmp, a: a});
            self.colormap.push(Color { r: 0, g: 0, b: 0, a: 255 });
            self.freq.push(freq);
            self.bias.push(0.0);
        }
        self.learn(pixels);
        self.build_colormap();
        self.inxbuild();
    }

    /// Maps the rgba-pixel in-place to the best-matching color in the color map.
    ///
    /// Panics if `pixel` is not exactly 4 bytes long.
    #[inline(always)]
    pub fn map_pixel(&self, pixel: &mut [u8]) {
        assert!(pixel.len() == 4);
        // Irrefutable destructure of the four channels; note inxsearch
        // takes its arguments in (b, g, r, a) order.
        match (pixel[0], pixel[1], pixel[2], pixel[3]) {
            (r, g, b, a) => {
                let i = self.inxsearch(b, g, r, a);
                pixel[0] = self.colormap[i].r as u8;
                pixel[1] = self.colormap[i].g as u8;
                pixel[2] = self.colormap[i].b as u8;
                pixel[3] = self.colormap[i].a as u8;
            }
        }
    }

    /// Finds the best-matching index in the color map.
    ///
    /// `pixel` is assumed to be in RGBA format; panics unless it is
    /// exactly 4 bytes long.
    #[inline(always)]
    pub fn index_of(&self, pixel: &[u8]) -> usize {
        assert!(pixel.len() == 4);
        match (pixel[0], pixel[1], pixel[2], pixel[3]) {
            (r, g, b, a) => {
                self.inxsearch(b, g, r, a)
            }
        }
    }

    /// Returns the RGBA color map calculated from the sample
    /// (4 bytes per palette entry).
    pub fn color_map_rgba(&self) -> Vec<u8> {
        let mut map = Vec::with_capacity(self.netsize * 4);
        for entry in &self.colormap {
            map.push(entry.r as u8);
            map.push(entry.g as u8);
            map.push(entry.b as u8);
            map.push(entry.a as u8);
        }
        map
    }

    /// Returns the RGB color map calculated from the sample
    /// (3 bytes per palette entry; alpha is dropped).
    pub fn color_map_rgb(&self) -> Vec<u8> {
        let mut map = Vec::with_capacity(self.netsize * 3);
        for entry in &self.colormap {
            map.push(entry.r as u8);
            map.push(entry.g as u8);
            map.push(entry.b as u8);
        }
        map
    }

    /// Move neuron i towards biased (a,b,g,r) by factor alpha
    fn altersingle(&mut self, alpha: f64, i: i32, quad: Quad<f64>) {
        let n = &mut self.network[i as usize];
        n.b -= alpha * (n.b - quad.b);
        n.g -= alpha * (n.g - quad.g);
        n.r -= alpha * (n.r - quad.r);
        n.a -= alpha * (n.a - quad.a);
    }

    /// Move neurons adjacent to neuron `i` towards biased (a,b,g,r) by a
    /// factor that decays quadratically with distance from `i`.
    fn alterneigh(&mut self, alpha: f64, rad: i32, i: i32, quad: Quad<f64>) {
        let lo = max(i - rad, 0);
        let hi = min(i + rad, self.netsize as i32);
        // Walk outwards from i in both directions simultaneously.
        let mut j = i + 1;
        let mut k = i - 1;
        let mut q = 0; // distance from i

        while (j < hi) || (k > lo) {
            let rad_sq = rad as f64 * rad as f64;
            // Scale alpha down as the distance q approaches the radius.
            let alpha = (alpha * (rad_sq - q as f64 * q as f64)) / rad_sq;
            q += 1;
            if j < hi {
                let p = &mut self.network[j as usize];
                p.b -= alpha * (p.b - quad.b);
                p.g -= alpha * (p.g - quad.g);
                p.r -= alpha * (p.r - quad.r);
                p.a -= alpha * (p.a - quad.a);
                j += 1;
            }
            if k > lo {
                let p = &mut self.network[k as usize];
                p.b -= alpha * (p.b - quad.b);
                p.g -= alpha * (p.g - quad.g);
                p.r -= alpha * (p.r - quad.r);
                p.a -= alpha * (p.a - quad.a);
                k -= 1;
            }
        }
    }

    /// Search for biased BGR values
    /// finds closest neuron (min dist) and updates freq
    /// finds best neuron (min dist-bias) and returns position
    /// for frequently chosen neurons, freq[i] is high and bias[i] is negative
    /// bias[i] = gamma*((1/self.netsize)-freq[i])
    fn contest(&mut self, b: f64, g: f64, r: f64, a: f64) -> i32 {
        use std::f64;

        let mut bestd = f64::MAX;
        let mut bestbiasd: f64 = bestd;
        let mut bestpos = -1;
        let mut bestbiaspos: i32 = bestpos;

        for i in 0..self.netsize {
            let bestbiasd_biased = bestbiasd + self.bias[i];
            let mut dist;
            let n = &self.network[i];
            // Manhattan distance, computed incrementally so clearly-worse
            // neurons can be skipped after only two channels.
            dist = (n.b - b).abs();
            dist += (n.r - r).abs();
            if dist < bestd || dist < bestbiasd_biased {
                dist += (n.g - g).abs();
                dist += (n.a - a).abs();
                if dist < bestd {bestd=dist; bestpos=i as i32;}
                let biasdist = dist - self.bias[i];
                if biasdist < bestbiasd {bestbiasd=biasdist; bestbiaspos=i as i32;}
            }
            // Decay every neuron's frequency and push its bias up, ...
            self.freq[i] -= BETA * self.freq[i];
            self.bias[i] += BETAGAMMA * self.freq[i];
        }
        // ...then reward the winner so it is picked less often next time.
        self.freq[bestpos as usize] += BETA;
        self.bias[bestpos as usize] -= BETAGAMMA;
        return bestbiaspos;
    }

    /// Main learning loop
    /// Note: the number of learning cycles is crucial and the parameters are not
    /// optimized for net sizes < 26 or > 256. 1064 colors seems to work fine
    fn learn(&mut self, pixels: &[u8]) {
        let initrad: i32 = self.netsize as i32/8; // for 256 cols, radius starts at 32
        let radiusbiasshift: i32 = 6;
        let radiusbias: i32 = 1 << radiusbiasshift;
        let init_bias_radius: i32 = initrad*radiusbias;
        let mut bias_radius = init_bias_radius;
        let alphadec = 30 + ((self.samplefac-1)/3);
        let lengthcount = pixels.len() / CHANNELS;
        // Only every samplefac-th pixel (roughly) takes part in training.
        let samplepixels = lengthcount / self.samplefac as usize;
        // learning cycles
        let n_cycles = match self.netsize >> 1 { n if n <= 100 => 100, n => n};
        // Pixels per cycle; at least 1 so the decay step below is reachable.
        let delta = match samplepixels / n_cycles { 0 => 1, n => n };
        let mut alpha = INIT_ALPHA;

        let mut rad = bias_radius >> radiusbiasshift;
        if rad <= 1 {rad = 0};

        let mut pos = 0;
        // Pick a prime step that does not divide the pixel count, so the
        // walk below cycles through all pixel positions.
        let step = *PRIMES.iter()
            .find(|&&prime| lengthcount % prime != 0)
            .unwrap_or(&PRIMES[3]);

        let mut i = 0;
        while i < samplepixels {
            let (r, g, b, a) = {
                let p = &pixels[CHANNELS * pos..][..CHANNELS];
                (p[0] as f64, p[1] as f64, p[2] as f64, p[3] as f64)
            };

            // Find the winning neuron and move it (and its neighborhood,
            // while the radius is non-zero) towards this pixel.
            let j = self.contest(b, g, r, a);

            let alpha_ = (1.0 * alpha as f64) / INIT_ALPHA as f64;
            self.altersingle(alpha_, j, Quad { b: b, g: g, r: r, a: a });
            if rad > 0 {
                self.alterneigh(alpha_, rad, j, Quad { b: b, g: g, r: r, a: a })
            };

            // Advance by the prime step, wrapping around the image.
            pos += step;
            while pos >= lengthcount { pos -= lengthcount };

            i += 1;
            // Once per cycle: decay the learning rate and the radius.
            if i%delta == 0 {
                alpha -= alpha / alphadec;
                bias_radius -= bias_radius / RADIUS_DEC;
                rad = bias_radius >> radiusbiasshift;
                if rad <= 1 {rad = 0};
            }
        }
    }

    /// initializes the color map: rounds the trained (f64) neurons into
    /// clamped 0..=255 integer palette entries
    fn build_colormap(&mut self) {
        for i in 0usize..self.netsize {
            self.colormap[i].b = clamp!(self.network[i].b.round() as i32);
            self.colormap[i].g = clamp!(self.network[i].g.round() as i32);
            self.colormap[i].r = clamp!(self.network[i].r.round() as i32);
            self.colormap[i].a = clamp!(self.network[i].a.round() as i32);
        }
    }

    /// Insertion sort of network and building of netindex[0..255]
    /// (sorts the colormap by green value so lookups can start near the
    /// right entry and scan outwards)
    fn inxbuild(&mut self) {
        let mut previouscol = 0;
        let mut startpos = 0;

        for i in 0..self.netsize {
            let mut p = self.colormap[i];
            let mut q;
            let mut smallpos = i;
            let mut smallval = p.g as usize; // index on g
            // find smallest in i..netsize-1
            for j in (i + 1)..self.netsize {
                q = self.colormap[j];
                if (q.g as usize) < smallval { // index on g
                    smallpos = j;
                    smallval = q.g as usize; // index on g
                }
            }
            q = self.colormap[smallpos];
            // swap p (i) and q (smallpos) entries
            if i != smallpos {
                let mut j;
                j = q; q = p; p = j;
                self.colormap[i] = p;
                self.colormap[smallpos] = q;
            }
            // smallval entry is now in position i
            if smallval != previouscol {
                // netindex[g] points to the middle of the run of entries
                // whose green value equals g.
                self.netindex[previouscol] = (startpos + i)>>1;
                for j in (previouscol + 1)..smallval {
                    self.netindex[j] = i
                }
                previouscol = smallval;
                startpos = i;
            }
        }
        let max_netpos = self.netsize - 1;
        self.netindex[previouscol] = (startpos + max_netpos)>>1;
        for j in (previouscol + 1)..256 { self.netindex[j] = max_netpos } // really 256
    }

    /// Search for best matching color (minimal squared RGBA distance).
    /// Starts near entries whose green value matches and scans outwards,
    /// which allows early termination once the green distance alone
    /// exceeds the best distance found so far.
    fn inxsearch(&self, b: u8, g: u8, r: u8, a: u8) -> usize {
        // Sentinel larger than any possible squared distance
        // (1 << 30 is roughly 1.07e9; max real distance is 4 * 255^2).
        let mut bestd = 1 << 30;
        let mut best = 0;
        // start at netindex[g] and work outwards
        let mut i = self.netindex[g as usize];
        let mut j = if i > 0 { i - 1 } else { 0 };

        while (i < self.netsize) || (j > 0) {
            if i < self.netsize {
                let p = self.colormap[i];
                let mut e = p.g - g as i32;
                let mut dist = e*e; // inx key
                // The map is sorted by g, so once the green term alone is
                // too large every later entry is too; stop scanning up.
                if dist >= bestd { break }
                else {
                    e = p.b - b as i32;
                    dist += e*e;
                    if dist < bestd {
                        e = p.r - r as i32;
                        dist += e*e;
                        if dist < bestd {
                            e = p.a - a as i32;
                            dist += e*e;
                            if dist < bestd { bestd = dist; best = i;}
                        }
                    }
                    i += 1;
                }
            }
            if j > 0 {
                let p = self.colormap[j];
                let mut e = p.g - g as i32;
                let mut dist = e*e; // inx key
                // Same early-out when scanning down.
                if dist >= bestd { break }
                else {
                    e = p.b - b as i32;
                    dist += e*e;
                    if dist < bestd {
                        e = p.r - r as i32;
                        dist += e*e;
                        if dist < bestd {
                            e = p.a - a as i32;
                            dist += e*e;
                            if dist < bestd { bestd = dist; best = j; }
                        }
                    }
                    j -= 1;
                }
            }
        }
        best
    }
}
|
|
@ -0,0 +1 @@
|
|||
{"files":{"Cargo.toml":"c85c0241394119d8887c5e5624aba9a1a1fd21578f1a1e3f2a0d50d95d752cff","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"61d383b05b87d78f94d2937e2580cce47226d17823c0430fbcad09596537efcf","README.md":"c0891c7ff327441bf16da593b0e721951f9f6d10bb26f9356aba6a7b0b0c4575","benches/bench.rs":"9a45a7ebc8fecf7f9976bea0e3c00c13731c0b3566536b0bc83788986e801770","build.rs":"4ccc50c3da67eb27f0b622440d2b7aee2f73fa9c71884571f3c041122231d105","src/baseline.rs":"bbe8fe49ceccbf9749052fa9c2756cf95f0fc79a063e5d3b509e3600283464ea","src/combine.rs":"7147fc4002190d36d253ea5e194e0419035b087304bcb17887efe09a8a198815","src/lib.rs":"25c55822d7fd53ff1ff0769bcffbdbcade00d45ac042a541b7189c2e94b91ee7","src/specialized/aarch64.rs":"cc8097e68f1269cee32aa856b4f7e4ba7b7472df6c2f4cecd600d292a838fe83","src/specialized/mod.rs":"bc92450e8522e9df202b346b3a209153cbb0d6587804cbfd2b947fda0f190ed6","src/specialized/pclmulqdq.rs":"6ace803b42ff70a571fd8b5f3f7c2d5a836873ce28759381c2882319b8edba70","src/table.rs":"3201c520d97c5e2cf80b8a03d72fa2e3f1270bbdf93c2fbf85498a8ea39bc64b"},"package":"ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1"}
|
|
@ -0,0 +1,47 @@
|
|||
"""
cargo-raze crate build file.

DO NOT EDIT! Replaced on runs of cargo-raze
"""
package(default_visibility = [
  # Public for visibility by "@raze__crate__version//" targets.
  #
  # Prefer access through "//third_party/cargo", which limits external
  # visibility to explicit Cargo.toml dependencies.
  "//visibility:public",
])

licenses([
  "restricted", # "MIT OR Apache-2.0"
])

load(
    "@io_bazel_rules_rust//rust:rust.bzl",
    "rust_library",
    "rust_binary",
    "rust_test",
)

# Targets cargo-raze cannot express as Bazel rules:
# Unsupported target "bench" with type "bench" omitted
# Unsupported target "build-script-build" with type "custom-build" omitted

# crc32fast 1.2.0: SIMD-accelerated CRC32 computation, vendored as a
# transitive dependency of the `image` crate.
rust_library(
    name = "crc32fast",
    crate_root = "src/lib.rs",
    crate_type = "lib",
    edition = "2015",
    srcs = glob(["**/*.rs"]),
    deps = [
        "//third_party/cargo/vendor/cfg-if-0.1.10:cfg_if",
    ],
    rustc_flags = [
        # Vendored third-party code: silence its lints rather than fix them.
        "--cap-lints=allow",
    ],
    version = "1.2.0",
    crate_features = [
        "default",
        "std",
    ],
)
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g. crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "crc32fast"
|
||||
version = "1.2.0"
|
||||
authors = ["Sam Rijs <srijs@airpost.net>", "Alex Crichton <alex@alexcrichton.com>"]
|
||||
description = "Fast, SIMD-accelerated CRC32 (IEEE) checksum computation"
|
||||
readme = "README.md"
|
||||
keywords = ["checksum", "crc", "crc32", "simd", "fast"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
repository = "https://github.com/srijs/rust-crc32fast"
|
||||
|
||||
[[bench]]
|
||||
name = "bench"
|
||||
harness = false
|
||||
[dependencies.cfg-if]
|
||||
version = "0.1"
|
||||
[dev-dependencies.bencher]
|
||||
version = "0.1"
|
||||
|
||||
[dev-dependencies.quickcheck]
|
||||
version = "0.6"
|
||||
default-features = false
|
||||
|
||||
[dev-dependencies.rand]
|
||||
version = "0.4"
|
||||
|
||||
[features]
|
||||
default = ["std"]
|
||||
nightly = []
|
||||
std = []
|
|
@ -0,0 +1,202 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2018 Sam Rijs, Alex Crichton and contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,81 @@
|
|||
# crc32fast [![Build Status][travis-img]][travis] [![Crates.io][crates-img]][crates] [![Documentation][docs-img]][docs]
|
||||
|
||||
[travis-img]: https://travis-ci.com/srijs/rust-crc32fast.svg?branch=master
|
||||
[travis]: https://travis-ci.com/srijs/rust-crc32fast
|
||||
[crates-img]: https://img.shields.io/crates/v/crc32fast.svg
|
||||
[crates]: https://crates.io/crates/crc32fast
|
||||
[docs-img]: https://docs.rs/crc32fast/badge.svg
|
||||
[docs]: https://docs.rs/crc32fast
|
||||
|
||||
_Fast, SIMD-accelerated CRC32 (IEEE) checksum computation_
|
||||
|
||||
## Usage
|
||||
|
||||
```rust
|
||||
extern crate crc32fast;
|
||||
|
||||
use crc32fast::Hasher;
|
||||
|
||||
let mut hasher = Hasher::new();
|
||||
hasher.update(b"foo bar baz");
|
||||
let checksum = hasher.finalize();
|
||||
```
|
||||
|
||||
## Performance
|
||||
|
||||
This crate contains multiple CRC32 implementations:
|
||||
|
||||
- A fast baseline implementation which processes up to 16 bytes per iteration
|
||||
- An optimized implementation for modern `x86` using `sse` and `pclmulqdq` instructions
|
||||
- An optimized implementation for `aarch64` using `crc32` instructions
|
||||
|
||||
Calling the `Hasher::new` constructor at runtime will perform a feature detection to select the most
|
||||
optimal implementation for the current CPU feature set.
|
||||
|
||||
| crate | version | variant | ns/iter | MB/s |
|
||||
|-------------------------------------|---------|-----------|---------|------|
|
||||
| [crc](https://crates.io/crates/crc) | 1.8.1 | n/a | 4,926 | 207 |
|
||||
| crc32fast (this crate) | 1.0.0 | baseline | 683 | 1499 |
|
||||
| crc32fast (this crate) | 1.0.0 | pclmulqdq | 140 | 7314 |
|
||||
|
||||
## Memory Safety
|
||||
|
||||
Due to the use of SIMD intrinsics for the optimized implementations, this crate contains some amount of `unsafe` code.
|
||||
|
||||
In order to ensure memory safety, the relevant code has been fuzz tested using [afl.rs](https://github.com/rust-fuzz/afl.rs) with millions of iterations in both `debug` and `release` build settings. You can inspect the test setup in the `fuzz` sub-directory, which also has instructions on how to run the tests yourself.
|
||||
|
||||
On top of that, every commit is tested using an address sanitizer in CI to catch any out of bounds memory accesses.
|
||||
|
||||
Even though neither fuzzing not sanitization has revealed any safety bugs yet, please don't hesitate to file an issue if you run into any crashes or other unexpected behaviour.
|
||||
|
||||
## Available feature flags
|
||||
|
||||
### `std` (default: enabled)
|
||||
|
||||
This library supports being built without the Rust `std` library, which is useful for low-level use-cases such as embedded where no operating system is available. To build the crate in a `no_std` context, disable the default `std` feature.
|
||||
|
||||
Note: Because runtime CPU feature detection requires OS support, the specialized SIMD implementations will be unavailable when the `std` feature is disabled.
|
||||
|
||||
### `nightly` (default: disabled)
|
||||
|
||||
This feature flag enables unstable features that are only available on the `nightly` channel. Keep in mind that when enabling this feature flag, you
|
||||
might experience breaking changes when updating compiler versions.
|
||||
|
||||
Currently, enabling this feature flag will make the optimized `aarch64` implementation available.
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under either of
|
||||
|
||||
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
http://www.apache.org/licenses/LICENSE-2.0)
|
||||
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
http://opensource.org/licenses/MIT)
|
||||
|
||||
at your option.
|
||||
|
||||
### Contribution
|
||||
|
||||
Unless you explicitly state otherwise, any contribution intentionally submitted
|
||||
for inclusion in this project by you, as defined in the Apache-2.0 license,
|
||||
shall be dual licensed as above, without any additional terms or conditions.
|
|
@ -0,0 +1,49 @@
|
|||
#[macro_use]
|
||||
extern crate bencher;
|
||||
extern crate crc32fast;
|
||||
extern crate rand;
|
||||
|
||||
use bencher::Bencher;
|
||||
use crc32fast::Hasher;
|
||||
use rand::Rng;
|
||||
|
||||
fn bench(b: &mut Bencher, size: usize, hasher_init: Hasher) {
|
||||
let mut bytes = vec![0u8; size];
|
||||
rand::thread_rng().fill_bytes(&mut bytes);
|
||||
|
||||
b.iter(|| {
|
||||
let mut hasher = hasher_init.clone();
|
||||
hasher.update(&bytes);
|
||||
bencher::black_box(hasher.finalize())
|
||||
});
|
||||
|
||||
b.bytes = size as u64;
|
||||
}
|
||||
|
||||
fn bench_kilobyte_baseline(b: &mut Bencher) {
|
||||
bench(b, 1024, Hasher::internal_new_baseline(0))
|
||||
}
|
||||
|
||||
fn bench_kilobyte_specialized(b: &mut Bencher) {
|
||||
bench(b, 1024, Hasher::internal_new_specialized(0).unwrap())
|
||||
}
|
||||
|
||||
fn bench_megabyte_baseline(b: &mut Bencher) {
|
||||
bench(b, 1024 * 1024, Hasher::internal_new_baseline(0))
|
||||
}
|
||||
|
||||
fn bench_megabyte_specialized(b: &mut Bencher) {
|
||||
bench(b, 1024 * 1024, Hasher::internal_new_specialized(0).unwrap())
|
||||
}
|
||||
|
||||
benchmark_group!(
|
||||
bench_baseline,
|
||||
bench_kilobyte_baseline,
|
||||
bench_megabyte_baseline
|
||||
);
|
||||
benchmark_group!(
|
||||
bench_specialized,
|
||||
bench_kilobyte_specialized,
|
||||
bench_megabyte_specialized
|
||||
);
|
||||
benchmark_main!(bench_baseline, bench_specialized);
|
|
@ -0,0 +1,35 @@
|
|||
use std::env;
|
||||
use std::process::Command;
|
||||
use std::str;
|
||||
|
||||
fn main() {
|
||||
println!("cargo:rerun-if-changed=build.rs");
|
||||
|
||||
let minor = match rustc_minor_version() {
|
||||
Some(n) => n,
|
||||
None => return,
|
||||
};
|
||||
|
||||
if minor >= 27 {
|
||||
println!("cargo:rustc-cfg=crc32fast_stdarchx86");
|
||||
}
|
||||
}
|
||||
|
||||
fn rustc_minor_version() -> Option<u32> {
|
||||
macro_rules! otry {
|
||||
($e:expr) => {
|
||||
match $e {
|
||||
Some(e) => e,
|
||||
None => return None,
|
||||
}
|
||||
};
|
||||
}
|
||||
let rustc = otry!(env::var_os("RUSTC"));
|
||||
let output = otry!(Command::new(rustc).arg("--version").output().ok());
|
||||
let version = otry!(str::from_utf8(&output.stdout).ok());
|
||||
let mut pieces = version.split('.');
|
||||
if pieces.next() != Some("rustc 1") {
|
||||
return None;
|
||||
}
|
||||
otry!(pieces.next()).parse().ok()
|
||||
}
|
|
@ -0,0 +1,94 @@
|
|||
use table::CRC32_TABLE;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct State {
|
||||
state: u32,
|
||||
}
|
||||
|
||||
impl State {
|
||||
pub fn new(state: u32) -> Self {
|
||||
State { state }
|
||||
}
|
||||
|
||||
pub fn update(&mut self, buf: &[u8]) {
|
||||
self.state = update_fast_16(self.state, buf);
|
||||
}
|
||||
|
||||
pub fn finalize(self) -> u32 {
|
||||
self.state
|
||||
}
|
||||
|
||||
pub fn reset(&mut self) {
|
||||
self.state = 0;
|
||||
}
|
||||
|
||||
pub fn combine(&mut self, other: u32, amount: u64) {
|
||||
self.state = ::combine::combine(self.state, other, amount);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn update_fast_16(prev: u32, mut buf: &[u8]) -> u32 {
|
||||
const UNROLL: usize = 4;
|
||||
const BYTES_AT_ONCE: usize = 16 * UNROLL;
|
||||
|
||||
let mut crc = !prev;
|
||||
|
||||
while buf.len() >= BYTES_AT_ONCE {
|
||||
for _ in 0..UNROLL {
|
||||
crc = CRC32_TABLE[0x0][buf[0xf] as usize]
|
||||
^ CRC32_TABLE[0x1][buf[0xe] as usize]
|
||||
^ CRC32_TABLE[0x2][buf[0xd] as usize]
|
||||
^ CRC32_TABLE[0x3][buf[0xc] as usize]
|
||||
^ CRC32_TABLE[0x4][buf[0xb] as usize]
|
||||
^ CRC32_TABLE[0x5][buf[0xa] as usize]
|
||||
^ CRC32_TABLE[0x6][buf[0x9] as usize]
|
||||
^ CRC32_TABLE[0x7][buf[0x8] as usize]
|
||||
^ CRC32_TABLE[0x8][buf[0x7] as usize]
|
||||
^ CRC32_TABLE[0x9][buf[0x6] as usize]
|
||||
^ CRC32_TABLE[0xa][buf[0x5] as usize]
|
||||
^ CRC32_TABLE[0xb][buf[0x4] as usize]
|
||||
^ CRC32_TABLE[0xc][buf[0x3] as usize ^ ((crc >> 0x18) & 0xFF) as usize]
|
||||
^ CRC32_TABLE[0xd][buf[0x2] as usize ^ ((crc >> 0x10) & 0xFF) as usize]
|
||||
^ CRC32_TABLE[0xe][buf[0x1] as usize ^ ((crc >> 0x08) & 0xFF) as usize]
|
||||
^ CRC32_TABLE[0xf][buf[0x0] as usize ^ ((crc >> 0x00) & 0xFF) as usize];
|
||||
buf = &buf[16..];
|
||||
}
|
||||
}
|
||||
|
||||
update_slow(!crc, buf)
|
||||
}
|
||||
|
||||
pub(crate) fn update_slow(prev: u32, buf: &[u8]) -> u32 {
|
||||
let mut crc = !prev;
|
||||
|
||||
for &byte in buf.iter() {
|
||||
crc = CRC32_TABLE[0][((crc as u8) ^ byte) as usize] ^ (crc >> 8);
|
||||
}
|
||||
|
||||
!crc
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
#[test]
|
||||
fn slow() {
|
||||
assert_eq!(super::update_slow(0, b""), 0);
|
||||
|
||||
// test vectors from the iPXE project (input and output are bitwise negated)
|
||||
assert_eq!(super::update_slow(!0x12345678, b""), !0x12345678);
|
||||
assert_eq!(super::update_slow(!0xffffffff, b"hello world"), !0xf2b5ee7a);
|
||||
assert_eq!(super::update_slow(!0xffffffff, b"hello"), !0xc9ef5979);
|
||||
assert_eq!(super::update_slow(!0xc9ef5979, b" world"), !0xf2b5ee7a);
|
||||
|
||||
// Some vectors found on Rosetta code
|
||||
assert_eq!(super::update_slow(0, b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"), 0x190A55AD);
|
||||
assert_eq!(super::update_slow(0, b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"), 0xFF6CAB0B);
|
||||
assert_eq!(super::update_slow(0, b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F"), 0x91267E8A);
|
||||
}
|
||||
|
||||
quickcheck! {
|
||||
fn fast_16_is_the_same_as_slow(crc: u32, bytes: Vec<u8>) -> bool {
|
||||
super::update_fast_16(crc, &bytes) == super::update_slow(crc, &bytes)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,77 @@
|
|||
const GF2_DIM: usize = 32;
|
||||
|
||||
fn gf2_matrix_times(mat: &[u32; GF2_DIM], mut vec: u32) -> u32 {
|
||||
let mut sum = 0;
|
||||
let mut idx = 0;
|
||||
while vec > 0 {
|
||||
if vec & 1 == 1 {
|
||||
sum ^= mat[idx];
|
||||
}
|
||||
vec >>= 1;
|
||||
idx += 1;
|
||||
}
|
||||
return sum;
|
||||
}
|
||||
|
||||
fn gf2_matrix_square(square: &mut [u32; GF2_DIM], mat: &[u32; GF2_DIM]) {
|
||||
for n in 0..GF2_DIM {
|
||||
square[n] = gf2_matrix_times(mat, mat[n]);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn combine(mut crc1: u32, crc2: u32, mut len2: u64) -> u32 {
|
||||
let mut row: u32;
|
||||
let mut even = [0u32; GF2_DIM]; /* even-power-of-two zeros operator */
|
||||
let mut odd = [0u32; GF2_DIM]; /* odd-power-of-two zeros operator */
|
||||
|
||||
/* degenerate case (also disallow negative lengths) */
|
||||
if len2 <= 0 {
|
||||
return crc1;
|
||||
}
|
||||
|
||||
/* put operator for one zero bit in odd */
|
||||
odd[0] = 0xedb88320; /* CRC-32 polynomial */
|
||||
row = 1;
|
||||
for n in 1..GF2_DIM {
|
||||
odd[n] = row;
|
||||
row <<= 1;
|
||||
}
|
||||
|
||||
/* put operator for two zero bits in even */
|
||||
gf2_matrix_square(&mut even, &odd);
|
||||
|
||||
/* put operator for four zero bits in odd */
|
||||
gf2_matrix_square(&mut odd, &even);
|
||||
|
||||
/* apply len2 zeros to crc1 (first square will put the operator for one
|
||||
zero byte, eight zero bits, in even) */
|
||||
loop {
|
||||
/* apply zeros operator for this bit of len2 */
|
||||
gf2_matrix_square(&mut even, &odd);
|
||||
if len2 & 1 == 1 {
|
||||
crc1 = gf2_matrix_times(&even, crc1);
|
||||
}
|
||||
len2 >>= 1;
|
||||
|
||||
/* if no more bits set, then done */
|
||||
if len2 == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
/* another iteration of the loop with odd and even swapped */
|
||||
gf2_matrix_square(&mut odd, &even);
|
||||
if len2 & 1 == 1 {
|
||||
crc1 = gf2_matrix_times(&odd, crc1);
|
||||
}
|
||||
len2 >>= 1;
|
||||
|
||||
/* if no more bits set, then done */
|
||||
if len2 == 0 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* return combined crc */
|
||||
crc1 ^= crc2;
|
||||
return crc1;
|
||||
}
|
|
@ -0,0 +1,178 @@
|
|||
//! ## Example
|
||||
//!
|
||||
//! ```rust
|
||||
//! use crc32fast::Hasher;
|
||||
//!
|
||||
//! let mut hasher = Hasher::new();
|
||||
//! hasher.update(b"foo bar baz");
|
||||
//! let checksum = hasher.finalize();
|
||||
//! ```
|
||||
//!
|
||||
//! ## Performance
|
||||
//!
|
||||
//! This crate contains multiple CRC32 implementations:
|
||||
//!
|
||||
//! - A fast baseline implementation which processes up to 16 bytes per iteration
|
||||
//! - An optimized implementation for modern `x86` using `sse` and `pclmulqdq` instructions
|
||||
//!
|
||||
//! Calling the `Hasher::new` constructor at runtime will perform a feature detection to select the most
|
||||
//! optimal implementation for the current CPU feature set.
|
||||
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
#![cfg_attr(
|
||||
all(feature = "nightly", target_arch = "aarch64"),
|
||||
feature(stdsimd, aarch64_target_feature)
|
||||
)]
|
||||
|
||||
#[deny(missing_docs)]
|
||||
#[cfg(test)]
|
||||
#[macro_use]
|
||||
extern crate quickcheck;
|
||||
|
||||
#[macro_use]
|
||||
extern crate cfg_if;
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
use std as core;
|
||||
|
||||
use core::fmt;
|
||||
use core::hash;
|
||||
|
||||
mod baseline;
|
||||
mod combine;
|
||||
mod specialized;
|
||||
mod table;
|
||||
|
||||
#[derive(Clone)]
|
||||
enum State {
|
||||
Baseline(baseline::State),
|
||||
Specialized(specialized::State),
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
/// Represents an in-progress CRC32 computation.
|
||||
pub struct Hasher {
|
||||
amount: u64,
|
||||
state: State,
|
||||
}
|
||||
|
||||
const DEFAULT_INIT_STATE: u32 = 0;
|
||||
|
||||
impl Hasher {
|
||||
/// Create a new `Hasher`.
|
||||
///
|
||||
/// This will perform a CPU feature detection at runtime to select the most
|
||||
/// optimal implementation for the current processor architecture.
|
||||
pub fn new() -> Self {
|
||||
Self::new_with_initial(DEFAULT_INIT_STATE)
|
||||
}
|
||||
|
||||
/// Create a new `Hasher` with an initial CRC32 state.
|
||||
///
|
||||
/// This works just like `Hasher::new`, except that it allows for an initial
|
||||
/// CRC32 state to be passed in.
|
||||
pub fn new_with_initial(init: u32) -> Self {
|
||||
Self::internal_new_specialized(init).unwrap_or_else(|| Self::internal_new_baseline(init))
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
// Internal-only API. Don't use.
|
||||
pub fn internal_new_baseline(init: u32) -> Self {
|
||||
Hasher {
|
||||
amount: 0,
|
||||
state: State::Baseline(baseline::State::new(init)),
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
// Internal-only API. Don't use.
|
||||
pub fn internal_new_specialized(init: u32) -> Option<Self> {
|
||||
{
|
||||
if let Some(state) = specialized::State::new(init) {
|
||||
return Some(Hasher {
|
||||
amount: 0,
|
||||
state: State::Specialized(state),
|
||||
});
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Process the given byte slice and update the hash state.
|
||||
pub fn update(&mut self, buf: &[u8]) {
|
||||
self.amount += buf.len() as u64;
|
||||
match self.state {
|
||||
State::Baseline(ref mut state) => state.update(buf),
|
||||
State::Specialized(ref mut state) => state.update(buf),
|
||||
}
|
||||
}
|
||||
|
||||
/// Finalize the hash state and return the computed CRC32 value.
|
||||
pub fn finalize(self) -> u32 {
|
||||
match self.state {
|
||||
State::Baseline(state) => state.finalize(),
|
||||
State::Specialized(state) => state.finalize(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Reset the hash state.
|
||||
pub fn reset(&mut self) {
|
||||
self.amount = 0;
|
||||
match self.state {
|
||||
State::Baseline(ref mut state) => state.reset(),
|
||||
State::Specialized(ref mut state) => state.reset(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Combine the hash state with the hash state for the subsequent block of bytes.
|
||||
pub fn combine(&mut self, other: &Self) {
|
||||
self.amount += other.amount;
|
||||
let other_crc = other.clone().finalize();
|
||||
match self.state {
|
||||
State::Baseline(ref mut state) => state.combine(other_crc, other.amount),
|
||||
State::Specialized(ref mut state) => state.combine(other_crc, other.amount),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Hasher {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("crc32fast::Hasher").finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Hasher {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl hash::Hasher for Hasher {
|
||||
fn write(&mut self, bytes: &[u8]) {
|
||||
self.update(bytes)
|
||||
}
|
||||
|
||||
fn finish(&self) -> u64 {
|
||||
u64::from(self.clone().finalize())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::Hasher;
|
||||
|
||||
quickcheck! {
|
||||
fn combine(bytes_1: Vec<u8>, bytes_2: Vec<u8>) -> bool {
|
||||
let mut hash_a = Hasher::new();
|
||||
hash_a.update(&bytes_1);
|
||||
hash_a.update(&bytes_2);
|
||||
let mut hash_b = Hasher::new();
|
||||
hash_b.update(&bytes_2);
|
||||
let mut hash_c = Hasher::new();
|
||||
hash_c.update(&bytes_1);
|
||||
hash_c.combine(&hash_b);
|
||||
|
||||
hash_a.finalize() == hash_c.finalize()
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,88 @@
|
|||
use std::arch::aarch64 as arch;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct State {
|
||||
state: u32,
|
||||
}
|
||||
|
||||
impl State {
|
||||
pub fn new(state: u32) -> Option<Self> {
|
||||
if is_aarch64_feature_detected!("crc") {
|
||||
// SAFETY: The conditions above ensure that all
|
||||
// required instructions are supported by the CPU.
|
||||
Some(Self { state })
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update(&mut self, buf: &[u8]) {
|
||||
// SAFETY: The `State::new` constructor ensures that all
|
||||
// required instructions are supported by the CPU.
|
||||
self.state = unsafe { calculate(self.state, buf) }
|
||||
}
|
||||
|
||||
pub fn finalize(self) -> u32 {
|
||||
self.state
|
||||
}
|
||||
|
||||
pub fn reset(&mut self) {
|
||||
self.state = 0;
|
||||
}
|
||||
|
||||
pub fn combine(&mut self, other: u32, amount: u64) {
|
||||
self.state = ::combine::combine(self.state, other, amount);
|
||||
}
|
||||
}
|
||||
|
||||
// target_feature is necessary to allow rustc to inline the crc32* wrappers
|
||||
#[target_feature(enable = "crc")]
|
||||
pub unsafe fn calculate(crc: u32, data: &[u8]) -> u32 {
|
||||
let mut c32 = !crc;
|
||||
let (pre_quad, quads, post_quad) = data.align_to::<u64>();
|
||||
|
||||
c32 = pre_quad.iter().fold(c32, |acc, &b| arch::__crc32b(acc, b));
|
||||
|
||||
// unrolling increases performance by a lot
|
||||
let mut quad_iter = quads.chunks_exact(8);
|
||||
for chunk in &mut quad_iter {
|
||||
c32 = arch::__crc32d(c32, chunk[0]);
|
||||
c32 = arch::__crc32d(c32, chunk[1]);
|
||||
c32 = arch::__crc32d(c32, chunk[2]);
|
||||
c32 = arch::__crc32d(c32, chunk[3]);
|
||||
c32 = arch::__crc32d(c32, chunk[4]);
|
||||
c32 = arch::__crc32d(c32, chunk[5]);
|
||||
c32 = arch::__crc32d(c32, chunk[6]);
|
||||
c32 = arch::__crc32d(c32, chunk[7]);
|
||||
}
|
||||
c32 = quad_iter
|
||||
.remainder()
|
||||
.iter()
|
||||
.fold(c32, |acc, &q| arch::__crc32d(acc, q));
|
||||
|
||||
c32 = post_quad.iter().fold(c32, |acc, &b| arch::__crc32b(acc, b));
|
||||
|
||||
!c32
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
quickcheck! {
|
||||
fn check_against_baseline(init: u32, chunks: Vec<(Vec<u8>, usize)>) -> bool {
|
||||
let mut baseline = super::super::super::baseline::State::new(init);
|
||||
let mut aarch64 = super::State::new(init).expect("not supported");
|
||||
for (chunk, mut offset) in chunks {
|
||||
// simulate random alignments by offsetting the slice by up to 15 bytes
|
||||
offset &= 0xF;
|
||||
if chunk.len() <= offset {
|
||||
baseline.update(&chunk);
|
||||
aarch64.update(&chunk);
|
||||
} else {
|
||||
baseline.update(&chunk[offset..]);
|
||||
aarch64.update(&chunk[offset..]);
|
||||
}
|
||||
}
|
||||
aarch64.finalize() == baseline.finalize()
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
cfg_if! {
|
||||
if #[cfg(all(
|
||||
crc32fast_stdarchx86,
|
||||
any(target_arch = "x86", target_arch = "x86_64")
|
||||
))] {
|
||||
mod pclmulqdq;
|
||||
pub use self::pclmulqdq::State;
|
||||
} else if #[cfg(all(feature = "nightly", target_arch = "aarch64"))] {
|
||||
mod aarch64;
|
||||
pub use self::aarch64::State;
|
||||
} else {
|
||||
#[derive(Clone)]
|
||||
pub enum State {}
|
||||
impl State {
|
||||
pub fn new(_: u32) -> Option<Self> {
|
||||
None
|
||||
}
|
||||
|
||||
pub fn update(&mut self, _buf: &[u8]) {
|
||||
match *self {}
|
||||
}
|
||||
|
||||
pub fn finalize(self) -> u32 {
|
||||
match self{}
|
||||
}
|
||||
|
||||
pub fn reset(&mut self) {
|
||||
match *self {}
|
||||
}
|
||||
|
||||
pub fn combine(&mut self, _other: u32, _amount: u64) {
|
||||
match *self {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,225 @@
|
|||
#[cfg(target_arch = "x86")]
|
||||
use core::arch::x86 as arch;
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
use core::arch::x86_64 as arch;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct State {
|
||||
state: u32,
|
||||
}
|
||||
|
||||
impl State {
|
||||
#[cfg(not(feature = "std"))]
|
||||
pub fn new(state: u32) -> Option<Self> {
|
||||
if cfg!(target_feature = "pclmulqdq")
|
||||
&& cfg!(target_feature = "sse2")
|
||||
&& cfg!(target_feature = "sse4.1")
|
||||
{
|
||||
// SAFETY: The conditions above ensure that all
|
||||
// required instructions are supported by the CPU.
|
||||
Some(Self { state })
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub fn new(state: u32) -> Option<Self> {
|
||||
if is_x86_feature_detected!("pclmulqdq")
|
||||
&& is_x86_feature_detected!("sse2")
|
||||
&& is_x86_feature_detected!("sse4.1")
|
||||
{
|
||||
// SAFETY: The conditions above ensure that all
|
||||
// required instructions are supported by the CPU.
|
||||
Some(Self { state })
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update(&mut self, buf: &[u8]) {
|
||||
// SAFETY: The `State::new` constructor ensures that all
|
||||
// required instructions are supported by the CPU.
|
||||
self.state = unsafe { calculate(self.state, buf) }
|
||||
}
|
||||
|
||||
pub fn finalize(self) -> u32 {
|
||||
self.state
|
||||
}
|
||||
|
||||
pub fn reset(&mut self) {
|
||||
self.state = 0;
|
||||
}
|
||||
|
||||
pub fn combine(&mut self, other: u32, amount: u64) {
|
||||
self.state = ::combine::combine(self.state, other, amount);
|
||||
}
|
||||
}
|
||||
|
||||
const K1: i64 = 0x154442bd4;
|
||||
const K2: i64 = 0x1c6e41596;
|
||||
const K3: i64 = 0x1751997d0;
|
||||
const K4: i64 = 0x0ccaa009e;
|
||||
const K5: i64 = 0x163cd6124;
|
||||
const K6: i64 = 0x1db710640;
|
||||
|
||||
const P_X: i64 = 0x1DB710641;
|
||||
const U_PRIME: i64 = 0x1F7011641;
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
unsafe fn debug(s: &str, a: arch::__m128i) -> arch::__m128i {
|
||||
if false {
|
||||
union A {
|
||||
a: arch::__m128i,
|
||||
b: [u8; 16],
|
||||
}
|
||||
let x = A { a }.b;
|
||||
print!(" {:20} | ", s);
|
||||
for x in x.iter() {
|
||||
print!("{:02x} ", x);
|
||||
}
|
||||
println!();
|
||||
}
|
||||
return a;
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "std"))]
|
||||
unsafe fn debug(_s: &str, a: arch::__m128i) -> arch::__m128i {
|
||||
a
|
||||
}
|
||||
|
||||
#[target_feature(enable = "pclmulqdq", enable = "sse2", enable = "sse4.1")]
|
||||
pub unsafe fn calculate(crc: u32, mut data: &[u8]) -> u32 {
|
||||
// In theory we can accelerate smaller chunks too, but for now just rely on
|
||||
// the fallback implementation as it's too much hassle and doesn't seem too
|
||||
// beneficial.
|
||||
if data.len() < 128 {
|
||||
return ::baseline::update_fast_16(crc, data);
|
||||
}
|
||||
|
||||
// Step 1: fold by 4 loop
|
||||
let mut x3 = get(&mut data);
|
||||
let mut x2 = get(&mut data);
|
||||
let mut x1 = get(&mut data);
|
||||
let mut x0 = get(&mut data);
|
||||
|
||||
// fold in our initial value, part of the incremental crc checksum
|
||||
x3 = arch::_mm_xor_si128(x3, arch::_mm_cvtsi32_si128(!crc as i32));
|
||||
|
||||
let k1k2 = arch::_mm_set_epi64x(K2, K1);
|
||||
while data.len() >= 64 {
|
||||
x3 = reduce128(x3, get(&mut data), k1k2);
|
||||
x2 = reduce128(x2, get(&mut data), k1k2);
|
||||
x1 = reduce128(x1, get(&mut data), k1k2);
|
||||
x0 = reduce128(x0, get(&mut data), k1k2);
|
||||
}
|
||||
|
||||
let k3k4 = arch::_mm_set_epi64x(K4, K3);
|
||||
let mut x = reduce128(x3, x2, k3k4);
|
||||
x = reduce128(x, x1, k3k4);
|
||||
x = reduce128(x, x0, k3k4);
|
||||
|
||||
// Step 2: fold by 1 loop
|
||||
while data.len() >= 16 {
|
||||
x = reduce128(x, get(&mut data), k3k4);
|
||||
}
|
||||
|
||||
debug("128 > 64 init", x);
|
||||
|
||||
// Perform step 3, reduction from 128 bits to 64 bits. This is
|
||||
// significantly different from the paper and basically doesn't follow it
|
||||
// at all. It's not really clear why, but implementations of this algorithm
|
||||
// in Chrome/Linux diverge in the same way. It is beyond me why this is
|
||||
// different than the paper, maybe the paper has like errata or something?
|
||||
// Unclear.
|
||||
//
|
||||
// It's also not clear to me what's actually happening here and/or why, but
|
||||
// algebraically what's happening is:
|
||||
//
|
||||
// x = (x[0:63] • K4) ^ x[64:127] // 96 bit result
|
||||
// x = ((x[0:31] as u64) • K5) ^ x[32:95] // 64 bit result
|
||||
//
|
||||
// It's... not clear to me what's going on here. The paper itself is pretty
|
||||
// vague on this part but definitely uses different constants at least.
|
||||
// It's not clear to me, reading the paper, where the xor operations are
|
||||
// happening or why things are shifting around. This implementation...
|
||||
// appears to work though!
|
||||
drop(K6);
|
||||
let x = arch::_mm_xor_si128(
|
||||
arch::_mm_clmulepi64_si128(x, k3k4, 0x10),
|
||||
arch::_mm_srli_si128(x, 8),
|
||||
);
|
||||
let x = arch::_mm_xor_si128(
|
||||
arch::_mm_clmulepi64_si128(
|
||||
arch::_mm_and_si128(x, arch::_mm_set_epi32(0, 0, 0, !0)),
|
||||
arch::_mm_set_epi64x(0, K5),
|
||||
0x00,
|
||||
),
|
||||
arch::_mm_srli_si128(x, 4),
|
||||
);
|
||||
debug("128 > 64 xx", x);
|
||||
|
||||
// Perform a Barrett reduction from our now 64 bits to 32 bits. The
|
||||
// algorithm for this is described at the end of the paper, and note that
|
||||
// this also implements the "bit reflected input" variant.
|
||||
let pu = arch::_mm_set_epi64x(U_PRIME, P_X);
|
||||
|
||||
// T1(x) = ⌊(R(x) % x^32)⌋ • μ
|
||||
let t1 = arch::_mm_clmulepi64_si128(
|
||||
arch::_mm_and_si128(x, arch::_mm_set_epi32(0, 0, 0, !0)),
|
||||
pu,
|
||||
0x10,
|
||||
);
|
||||
// T2(x) = ⌊(T1(x) % x^32)⌋ • P(x)
|
||||
let t2 = arch::_mm_clmulepi64_si128(
|
||||
arch::_mm_and_si128(t1, arch::_mm_set_epi32(0, 0, 0, !0)),
|
||||
pu,
|
||||
0x00,
|
||||
);
|
||||
// We're doing the bit-reflected variant, so get the upper 32-bits of the
|
||||
// 64-bit result instead of the lower 32-bits.
|
||||
//
|
||||
// C(x) = R(x) ^ T2(x) / x^32
|
||||
let c = arch::_mm_extract_epi32(arch::_mm_xor_si128(x, t2), 1) as u32;
|
||||
|
||||
if !data.is_empty() {
|
||||
::baseline::update_fast_16(!c, data)
|
||||
} else {
|
||||
!c
|
||||
}
|
||||
}
|
||||
|
||||
/// Fold the 128-bit accumulator `a` into `b` using the carry-less
/// multiplication constants packed into `keys`.
///
/// The low 64-bit half of `a` is multiplied (carry-less, `pclmulqdq`
/// selector 0x00) by the low half of `keys`, the high half of `a` by the
/// high half of `keys` (selector 0x11), and both 128-bit products are
/// xor-ed into `b`. This is the standard "fold" step of the CRC-by-PCLMULQDQ
/// algorithm.
///
/// # Safety
/// Caller must ensure the `pclmulqdq` CPU feature is available.
unsafe fn reduce128(a: arch::__m128i, b: arch::__m128i, keys: arch::__m128i) -> arch::__m128i {
    let lo_product = arch::_mm_clmulepi64_si128(a, keys, 0x00);
    let hi_product = arch::_mm_clmulepi64_si128(a, keys, 0x11);
    // xor is associative, so folding the two products together first and
    // then into `b` yields the same value as chaining through `b`.
    arch::_mm_xor_si128(b, arch::_mm_xor_si128(lo_product, hi_product))
}
|
||||
|
||||
/// Load the next 16 bytes of `a` into an SSE register (unaligned load) and
/// advance the slice past them.
///
/// Debug builds assert that at least 16 bytes remain; callers are expected
/// to have checked the length already.
///
/// # Safety
/// Caller must ensure SSE2 is available and that `a` holds at least 16
/// bytes (release builds do not re-check).
unsafe fn get(a: &mut &[u8]) -> arch::__m128i {
    debug_assert!(a.len() >= 16);
    // Split off the 16 bytes we are about to load so the slice update and
    // the load read from the same region.
    let (head, rest) = a.split_at(16);
    *a = rest;
    arch::_mm_loadu_si128(head.as_ptr() as *const arch::__m128i)
}
|
||||
|
||||
#[cfg(test)]
mod test {
    // Property-based test (via the `quickcheck!` macro): for arbitrary
    // starting CRC values and arbitrary sequences of byte chunks, the
    // pclmulqdq-accelerated implementation must produce exactly the same
    // finalized CRC as the portable baseline implementation.
    quickcheck! {
        fn check_against_baseline(init: u32, chunks: Vec<(Vec<u8>, usize)>) -> bool {
            let mut baseline = super::super::super::baseline::State::new(init);
            // `expect` is acceptable here: the test is only meaningful on
            // hardware where the pclmulqdq backend is supported.
            let mut pclmulqdq = super::State::new(init).expect("not supported");
            for (chunk, mut offset) in chunks {
                // simulate random alignments by offsetting the slice by up to 15 bytes
                offset &= 0xF;
                if chunk.len() <= offset {
                    // Offset would consume the whole chunk; feed it unshifted
                    // so both implementations still see the same bytes.
                    baseline.update(&chunk);
                    pclmulqdq.update(&chunk);
                } else {
                    baseline.update(&chunk[offset..]);
                    pclmulqdq.update(&chunk[offset..]);
                }
            }
            // Both state machines saw identical input; their CRCs must agree.
            pclmulqdq.finalize() == baseline.finalize()
        }
    }
}
|
|
@ -0,0 +1,626 @@
|
|||
pub const CRC32_TABLE: [[u32; 256]; 16] = [
|
||||
[
|
||||
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535,
|
||||
0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd,
|
||||
0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, 0x1adad47d,
|
||||
0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
|
||||
0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4,
|
||||
0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
|
||||
0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac,
|
||||
0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
|
||||
0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab,
|
||||
0xb6662d3d, 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f,
|
||||
0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb,
|
||||
0x086d3d2d, 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
|
||||
0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea,
|
||||
0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, 0x4db26158, 0x3ab551ce,
|
||||
0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a,
|
||||
0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
|
||||
0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409,
|
||||
0xce61e49f, 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
|
||||
0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739,
|
||||
0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
|
||||
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, 0x8708a3d2, 0x1e01f268,
|
||||
0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0,
|
||||
0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8,
|
||||
0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
|
||||
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef,
|
||||
0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, 0xcc0c7795, 0xbb0b4703,
|
||||
0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7,
|
||||
0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
|
||||
0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae,
|
||||
0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
|
||||
0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, 0x88085ae6,
|
||||
0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
|
||||
0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d,
|
||||
0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5,
|
||||
0x47b2cf7f, 0x30b5ffe9, 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605,
|
||||
0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
|
||||
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
|
||||
],
|
||||
[
|
||||
0x00000000, 0x191B3141, 0x32366282, 0x2B2D53C3, 0x646CC504, 0x7D77F445, 0x565AA786,
|
||||
0x4F4196C7, 0xC8D98A08, 0xD1C2BB49, 0xFAEFE88A, 0xE3F4D9CB, 0xACB54F0C, 0xB5AE7E4D,
|
||||
0x9E832D8E, 0x87981CCF, 0x4AC21251, 0x53D92310, 0x78F470D3, 0x61EF4192, 0x2EAED755,
|
||||
0x37B5E614, 0x1C98B5D7, 0x05838496, 0x821B9859, 0x9B00A918, 0xB02DFADB, 0xA936CB9A,
|
||||
0xE6775D5D, 0xFF6C6C1C, 0xD4413FDF, 0xCD5A0E9E, 0x958424A2, 0x8C9F15E3, 0xA7B24620,
|
||||
0xBEA97761, 0xF1E8E1A6, 0xE8F3D0E7, 0xC3DE8324, 0xDAC5B265, 0x5D5DAEAA, 0x44469FEB,
|
||||
0x6F6BCC28, 0x7670FD69, 0x39316BAE, 0x202A5AEF, 0x0B07092C, 0x121C386D, 0xDF4636F3,
|
||||
0xC65D07B2, 0xED705471, 0xF46B6530, 0xBB2AF3F7, 0xA231C2B6, 0x891C9175, 0x9007A034,
|
||||
0x179FBCFB, 0x0E848DBA, 0x25A9DE79, 0x3CB2EF38, 0x73F379FF, 0x6AE848BE, 0x41C51B7D,
|
||||
0x58DE2A3C, 0xF0794F05, 0xE9627E44, 0xC24F2D87, 0xDB541CC6, 0x94158A01, 0x8D0EBB40,
|
||||
0xA623E883, 0xBF38D9C2, 0x38A0C50D, 0x21BBF44C, 0x0A96A78F, 0x138D96CE, 0x5CCC0009,
|
||||
0x45D73148, 0x6EFA628B, 0x77E153CA, 0xBABB5D54, 0xA3A06C15, 0x888D3FD6, 0x91960E97,
|
||||
0xDED79850, 0xC7CCA911, 0xECE1FAD2, 0xF5FACB93, 0x7262D75C, 0x6B79E61D, 0x4054B5DE,
|
||||
0x594F849F, 0x160E1258, 0x0F152319, 0x243870DA, 0x3D23419B, 0x65FD6BA7, 0x7CE65AE6,
|
||||
0x57CB0925, 0x4ED03864, 0x0191AEA3, 0x188A9FE2, 0x33A7CC21, 0x2ABCFD60, 0xAD24E1AF,
|
||||
0xB43FD0EE, 0x9F12832D, 0x8609B26C, 0xC94824AB, 0xD05315EA, 0xFB7E4629, 0xE2657768,
|
||||
0x2F3F79F6, 0x362448B7, 0x1D091B74, 0x04122A35, 0x4B53BCF2, 0x52488DB3, 0x7965DE70,
|
||||
0x607EEF31, 0xE7E6F3FE, 0xFEFDC2BF, 0xD5D0917C, 0xCCCBA03D, 0x838A36FA, 0x9A9107BB,
|
||||
0xB1BC5478, 0xA8A76539, 0x3B83984B, 0x2298A90A, 0x09B5FAC9, 0x10AECB88, 0x5FEF5D4F,
|
||||
0x46F46C0E, 0x6DD93FCD, 0x74C20E8C, 0xF35A1243, 0xEA412302, 0xC16C70C1, 0xD8774180,
|
||||
0x9736D747, 0x8E2DE606, 0xA500B5C5, 0xBC1B8484, 0x71418A1A, 0x685ABB5B, 0x4377E898,
|
||||
0x5A6CD9D9, 0x152D4F1E, 0x0C367E5F, 0x271B2D9C, 0x3E001CDD, 0xB9980012, 0xA0833153,
|
||||
0x8BAE6290, 0x92B553D1, 0xDDF4C516, 0xC4EFF457, 0xEFC2A794, 0xF6D996D5, 0xAE07BCE9,
|
||||
0xB71C8DA8, 0x9C31DE6B, 0x852AEF2A, 0xCA6B79ED, 0xD37048AC, 0xF85D1B6F, 0xE1462A2E,
|
||||
0x66DE36E1, 0x7FC507A0, 0x54E85463, 0x4DF36522, 0x02B2F3E5, 0x1BA9C2A4, 0x30849167,
|
||||
0x299FA026, 0xE4C5AEB8, 0xFDDE9FF9, 0xD6F3CC3A, 0xCFE8FD7B, 0x80A96BBC, 0x99B25AFD,
|
||||
0xB29F093E, 0xAB84387F, 0x2C1C24B0, 0x350715F1, 0x1E2A4632, 0x07317773, 0x4870E1B4,
|
||||
0x516BD0F5, 0x7A468336, 0x635DB277, 0xCBFAD74E, 0xD2E1E60F, 0xF9CCB5CC, 0xE0D7848D,
|
||||
0xAF96124A, 0xB68D230B, 0x9DA070C8, 0x84BB4189, 0x03235D46, 0x1A386C07, 0x31153FC4,
|
||||
0x280E0E85, 0x674F9842, 0x7E54A903, 0x5579FAC0, 0x4C62CB81, 0x8138C51F, 0x9823F45E,
|
||||
0xB30EA79D, 0xAA1596DC, 0xE554001B, 0xFC4F315A, 0xD7626299, 0xCE7953D8, 0x49E14F17,
|
||||
0x50FA7E56, 0x7BD72D95, 0x62CC1CD4, 0x2D8D8A13, 0x3496BB52, 0x1FBBE891, 0x06A0D9D0,
|
||||
0x5E7EF3EC, 0x4765C2AD, 0x6C48916E, 0x7553A02F, 0x3A1236E8, 0x230907A9, 0x0824546A,
|
||||
0x113F652B, 0x96A779E4, 0x8FBC48A5, 0xA4911B66, 0xBD8A2A27, 0xF2CBBCE0, 0xEBD08DA1,
|
||||
0xC0FDDE62, 0xD9E6EF23, 0x14BCE1BD, 0x0DA7D0FC, 0x268A833F, 0x3F91B27E, 0x70D024B9,
|
||||
0x69CB15F8, 0x42E6463B, 0x5BFD777A, 0xDC656BB5, 0xC57E5AF4, 0xEE530937, 0xF7483876,
|
||||
0xB809AEB1, 0xA1129FF0, 0x8A3FCC33, 0x9324FD72,
|
||||
],
|
||||
[
|
||||
0x00000000, 0x01C26A37, 0x0384D46E, 0x0246BE59, 0x0709A8DC, 0x06CBC2EB, 0x048D7CB2,
|
||||
0x054F1685, 0x0E1351B8, 0x0FD13B8F, 0x0D9785D6, 0x0C55EFE1, 0x091AF964, 0x08D89353,
|
||||
0x0A9E2D0A, 0x0B5C473D, 0x1C26A370, 0x1DE4C947, 0x1FA2771E, 0x1E601D29, 0x1B2F0BAC,
|
||||
0x1AED619B, 0x18ABDFC2, 0x1969B5F5, 0x1235F2C8, 0x13F798FF, 0x11B126A6, 0x10734C91,
|
||||
0x153C5A14, 0x14FE3023, 0x16B88E7A, 0x177AE44D, 0x384D46E0, 0x398F2CD7, 0x3BC9928E,
|
||||
0x3A0BF8B9, 0x3F44EE3C, 0x3E86840B, 0x3CC03A52, 0x3D025065, 0x365E1758, 0x379C7D6F,
|
||||
0x35DAC336, 0x3418A901, 0x3157BF84, 0x3095D5B3, 0x32D36BEA, 0x331101DD, 0x246BE590,
|
||||
0x25A98FA7, 0x27EF31FE, 0x262D5BC9, 0x23624D4C, 0x22A0277B, 0x20E69922, 0x2124F315,
|
||||
0x2A78B428, 0x2BBADE1F, 0x29FC6046, 0x283E0A71, 0x2D711CF4, 0x2CB376C3, 0x2EF5C89A,
|
||||
0x2F37A2AD, 0x709A8DC0, 0x7158E7F7, 0x731E59AE, 0x72DC3399, 0x7793251C, 0x76514F2B,
|
||||
0x7417F172, 0x75D59B45, 0x7E89DC78, 0x7F4BB64F, 0x7D0D0816, 0x7CCF6221, 0x798074A4,
|
||||
0x78421E93, 0x7A04A0CA, 0x7BC6CAFD, 0x6CBC2EB0, 0x6D7E4487, 0x6F38FADE, 0x6EFA90E9,
|
||||
0x6BB5866C, 0x6A77EC5B, 0x68315202, 0x69F33835, 0x62AF7F08, 0x636D153F, 0x612BAB66,
|
||||
0x60E9C151, 0x65A6D7D4, 0x6464BDE3, 0x662203BA, 0x67E0698D, 0x48D7CB20, 0x4915A117,
|
||||
0x4B531F4E, 0x4A917579, 0x4FDE63FC, 0x4E1C09CB, 0x4C5AB792, 0x4D98DDA5, 0x46C49A98,
|
||||
0x4706F0AF, 0x45404EF6, 0x448224C1, 0x41CD3244, 0x400F5873, 0x4249E62A, 0x438B8C1D,
|
||||
0x54F16850, 0x55330267, 0x5775BC3E, 0x56B7D609, 0x53F8C08C, 0x523AAABB, 0x507C14E2,
|
||||
0x51BE7ED5, 0x5AE239E8, 0x5B2053DF, 0x5966ED86, 0x58A487B1, 0x5DEB9134, 0x5C29FB03,
|
||||
0x5E6F455A, 0x5FAD2F6D, 0xE1351B80, 0xE0F771B7, 0xE2B1CFEE, 0xE373A5D9, 0xE63CB35C,
|
||||
0xE7FED96B, 0xE5B86732, 0xE47A0D05, 0xEF264A38, 0xEEE4200F, 0xECA29E56, 0xED60F461,
|
||||
0xE82FE2E4, 0xE9ED88D3, 0xEBAB368A, 0xEA695CBD, 0xFD13B8F0, 0xFCD1D2C7, 0xFE976C9E,
|
||||
0xFF5506A9, 0xFA1A102C, 0xFBD87A1B, 0xF99EC442, 0xF85CAE75, 0xF300E948, 0xF2C2837F,
|
||||
0xF0843D26, 0xF1465711, 0xF4094194, 0xF5CB2BA3, 0xF78D95FA, 0xF64FFFCD, 0xD9785D60,
|
||||
0xD8BA3757, 0xDAFC890E, 0xDB3EE339, 0xDE71F5BC, 0xDFB39F8B, 0xDDF521D2, 0xDC374BE5,
|
||||
0xD76B0CD8, 0xD6A966EF, 0xD4EFD8B6, 0xD52DB281, 0xD062A404, 0xD1A0CE33, 0xD3E6706A,
|
||||
0xD2241A5D, 0xC55EFE10, 0xC49C9427, 0xC6DA2A7E, 0xC7184049, 0xC25756CC, 0xC3953CFB,
|
||||
0xC1D382A2, 0xC011E895, 0xCB4DAFA8, 0xCA8FC59F, 0xC8C97BC6, 0xC90B11F1, 0xCC440774,
|
||||
0xCD866D43, 0xCFC0D31A, 0xCE02B92D, 0x91AF9640, 0x906DFC77, 0x922B422E, 0x93E92819,
|
||||
0x96A63E9C, 0x976454AB, 0x9522EAF2, 0x94E080C5, 0x9FBCC7F8, 0x9E7EADCF, 0x9C381396,
|
||||
0x9DFA79A1, 0x98B56F24, 0x99770513, 0x9B31BB4A, 0x9AF3D17D, 0x8D893530, 0x8C4B5F07,
|
||||
0x8E0DE15E, 0x8FCF8B69, 0x8A809DEC, 0x8B42F7DB, 0x89044982, 0x88C623B5, 0x839A6488,
|
||||
0x82580EBF, 0x801EB0E6, 0x81DCDAD1, 0x8493CC54, 0x8551A663, 0x8717183A, 0x86D5720D,
|
||||
0xA9E2D0A0, 0xA820BA97, 0xAA6604CE, 0xABA46EF9, 0xAEEB787C, 0xAF29124B, 0xAD6FAC12,
|
||||
0xACADC625, 0xA7F18118, 0xA633EB2F, 0xA4755576, 0xA5B73F41, 0xA0F829C4, 0xA13A43F3,
|
||||
0xA37CFDAA, 0xA2BE979D, 0xB5C473D0, 0xB40619E7, 0xB640A7BE, 0xB782CD89, 0xB2CDDB0C,
|
||||
0xB30FB13B, 0xB1490F62, 0xB08B6555, 0xBBD72268, 0xBA15485F, 0xB853F606, 0xB9919C31,
|
||||
0xBCDE8AB4, 0xBD1CE083, 0xBF5A5EDA, 0xBE9834ED,
|
||||
],
|
||||
[
|
||||
0x00000000, 0xB8BC6765, 0xAA09C88B, 0x12B5AFEE, 0x8F629757, 0x37DEF032, 0x256B5FDC,
|
||||
0x9DD738B9, 0xC5B428EF, 0x7D084F8A, 0x6FBDE064, 0xD7018701, 0x4AD6BFB8, 0xF26AD8DD,
|
||||
0xE0DF7733, 0x58631056, 0x5019579F, 0xE8A530FA, 0xFA109F14, 0x42ACF871, 0xDF7BC0C8,
|
||||
0x67C7A7AD, 0x75720843, 0xCDCE6F26, 0x95AD7F70, 0x2D111815, 0x3FA4B7FB, 0x8718D09E,
|
||||
0x1ACFE827, 0xA2738F42, 0xB0C620AC, 0x087A47C9, 0xA032AF3E, 0x188EC85B, 0x0A3B67B5,
|
||||
0xB28700D0, 0x2F503869, 0x97EC5F0C, 0x8559F0E2, 0x3DE59787, 0x658687D1, 0xDD3AE0B4,
|
||||
0xCF8F4F5A, 0x7733283F, 0xEAE41086, 0x525877E3, 0x40EDD80D, 0xF851BF68, 0xF02BF8A1,
|
||||
0x48979FC4, 0x5A22302A, 0xE29E574F, 0x7F496FF6, 0xC7F50893, 0xD540A77D, 0x6DFCC018,
|
||||
0x359FD04E, 0x8D23B72B, 0x9F9618C5, 0x272A7FA0, 0xBAFD4719, 0x0241207C, 0x10F48F92,
|
||||
0xA848E8F7, 0x9B14583D, 0x23A83F58, 0x311D90B6, 0x89A1F7D3, 0x1476CF6A, 0xACCAA80F,
|
||||
0xBE7F07E1, 0x06C36084, 0x5EA070D2, 0xE61C17B7, 0xF4A9B859, 0x4C15DF3C, 0xD1C2E785,
|
||||
0x697E80E0, 0x7BCB2F0E, 0xC377486B, 0xCB0D0FA2, 0x73B168C7, 0x6104C729, 0xD9B8A04C,
|
||||
0x446F98F5, 0xFCD3FF90, 0xEE66507E, 0x56DA371B, 0x0EB9274D, 0xB6054028, 0xA4B0EFC6,
|
||||
0x1C0C88A3, 0x81DBB01A, 0x3967D77F, 0x2BD27891, 0x936E1FF4, 0x3B26F703, 0x839A9066,
|
||||
0x912F3F88, 0x299358ED, 0xB4446054, 0x0CF80731, 0x1E4DA8DF, 0xA6F1CFBA, 0xFE92DFEC,
|
||||
0x462EB889, 0x549B1767, 0xEC277002, 0x71F048BB, 0xC94C2FDE, 0xDBF98030, 0x6345E755,
|
||||
0x6B3FA09C, 0xD383C7F9, 0xC1366817, 0x798A0F72, 0xE45D37CB, 0x5CE150AE, 0x4E54FF40,
|
||||
0xF6E89825, 0xAE8B8873, 0x1637EF16, 0x048240F8, 0xBC3E279D, 0x21E91F24, 0x99557841,
|
||||
0x8BE0D7AF, 0x335CB0CA, 0xED59B63B, 0x55E5D15E, 0x47507EB0, 0xFFEC19D5, 0x623B216C,
|
||||
0xDA874609, 0xC832E9E7, 0x708E8E82, 0x28ED9ED4, 0x9051F9B1, 0x82E4565F, 0x3A58313A,
|
||||
0xA78F0983, 0x1F336EE6, 0x0D86C108, 0xB53AA66D, 0xBD40E1A4, 0x05FC86C1, 0x1749292F,
|
||||
0xAFF54E4A, 0x322276F3, 0x8A9E1196, 0x982BBE78, 0x2097D91D, 0x78F4C94B, 0xC048AE2E,
|
||||
0xD2FD01C0, 0x6A4166A5, 0xF7965E1C, 0x4F2A3979, 0x5D9F9697, 0xE523F1F2, 0x4D6B1905,
|
||||
0xF5D77E60, 0xE762D18E, 0x5FDEB6EB, 0xC2098E52, 0x7AB5E937, 0x680046D9, 0xD0BC21BC,
|
||||
0x88DF31EA, 0x3063568F, 0x22D6F961, 0x9A6A9E04, 0x07BDA6BD, 0xBF01C1D8, 0xADB46E36,
|
||||
0x15080953, 0x1D724E9A, 0xA5CE29FF, 0xB77B8611, 0x0FC7E174, 0x9210D9CD, 0x2AACBEA8,
|
||||
0x38191146, 0x80A57623, 0xD8C66675, 0x607A0110, 0x72CFAEFE, 0xCA73C99B, 0x57A4F122,
|
||||
0xEF189647, 0xFDAD39A9, 0x45115ECC, 0x764DEE06, 0xCEF18963, 0xDC44268D, 0x64F841E8,
|
||||
0xF92F7951, 0x41931E34, 0x5326B1DA, 0xEB9AD6BF, 0xB3F9C6E9, 0x0B45A18C, 0x19F00E62,
|
||||
0xA14C6907, 0x3C9B51BE, 0x842736DB, 0x96929935, 0x2E2EFE50, 0x2654B999, 0x9EE8DEFC,
|
||||
0x8C5D7112, 0x34E11677, 0xA9362ECE, 0x118A49AB, 0x033FE645, 0xBB838120, 0xE3E09176,
|
||||
0x5B5CF613, 0x49E959FD, 0xF1553E98, 0x6C820621, 0xD43E6144, 0xC68BCEAA, 0x7E37A9CF,
|
||||
0xD67F4138, 0x6EC3265D, 0x7C7689B3, 0xC4CAEED6, 0x591DD66F, 0xE1A1B10A, 0xF3141EE4,
|
||||
0x4BA87981, 0x13CB69D7, 0xAB770EB2, 0xB9C2A15C, 0x017EC639, 0x9CA9FE80, 0x241599E5,
|
||||
0x36A0360B, 0x8E1C516E, 0x866616A7, 0x3EDA71C2, 0x2C6FDE2C, 0x94D3B949, 0x090481F0,
|
||||
0xB1B8E695, 0xA30D497B, 0x1BB12E1E, 0x43D23E48, 0xFB6E592D, 0xE9DBF6C3, 0x516791A6,
|
||||
0xCCB0A91F, 0x740CCE7A, 0x66B96194, 0xDE0506F1,
|
||||
],
|
||||
[
|
||||
0x00000000, 0x3D6029B0, 0x7AC05360, 0x47A07AD0, 0xF580A6C0, 0xC8E08F70, 0x8F40F5A0,
|
||||
0xB220DC10, 0x30704BC1, 0x0D106271, 0x4AB018A1, 0x77D03111, 0xC5F0ED01, 0xF890C4B1,
|
||||
0xBF30BE61, 0x825097D1, 0x60E09782, 0x5D80BE32, 0x1A20C4E2, 0x2740ED52, 0x95603142,
|
||||
0xA80018F2, 0xEFA06222, 0xD2C04B92, 0x5090DC43, 0x6DF0F5F3, 0x2A508F23, 0x1730A693,
|
||||
0xA5107A83, 0x98705333, 0xDFD029E3, 0xE2B00053, 0xC1C12F04, 0xFCA106B4, 0xBB017C64,
|
||||
0x866155D4, 0x344189C4, 0x0921A074, 0x4E81DAA4, 0x73E1F314, 0xF1B164C5, 0xCCD14D75,
|
||||
0x8B7137A5, 0xB6111E15, 0x0431C205, 0x3951EBB5, 0x7EF19165, 0x4391B8D5, 0xA121B886,
|
||||
0x9C419136, 0xDBE1EBE6, 0xE681C256, 0x54A11E46, 0x69C137F6, 0x2E614D26, 0x13016496,
|
||||
0x9151F347, 0xAC31DAF7, 0xEB91A027, 0xD6F18997, 0x64D15587, 0x59B17C37, 0x1E1106E7,
|
||||
0x23712F57, 0x58F35849, 0x659371F9, 0x22330B29, 0x1F532299, 0xAD73FE89, 0x9013D739,
|
||||
0xD7B3ADE9, 0xEAD38459, 0x68831388, 0x55E33A38, 0x124340E8, 0x2F236958, 0x9D03B548,
|
||||
0xA0639CF8, 0xE7C3E628, 0xDAA3CF98, 0x3813CFCB, 0x0573E67B, 0x42D39CAB, 0x7FB3B51B,
|
||||
0xCD93690B, 0xF0F340BB, 0xB7533A6B, 0x8A3313DB, 0x0863840A, 0x3503ADBA, 0x72A3D76A,
|
||||
0x4FC3FEDA, 0xFDE322CA, 0xC0830B7A, 0x872371AA, 0xBA43581A, 0x9932774D, 0xA4525EFD,
|
||||
0xE3F2242D, 0xDE920D9D, 0x6CB2D18D, 0x51D2F83D, 0x167282ED, 0x2B12AB5D, 0xA9423C8C,
|
||||
0x9422153C, 0xD3826FEC, 0xEEE2465C, 0x5CC29A4C, 0x61A2B3FC, 0x2602C92C, 0x1B62E09C,
|
||||
0xF9D2E0CF, 0xC4B2C97F, 0x8312B3AF, 0xBE729A1F, 0x0C52460F, 0x31326FBF, 0x7692156F,
|
||||
0x4BF23CDF, 0xC9A2AB0E, 0xF4C282BE, 0xB362F86E, 0x8E02D1DE, 0x3C220DCE, 0x0142247E,
|
||||
0x46E25EAE, 0x7B82771E, 0xB1E6B092, 0x8C869922, 0xCB26E3F2, 0xF646CA42, 0x44661652,
|
||||
0x79063FE2, 0x3EA64532, 0x03C66C82, 0x8196FB53, 0xBCF6D2E3, 0xFB56A833, 0xC6368183,
|
||||
0x74165D93, 0x49767423, 0x0ED60EF3, 0x33B62743, 0xD1062710, 0xEC660EA0, 0xABC67470,
|
||||
0x96A65DC0, 0x248681D0, 0x19E6A860, 0x5E46D2B0, 0x6326FB00, 0xE1766CD1, 0xDC164561,
|
||||
0x9BB63FB1, 0xA6D61601, 0x14F6CA11, 0x2996E3A1, 0x6E369971, 0x5356B0C1, 0x70279F96,
|
||||
0x4D47B626, 0x0AE7CCF6, 0x3787E546, 0x85A73956, 0xB8C710E6, 0xFF676A36, 0xC2074386,
|
||||
0x4057D457, 0x7D37FDE7, 0x3A978737, 0x07F7AE87, 0xB5D77297, 0x88B75B27, 0xCF1721F7,
|
||||
0xF2770847, 0x10C70814, 0x2DA721A4, 0x6A075B74, 0x576772C4, 0xE547AED4, 0xD8278764,
|
||||
0x9F87FDB4, 0xA2E7D404, 0x20B743D5, 0x1DD76A65, 0x5A7710B5, 0x67173905, 0xD537E515,
|
||||
0xE857CCA5, 0xAFF7B675, 0x92979FC5, 0xE915E8DB, 0xD475C16B, 0x93D5BBBB, 0xAEB5920B,
|
||||
0x1C954E1B, 0x21F567AB, 0x66551D7B, 0x5B3534CB, 0xD965A31A, 0xE4058AAA, 0xA3A5F07A,
|
||||
0x9EC5D9CA, 0x2CE505DA, 0x11852C6A, 0x562556BA, 0x6B457F0A, 0x89F57F59, 0xB49556E9,
|
||||
0xF3352C39, 0xCE550589, 0x7C75D999, 0x4115F029, 0x06B58AF9, 0x3BD5A349, 0xB9853498,
|
||||
0x84E51D28, 0xC34567F8, 0xFE254E48, 0x4C059258, 0x7165BBE8, 0x36C5C138, 0x0BA5E888,
|
||||
0x28D4C7DF, 0x15B4EE6F, 0x521494BF, 0x6F74BD0F, 0xDD54611F, 0xE03448AF, 0xA794327F,
|
||||
0x9AF41BCF, 0x18A48C1E, 0x25C4A5AE, 0x6264DF7E, 0x5F04F6CE, 0xED242ADE, 0xD044036E,
|
||||
0x97E479BE, 0xAA84500E, 0x4834505D, 0x755479ED, 0x32F4033D, 0x0F942A8D, 0xBDB4F69D,
|
||||
0x80D4DF2D, 0xC774A5FD, 0xFA148C4D, 0x78441B9C, 0x4524322C, 0x028448FC, 0x3FE4614C,
|
||||
0x8DC4BD5C, 0xB0A494EC, 0xF704EE3C, 0xCA64C78C,
|
||||
],
|
||||
[
|
||||
0x00000000, 0xCB5CD3A5, 0x4DC8A10B, 0x869472AE, 0x9B914216, 0x50CD91B3, 0xD659E31D,
|
||||
0x1D0530B8, 0xEC53826D, 0x270F51C8, 0xA19B2366, 0x6AC7F0C3, 0x77C2C07B, 0xBC9E13DE,
|
||||
0x3A0A6170, 0xF156B2D5, 0x03D6029B, 0xC88AD13E, 0x4E1EA390, 0x85427035, 0x9847408D,
|
||||
0x531B9328, 0xD58FE186, 0x1ED33223, 0xEF8580F6, 0x24D95353, 0xA24D21FD, 0x6911F258,
|
||||
0x7414C2E0, 0xBF481145, 0x39DC63EB, 0xF280B04E, 0x07AC0536, 0xCCF0D693, 0x4A64A43D,
|
||||
0x81387798, 0x9C3D4720, 0x57619485, 0xD1F5E62B, 0x1AA9358E, 0xEBFF875B, 0x20A354FE,
|
||||
0xA6372650, 0x6D6BF5F5, 0x706EC54D, 0xBB3216E8, 0x3DA66446, 0xF6FAB7E3, 0x047A07AD,
|
||||
0xCF26D408, 0x49B2A6A6, 0x82EE7503, 0x9FEB45BB, 0x54B7961E, 0xD223E4B0, 0x197F3715,
|
||||
0xE82985C0, 0x23755665, 0xA5E124CB, 0x6EBDF76E, 0x73B8C7D6, 0xB8E41473, 0x3E7066DD,
|
||||
0xF52CB578, 0x0F580A6C, 0xC404D9C9, 0x4290AB67, 0x89CC78C2, 0x94C9487A, 0x5F959BDF,
|
||||
0xD901E971, 0x125D3AD4, 0xE30B8801, 0x28575BA4, 0xAEC3290A, 0x659FFAAF, 0x789ACA17,
|
||||
0xB3C619B2, 0x35526B1C, 0xFE0EB8B9, 0x0C8E08F7, 0xC7D2DB52, 0x4146A9FC, 0x8A1A7A59,
|
||||
0x971F4AE1, 0x5C439944, 0xDAD7EBEA, 0x118B384F, 0xE0DD8A9A, 0x2B81593F, 0xAD152B91,
|
||||
0x6649F834, 0x7B4CC88C, 0xB0101B29, 0x36846987, 0xFDD8BA22, 0x08F40F5A, 0xC3A8DCFF,
|
||||
0x453CAE51, 0x8E607DF4, 0x93654D4C, 0x58399EE9, 0xDEADEC47, 0x15F13FE2, 0xE4A78D37,
|
||||
0x2FFB5E92, 0xA96F2C3C, 0x6233FF99, 0x7F36CF21, 0xB46A1C84, 0x32FE6E2A, 0xF9A2BD8F,
|
||||
0x0B220DC1, 0xC07EDE64, 0x46EAACCA, 0x8DB67F6F, 0x90B34FD7, 0x5BEF9C72, 0xDD7BEEDC,
|
||||
0x16273D79, 0xE7718FAC, 0x2C2D5C09, 0xAAB92EA7, 0x61E5FD02, 0x7CE0CDBA, 0xB7BC1E1F,
|
||||
0x31286CB1, 0xFA74BF14, 0x1EB014D8, 0xD5ECC77D, 0x5378B5D3, 0x98246676, 0x852156CE,
|
||||
0x4E7D856B, 0xC8E9F7C5, 0x03B52460, 0xF2E396B5, 0x39BF4510, 0xBF2B37BE, 0x7477E41B,
|
||||
0x6972D4A3, 0xA22E0706, 0x24BA75A8, 0xEFE6A60D, 0x1D661643, 0xD63AC5E6, 0x50AEB748,
|
||||
0x9BF264ED, 0x86F75455, 0x4DAB87F0, 0xCB3FF55E, 0x006326FB, 0xF135942E, 0x3A69478B,
|
||||
0xBCFD3525, 0x77A1E680, 0x6AA4D638, 0xA1F8059D, 0x276C7733, 0xEC30A496, 0x191C11EE,
|
||||
0xD240C24B, 0x54D4B0E5, 0x9F886340, 0x828D53F8, 0x49D1805D, 0xCF45F2F3, 0x04192156,
|
||||
0xF54F9383, 0x3E134026, 0xB8873288, 0x73DBE12D, 0x6EDED195, 0xA5820230, 0x2316709E,
|
||||
0xE84AA33B, 0x1ACA1375, 0xD196C0D0, 0x5702B27E, 0x9C5E61DB, 0x815B5163, 0x4A0782C6,
|
||||
0xCC93F068, 0x07CF23CD, 0xF6999118, 0x3DC542BD, 0xBB513013, 0x700DE3B6, 0x6D08D30E,
|
||||
0xA65400AB, 0x20C07205, 0xEB9CA1A0, 0x11E81EB4, 0xDAB4CD11, 0x5C20BFBF, 0x977C6C1A,
|
||||
0x8A795CA2, 0x41258F07, 0xC7B1FDA9, 0x0CED2E0C, 0xFDBB9CD9, 0x36E74F7C, 0xB0733DD2,
|
||||
0x7B2FEE77, 0x662ADECF, 0xAD760D6A, 0x2BE27FC4, 0xE0BEAC61, 0x123E1C2F, 0xD962CF8A,
|
||||
0x5FF6BD24, 0x94AA6E81, 0x89AF5E39, 0x42F38D9C, 0xC467FF32, 0x0F3B2C97, 0xFE6D9E42,
|
||||
0x35314DE7, 0xB3A53F49, 0x78F9ECEC, 0x65FCDC54, 0xAEA00FF1, 0x28347D5F, 0xE368AEFA,
|
||||
0x16441B82, 0xDD18C827, 0x5B8CBA89, 0x90D0692C, 0x8DD55994, 0x46898A31, 0xC01DF89F,
|
||||
0x0B412B3A, 0xFA1799EF, 0x314B4A4A, 0xB7DF38E4, 0x7C83EB41, 0x6186DBF9, 0xAADA085C,
|
||||
0x2C4E7AF2, 0xE712A957, 0x15921919, 0xDECECABC, 0x585AB812, 0x93066BB7, 0x8E035B0F,
|
||||
0x455F88AA, 0xC3CBFA04, 0x089729A1, 0xF9C19B74, 0x329D48D1, 0xB4093A7F, 0x7F55E9DA,
|
||||
0x6250D962, 0xA90C0AC7, 0x2F987869, 0xE4C4ABCC,
|
||||
],
|
||||
[
|
||||
0x00000000, 0xA6770BB4, 0x979F1129, 0x31E81A9D, 0xF44F2413, 0x52382FA7, 0x63D0353A,
|
||||
0xC5A73E8E, 0x33EF4E67, 0x959845D3, 0xA4705F4E, 0x020754FA, 0xC7A06A74, 0x61D761C0,
|
||||
0x503F7B5D, 0xF64870E9, 0x67DE9CCE, 0xC1A9977A, 0xF0418DE7, 0x56368653, 0x9391B8DD,
|
||||
0x35E6B369, 0x040EA9F4, 0xA279A240, 0x5431D2A9, 0xF246D91D, 0xC3AEC380, 0x65D9C834,
|
||||
0xA07EF6BA, 0x0609FD0E, 0x37E1E793, 0x9196EC27, 0xCFBD399C, 0x69CA3228, 0x582228B5,
|
||||
0xFE552301, 0x3BF21D8F, 0x9D85163B, 0xAC6D0CA6, 0x0A1A0712, 0xFC5277FB, 0x5A257C4F,
|
||||
0x6BCD66D2, 0xCDBA6D66, 0x081D53E8, 0xAE6A585C, 0x9F8242C1, 0x39F54975, 0xA863A552,
|
||||
0x0E14AEE6, 0x3FFCB47B, 0x998BBFCF, 0x5C2C8141, 0xFA5B8AF5, 0xCBB39068, 0x6DC49BDC,
|
||||
0x9B8CEB35, 0x3DFBE081, 0x0C13FA1C, 0xAA64F1A8, 0x6FC3CF26, 0xC9B4C492, 0xF85CDE0F,
|
||||
0x5E2BD5BB, 0x440B7579, 0xE27C7ECD, 0xD3946450, 0x75E36FE4, 0xB044516A, 0x16335ADE,
|
||||
0x27DB4043, 0x81AC4BF7, 0x77E43B1E, 0xD19330AA, 0xE07B2A37, 0x460C2183, 0x83AB1F0D,
|
||||
0x25DC14B9, 0x14340E24, 0xB2430590, 0x23D5E9B7, 0x85A2E203, 0xB44AF89E, 0x123DF32A,
|
||||
0xD79ACDA4, 0x71EDC610, 0x4005DC8D, 0xE672D739, 0x103AA7D0, 0xB64DAC64, 0x87A5B6F9,
|
||||
0x21D2BD4D, 0xE47583C3, 0x42028877, 0x73EA92EA, 0xD59D995E, 0x8BB64CE5, 0x2DC14751,
|
||||
0x1C295DCC, 0xBA5E5678, 0x7FF968F6, 0xD98E6342, 0xE86679DF, 0x4E11726B, 0xB8590282,
|
||||
0x1E2E0936, 0x2FC613AB, 0x89B1181F, 0x4C162691, 0xEA612D25, 0xDB8937B8, 0x7DFE3C0C,
|
||||
0xEC68D02B, 0x4A1FDB9F, 0x7BF7C102, 0xDD80CAB6, 0x1827F438, 0xBE50FF8C, 0x8FB8E511,
|
||||
0x29CFEEA5, 0xDF879E4C, 0x79F095F8, 0x48188F65, 0xEE6F84D1, 0x2BC8BA5F, 0x8DBFB1EB,
|
||||
0xBC57AB76, 0x1A20A0C2, 0x8816EAF2, 0x2E61E146, 0x1F89FBDB, 0xB9FEF06F, 0x7C59CEE1,
|
||||
0xDA2EC555, 0xEBC6DFC8, 0x4DB1D47C, 0xBBF9A495, 0x1D8EAF21, 0x2C66B5BC, 0x8A11BE08,
|
||||
0x4FB68086, 0xE9C18B32, 0xD82991AF, 0x7E5E9A1B, 0xEFC8763C, 0x49BF7D88, 0x78576715,
|
||||
0xDE206CA1, 0x1B87522F, 0xBDF0599B, 0x8C184306, 0x2A6F48B2, 0xDC27385B, 0x7A5033EF,
|
||||
0x4BB82972, 0xEDCF22C6, 0x28681C48, 0x8E1F17FC, 0xBFF70D61, 0x198006D5, 0x47ABD36E,
|
||||
0xE1DCD8DA, 0xD034C247, 0x7643C9F3, 0xB3E4F77D, 0x1593FCC9, 0x247BE654, 0x820CEDE0,
|
||||
0x74449D09, 0xD23396BD, 0xE3DB8C20, 0x45AC8794, 0x800BB91A, 0x267CB2AE, 0x1794A833,
|
||||
0xB1E3A387, 0x20754FA0, 0x86024414, 0xB7EA5E89, 0x119D553D, 0xD43A6BB3, 0x724D6007,
|
||||
0x43A57A9A, 0xE5D2712E, 0x139A01C7, 0xB5ED0A73, 0x840510EE, 0x22721B5A, 0xE7D525D4,
|
||||
0x41A22E60, 0x704A34FD, 0xD63D3F49, 0xCC1D9F8B, 0x6A6A943F, 0x5B828EA2, 0xFDF58516,
|
||||
0x3852BB98, 0x9E25B02C, 0xAFCDAAB1, 0x09BAA105, 0xFFF2D1EC, 0x5985DA58, 0x686DC0C5,
|
||||
0xCE1ACB71, 0x0BBDF5FF, 0xADCAFE4B, 0x9C22E4D6, 0x3A55EF62, 0xABC30345, 0x0DB408F1,
|
||||
0x3C5C126C, 0x9A2B19D8, 0x5F8C2756, 0xF9FB2CE2, 0xC813367F, 0x6E643DCB, 0x982C4D22,
|
||||
0x3E5B4696, 0x0FB35C0B, 0xA9C457BF, 0x6C636931, 0xCA146285, 0xFBFC7818, 0x5D8B73AC,
|
||||
0x03A0A617, 0xA5D7ADA3, 0x943FB73E, 0x3248BC8A, 0xF7EF8204, 0x519889B0, 0x6070932D,
|
||||
0xC6079899, 0x304FE870, 0x9638E3C4, 0xA7D0F959, 0x01A7F2ED, 0xC400CC63, 0x6277C7D7,
|
||||
0x539FDD4A, 0xF5E8D6FE, 0x647E3AD9, 0xC209316D, 0xF3E12BF0, 0x55962044, 0x90311ECA,
|
||||
0x3646157E, 0x07AE0FE3, 0xA1D90457, 0x579174BE, 0xF1E67F0A, 0xC00E6597, 0x66796E23,
|
||||
0xA3DE50AD, 0x05A95B19, 0x34414184, 0x92364A30,
|
||||
],
|
||||
[
|
||||
0x00000000, 0xCCAA009E, 0x4225077D, 0x8E8F07E3, 0x844A0EFA, 0x48E00E64, 0xC66F0987,
|
||||
0x0AC50919, 0xD3E51BB5, 0x1F4F1B2B, 0x91C01CC8, 0x5D6A1C56, 0x57AF154F, 0x9B0515D1,
|
||||
0x158A1232, 0xD92012AC, 0x7CBB312B, 0xB01131B5, 0x3E9E3656, 0xF23436C8, 0xF8F13FD1,
|
||||
0x345B3F4F, 0xBAD438AC, 0x767E3832, 0xAF5E2A9E, 0x63F42A00, 0xED7B2DE3, 0x21D12D7D,
|
||||
0x2B142464, 0xE7BE24FA, 0x69312319, 0xA59B2387, 0xF9766256, 0x35DC62C8, 0xBB53652B,
|
||||
0x77F965B5, 0x7D3C6CAC, 0xB1966C32, 0x3F196BD1, 0xF3B36B4F, 0x2A9379E3, 0xE639797D,
|
||||
0x68B67E9E, 0xA41C7E00, 0xAED97719, 0x62737787, 0xECFC7064, 0x205670FA, 0x85CD537D,
|
||||
0x496753E3, 0xC7E85400, 0x0B42549E, 0x01875D87, 0xCD2D5D19, 0x43A25AFA, 0x8F085A64,
|
||||
0x562848C8, 0x9A824856, 0x140D4FB5, 0xD8A74F2B, 0xD2624632, 0x1EC846AC, 0x9047414F,
|
||||
0x5CED41D1, 0x299DC2ED, 0xE537C273, 0x6BB8C590, 0xA712C50E, 0xADD7CC17, 0x617DCC89,
|
||||
0xEFF2CB6A, 0x2358CBF4, 0xFA78D958, 0x36D2D9C6, 0xB85DDE25, 0x74F7DEBB, 0x7E32D7A2,
|
||||
0xB298D73C, 0x3C17D0DF, 0xF0BDD041, 0x5526F3C6, 0x998CF358, 0x1703F4BB, 0xDBA9F425,
|
||||
0xD16CFD3C, 0x1DC6FDA2, 0x9349FA41, 0x5FE3FADF, 0x86C3E873, 0x4A69E8ED, 0xC4E6EF0E,
|
||||
0x084CEF90, 0x0289E689, 0xCE23E617, 0x40ACE1F4, 0x8C06E16A, 0xD0EBA0BB, 0x1C41A025,
|
||||
0x92CEA7C6, 0x5E64A758, 0x54A1AE41, 0x980BAEDF, 0x1684A93C, 0xDA2EA9A2, 0x030EBB0E,
|
||||
0xCFA4BB90, 0x412BBC73, 0x8D81BCED, 0x8744B5F4, 0x4BEEB56A, 0xC561B289, 0x09CBB217,
|
||||
0xAC509190, 0x60FA910E, 0xEE7596ED, 0x22DF9673, 0x281A9F6A, 0xE4B09FF4, 0x6A3F9817,
|
||||
0xA6959889, 0x7FB58A25, 0xB31F8ABB, 0x3D908D58, 0xF13A8DC6, 0xFBFF84DF, 0x37558441,
|
||||
0xB9DA83A2, 0x7570833C, 0x533B85DA, 0x9F918544, 0x111E82A7, 0xDDB48239, 0xD7718B20,
|
||||
0x1BDB8BBE, 0x95548C5D, 0x59FE8CC3, 0x80DE9E6F, 0x4C749EF1, 0xC2FB9912, 0x0E51998C,
|
||||
0x04949095, 0xC83E900B, 0x46B197E8, 0x8A1B9776, 0x2F80B4F1, 0xE32AB46F, 0x6DA5B38C,
|
||||
0xA10FB312, 0xABCABA0B, 0x6760BA95, 0xE9EFBD76, 0x2545BDE8, 0xFC65AF44, 0x30CFAFDA,
|
||||
0xBE40A839, 0x72EAA8A7, 0x782FA1BE, 0xB485A120, 0x3A0AA6C3, 0xF6A0A65D, 0xAA4DE78C,
|
||||
0x66E7E712, 0xE868E0F1, 0x24C2E06F, 0x2E07E976, 0xE2ADE9E8, 0x6C22EE0B, 0xA088EE95,
|
||||
0x79A8FC39, 0xB502FCA7, 0x3B8DFB44, 0xF727FBDA, 0xFDE2F2C3, 0x3148F25D, 0xBFC7F5BE,
|
||||
0x736DF520, 0xD6F6D6A7, 0x1A5CD639, 0x94D3D1DA, 0x5879D144, 0x52BCD85D, 0x9E16D8C3,
|
||||
0x1099DF20, 0xDC33DFBE, 0x0513CD12, 0xC9B9CD8C, 0x4736CA6F, 0x8B9CCAF1, 0x8159C3E8,
|
||||
0x4DF3C376, 0xC37CC495, 0x0FD6C40B, 0x7AA64737, 0xB60C47A9, 0x3883404A, 0xF42940D4,
|
||||
0xFEEC49CD, 0x32464953, 0xBCC94EB0, 0x70634E2E, 0xA9435C82, 0x65E95C1C, 0xEB665BFF,
|
||||
0x27CC5B61, 0x2D095278, 0xE1A352E6, 0x6F2C5505, 0xA386559B, 0x061D761C, 0xCAB77682,
|
||||
0x44387161, 0x889271FF, 0x825778E6, 0x4EFD7878, 0xC0727F9B, 0x0CD87F05, 0xD5F86DA9,
|
||||
0x19526D37, 0x97DD6AD4, 0x5B776A4A, 0x51B26353, 0x9D1863CD, 0x1397642E, 0xDF3D64B0,
|
||||
0x83D02561, 0x4F7A25FF, 0xC1F5221C, 0x0D5F2282, 0x079A2B9B, 0xCB302B05, 0x45BF2CE6,
|
||||
0x89152C78, 0x50353ED4, 0x9C9F3E4A, 0x121039A9, 0xDEBA3937, 0xD47F302E, 0x18D530B0,
|
||||
0x965A3753, 0x5AF037CD, 0xFF6B144A, 0x33C114D4, 0xBD4E1337, 0x71E413A9, 0x7B211AB0,
|
||||
0xB78B1A2E, 0x39041DCD, 0xF5AE1D53, 0x2C8E0FFF, 0xE0240F61, 0x6EAB0882, 0xA201081C,
|
||||
0xA8C40105, 0x646E019B, 0xEAE10678, 0x264B06E6,
|
||||
],
|
||||
[
|
||||
0x00000000, 0x177B1443, 0x2EF62886, 0x398D3CC5, 0x5DEC510C, 0x4A97454F, 0x731A798A,
|
||||
0x64616DC9, 0xBBD8A218, 0xACA3B65B, 0x952E8A9E, 0x82559EDD, 0xE634F314, 0xF14FE757,
|
||||
0xC8C2DB92, 0xDFB9CFD1, 0xACC04271, 0xBBBB5632, 0x82366AF7, 0x954D7EB4, 0xF12C137D,
|
||||
0xE657073E, 0xDFDA3BFB, 0xC8A12FB8, 0x1718E069, 0x0063F42A, 0x39EEC8EF, 0x2E95DCAC,
|
||||
0x4AF4B165, 0x5D8FA526, 0x640299E3, 0x73798DA0, 0x82F182A3, 0x958A96E0, 0xAC07AA25,
|
||||
0xBB7CBE66, 0xDF1DD3AF, 0xC866C7EC, 0xF1EBFB29, 0xE690EF6A, 0x392920BB, 0x2E5234F8,
|
||||
0x17DF083D, 0x00A41C7E, 0x64C571B7, 0x73BE65F4, 0x4A335931, 0x5D484D72, 0x2E31C0D2,
|
||||
0x394AD491, 0x00C7E854, 0x17BCFC17, 0x73DD91DE, 0x64A6859D, 0x5D2BB958, 0x4A50AD1B,
|
||||
0x95E962CA, 0x82927689, 0xBB1F4A4C, 0xAC645E0F, 0xC80533C6, 0xDF7E2785, 0xE6F31B40,
|
||||
0xF1880F03, 0xDE920307, 0xC9E91744, 0xF0642B81, 0xE71F3FC2, 0x837E520B, 0x94054648,
|
||||
0xAD887A8D, 0xBAF36ECE, 0x654AA11F, 0x7231B55C, 0x4BBC8999, 0x5CC79DDA, 0x38A6F013,
|
||||
0x2FDDE450, 0x1650D895, 0x012BCCD6, 0x72524176, 0x65295535, 0x5CA469F0, 0x4BDF7DB3,
|
||||
0x2FBE107A, 0x38C50439, 0x014838FC, 0x16332CBF, 0xC98AE36E, 0xDEF1F72D, 0xE77CCBE8,
|
||||
0xF007DFAB, 0x9466B262, 0x831DA621, 0xBA909AE4, 0xADEB8EA7, 0x5C6381A4, 0x4B1895E7,
|
||||
0x7295A922, 0x65EEBD61, 0x018FD0A8, 0x16F4C4EB, 0x2F79F82E, 0x3802EC6D, 0xE7BB23BC,
|
||||
0xF0C037FF, 0xC94D0B3A, 0xDE361F79, 0xBA5772B0, 0xAD2C66F3, 0x94A15A36, 0x83DA4E75,
|
||||
0xF0A3C3D5, 0xE7D8D796, 0xDE55EB53, 0xC92EFF10, 0xAD4F92D9, 0xBA34869A, 0x83B9BA5F,
|
||||
0x94C2AE1C, 0x4B7B61CD, 0x5C00758E, 0x658D494B, 0x72F65D08, 0x169730C1, 0x01EC2482,
|
||||
0x38611847, 0x2F1A0C04, 0x6655004F, 0x712E140C, 0x48A328C9, 0x5FD83C8A, 0x3BB95143,
|
||||
0x2CC24500, 0x154F79C5, 0x02346D86, 0xDD8DA257, 0xCAF6B614, 0xF37B8AD1, 0xE4009E92,
|
||||
0x8061F35B, 0x971AE718, 0xAE97DBDD, 0xB9ECCF9E, 0xCA95423E, 0xDDEE567D, 0xE4636AB8,
|
||||
0xF3187EFB, 0x97791332, 0x80020771, 0xB98F3BB4, 0xAEF42FF7, 0x714DE026, 0x6636F465,
|
||||
0x5FBBC8A0, 0x48C0DCE3, 0x2CA1B12A, 0x3BDAA569, 0x025799AC, 0x152C8DEF, 0xE4A482EC,
|
||||
0xF3DF96AF, 0xCA52AA6A, 0xDD29BE29, 0xB948D3E0, 0xAE33C7A3, 0x97BEFB66, 0x80C5EF25,
|
||||
0x5F7C20F4, 0x480734B7, 0x718A0872, 0x66F11C31, 0x029071F8, 0x15EB65BB, 0x2C66597E,
|
||||
0x3B1D4D3D, 0x4864C09D, 0x5F1FD4DE, 0x6692E81B, 0x71E9FC58, 0x15889191, 0x02F385D2,
|
||||
0x3B7EB917, 0x2C05AD54, 0xF3BC6285, 0xE4C776C6, 0xDD4A4A03, 0xCA315E40, 0xAE503389,
|
||||
0xB92B27CA, 0x80A61B0F, 0x97DD0F4C, 0xB8C70348, 0xAFBC170B, 0x96312BCE, 0x814A3F8D,
|
||||
0xE52B5244, 0xF2504607, 0xCBDD7AC2, 0xDCA66E81, 0x031FA150, 0x1464B513, 0x2DE989D6,
|
||||
0x3A929D95, 0x5EF3F05C, 0x4988E41F, 0x7005D8DA, 0x677ECC99, 0x14074139, 0x037C557A,
|
||||
0x3AF169BF, 0x2D8A7DFC, 0x49EB1035, 0x5E900476, 0x671D38B3, 0x70662CF0, 0xAFDFE321,
|
||||
0xB8A4F762, 0x8129CBA7, 0x9652DFE4, 0xF233B22D, 0xE548A66E, 0xDCC59AAB, 0xCBBE8EE8,
|
||||
0x3A3681EB, 0x2D4D95A8, 0x14C0A96D, 0x03BBBD2E, 0x67DAD0E7, 0x70A1C4A4, 0x492CF861,
|
||||
0x5E57EC22, 0x81EE23F3, 0x969537B0, 0xAF180B75, 0xB8631F36, 0xDC0272FF, 0xCB7966BC,
|
||||
0xF2F45A79, 0xE58F4E3A, 0x96F6C39A, 0x818DD7D9, 0xB800EB1C, 0xAF7BFF5F, 0xCB1A9296,
|
||||
0xDC6186D5, 0xE5ECBA10, 0xF297AE53, 0x2D2E6182, 0x3A5575C1, 0x03D84904, 0x14A35D47,
|
||||
0x70C2308E, 0x67B924CD, 0x5E341808, 0x494F0C4B,
|
||||
],
|
||||
[
|
||||
0x00000000, 0xEFC26B3E, 0x04F5D03D, 0xEB37BB03, 0x09EBA07A, 0xE629CB44, 0x0D1E7047,
|
||||
0xE2DC1B79, 0x13D740F4, 0xFC152BCA, 0x172290C9, 0xF8E0FBF7, 0x1A3CE08E, 0xF5FE8BB0,
|
||||
0x1EC930B3, 0xF10B5B8D, 0x27AE81E8, 0xC86CEAD6, 0x235B51D5, 0xCC993AEB, 0x2E452192,
|
||||
0xC1874AAC, 0x2AB0F1AF, 0xC5729A91, 0x3479C11C, 0xDBBBAA22, 0x308C1121, 0xDF4E7A1F,
|
||||
0x3D926166, 0xD2500A58, 0x3967B15B, 0xD6A5DA65, 0x4F5D03D0, 0xA09F68EE, 0x4BA8D3ED,
|
||||
0xA46AB8D3, 0x46B6A3AA, 0xA974C894, 0x42437397, 0xAD8118A9, 0x5C8A4324, 0xB348281A,
|
||||
0x587F9319, 0xB7BDF827, 0x5561E35E, 0xBAA38860, 0x51943363, 0xBE56585D, 0x68F38238,
|
||||
0x8731E906, 0x6C065205, 0x83C4393B, 0x61182242, 0x8EDA497C, 0x65EDF27F, 0x8A2F9941,
|
||||
0x7B24C2CC, 0x94E6A9F2, 0x7FD112F1, 0x901379CF, 0x72CF62B6, 0x9D0D0988, 0x763AB28B,
|
||||
0x99F8D9B5, 0x9EBA07A0, 0x71786C9E, 0x9A4FD79D, 0x758DBCA3, 0x9751A7DA, 0x7893CCE4,
|
||||
0x93A477E7, 0x7C661CD9, 0x8D6D4754, 0x62AF2C6A, 0x89989769, 0x665AFC57, 0x8486E72E,
|
||||
0x6B448C10, 0x80733713, 0x6FB15C2D, 0xB9148648, 0x56D6ED76, 0xBDE15675, 0x52233D4B,
|
||||
0xB0FF2632, 0x5F3D4D0C, 0xB40AF60F, 0x5BC89D31, 0xAAC3C6BC, 0x4501AD82, 0xAE361681,
|
||||
0x41F47DBF, 0xA32866C6, 0x4CEA0DF8, 0xA7DDB6FB, 0x481FDDC5, 0xD1E70470, 0x3E256F4E,
|
||||
0xD512D44D, 0x3AD0BF73, 0xD80CA40A, 0x37CECF34, 0xDCF97437, 0x333B1F09, 0xC2304484,
|
||||
0x2DF22FBA, 0xC6C594B9, 0x2907FF87, 0xCBDBE4FE, 0x24198FC0, 0xCF2E34C3, 0x20EC5FFD,
|
||||
0xF6498598, 0x198BEEA6, 0xF2BC55A5, 0x1D7E3E9B, 0xFFA225E2, 0x10604EDC, 0xFB57F5DF,
|
||||
0x14959EE1, 0xE59EC56C, 0x0A5CAE52, 0xE16B1551, 0x0EA97E6F, 0xEC756516, 0x03B70E28,
|
||||
0xE880B52B, 0x0742DE15, 0xE6050901, 0x09C7623F, 0xE2F0D93C, 0x0D32B202, 0xEFEEA97B,
|
||||
0x002CC245, 0xEB1B7946, 0x04D91278, 0xF5D249F5, 0x1A1022CB, 0xF12799C8, 0x1EE5F2F6,
|
||||
0xFC39E98F, 0x13FB82B1, 0xF8CC39B2, 0x170E528C, 0xC1AB88E9, 0x2E69E3D7, 0xC55E58D4,
|
||||
0x2A9C33EA, 0xC8402893, 0x278243AD, 0xCCB5F8AE, 0x23779390, 0xD27CC81D, 0x3DBEA323,
|
||||
0xD6891820, 0x394B731E, 0xDB976867, 0x34550359, 0xDF62B85A, 0x30A0D364, 0xA9580AD1,
|
||||
0x469A61EF, 0xADADDAEC, 0x426FB1D2, 0xA0B3AAAB, 0x4F71C195, 0xA4467A96, 0x4B8411A8,
|
||||
0xBA8F4A25, 0x554D211B, 0xBE7A9A18, 0x51B8F126, 0xB364EA5F, 0x5CA68161, 0xB7913A62,
|
||||
0x5853515C, 0x8EF68B39, 0x6134E007, 0x8A035B04, 0x65C1303A, 0x871D2B43, 0x68DF407D,
|
||||
0x83E8FB7E, 0x6C2A9040, 0x9D21CBCD, 0x72E3A0F3, 0x99D41BF0, 0x761670CE, 0x94CA6BB7,
|
||||
0x7B080089, 0x903FBB8A, 0x7FFDD0B4, 0x78BF0EA1, 0x977D659F, 0x7C4ADE9C, 0x9388B5A2,
|
||||
0x7154AEDB, 0x9E96C5E5, 0x75A17EE6, 0x9A6315D8, 0x6B684E55, 0x84AA256B, 0x6F9D9E68,
|
||||
0x805FF556, 0x6283EE2F, 0x8D418511, 0x66763E12, 0x89B4552C, 0x5F118F49, 0xB0D3E477,
|
||||
0x5BE45F74, 0xB426344A, 0x56FA2F33, 0xB938440D, 0x520FFF0E, 0xBDCD9430, 0x4CC6CFBD,
|
||||
0xA304A483, 0x48331F80, 0xA7F174BE, 0x452D6FC7, 0xAAEF04F9, 0x41D8BFFA, 0xAE1AD4C4,
|
||||
0x37E20D71, 0xD820664F, 0x3317DD4C, 0xDCD5B672, 0x3E09AD0B, 0xD1CBC635, 0x3AFC7D36,
|
||||
0xD53E1608, 0x24354D85, 0xCBF726BB, 0x20C09DB8, 0xCF02F686, 0x2DDEEDFF, 0xC21C86C1,
|
||||
0x292B3DC2, 0xC6E956FC, 0x104C8C99, 0xFF8EE7A7, 0x14B95CA4, 0xFB7B379A, 0x19A72CE3,
|
||||
0xF66547DD, 0x1D52FCDE, 0xF29097E0, 0x039BCC6D, 0xEC59A753, 0x076E1C50, 0xE8AC776E,
|
||||
0x0A706C17, 0xE5B20729, 0x0E85BC2A, 0xE147D714,
|
||||
],
|
||||
[
|
||||
0x00000000, 0xC18EDFC0, 0x586CB9C1, 0x99E26601, 0xB0D97382, 0x7157AC42, 0xE8B5CA43,
|
||||
0x293B1583, 0xBAC3E145, 0x7B4D3E85, 0xE2AF5884, 0x23218744, 0x0A1A92C7, 0xCB944D07,
|
||||
0x52762B06, 0x93F8F4C6, 0xAEF6C4CB, 0x6F781B0B, 0xF69A7D0A, 0x3714A2CA, 0x1E2FB749,
|
||||
0xDFA16889, 0x46430E88, 0x87CDD148, 0x1435258E, 0xD5BBFA4E, 0x4C599C4F, 0x8DD7438F,
|
||||
0xA4EC560C, 0x656289CC, 0xFC80EFCD, 0x3D0E300D, 0x869C8FD7, 0x47125017, 0xDEF03616,
|
||||
0x1F7EE9D6, 0x3645FC55, 0xF7CB2395, 0x6E294594, 0xAFA79A54, 0x3C5F6E92, 0xFDD1B152,
|
||||
0x6433D753, 0xA5BD0893, 0x8C861D10, 0x4D08C2D0, 0xD4EAA4D1, 0x15647B11, 0x286A4B1C,
|
||||
0xE9E494DC, 0x7006F2DD, 0xB1882D1D, 0x98B3389E, 0x593DE75E, 0xC0DF815F, 0x01515E9F,
|
||||
0x92A9AA59, 0x53277599, 0xCAC51398, 0x0B4BCC58, 0x2270D9DB, 0xE3FE061B, 0x7A1C601A,
|
||||
0xBB92BFDA, 0xD64819EF, 0x17C6C62F, 0x8E24A02E, 0x4FAA7FEE, 0x66916A6D, 0xA71FB5AD,
|
||||
0x3EFDD3AC, 0xFF730C6C, 0x6C8BF8AA, 0xAD05276A, 0x34E7416B, 0xF5699EAB, 0xDC528B28,
|
||||
0x1DDC54E8, 0x843E32E9, 0x45B0ED29, 0x78BEDD24, 0xB93002E4, 0x20D264E5, 0xE15CBB25,
|
||||
0xC867AEA6, 0x09E97166, 0x900B1767, 0x5185C8A7, 0xC27D3C61, 0x03F3E3A1, 0x9A1185A0,
|
||||
0x5B9F5A60, 0x72A44FE3, 0xB32A9023, 0x2AC8F622, 0xEB4629E2, 0x50D49638, 0x915A49F8,
|
||||
0x08B82FF9, 0xC936F039, 0xE00DE5BA, 0x21833A7A, 0xB8615C7B, 0x79EF83BB, 0xEA17777D,
|
||||
0x2B99A8BD, 0xB27BCEBC, 0x73F5117C, 0x5ACE04FF, 0x9B40DB3F, 0x02A2BD3E, 0xC32C62FE,
|
||||
0xFE2252F3, 0x3FAC8D33, 0xA64EEB32, 0x67C034F2, 0x4EFB2171, 0x8F75FEB1, 0x169798B0,
|
||||
0xD7194770, 0x44E1B3B6, 0x856F6C76, 0x1C8D0A77, 0xDD03D5B7, 0xF438C034, 0x35B61FF4,
|
||||
0xAC5479F5, 0x6DDAA635, 0x77E1359F, 0xB66FEA5F, 0x2F8D8C5E, 0xEE03539E, 0xC738461D,
|
||||
0x06B699DD, 0x9F54FFDC, 0x5EDA201C, 0xCD22D4DA, 0x0CAC0B1A, 0x954E6D1B, 0x54C0B2DB,
|
||||
0x7DFBA758, 0xBC757898, 0x25971E99, 0xE419C159, 0xD917F154, 0x18992E94, 0x817B4895,
|
||||
0x40F59755, 0x69CE82D6, 0xA8405D16, 0x31A23B17, 0xF02CE4D7, 0x63D41011, 0xA25ACFD1,
|
||||
0x3BB8A9D0, 0xFA367610, 0xD30D6393, 0x1283BC53, 0x8B61DA52, 0x4AEF0592, 0xF17DBA48,
|
||||
0x30F36588, 0xA9110389, 0x689FDC49, 0x41A4C9CA, 0x802A160A, 0x19C8700B, 0xD846AFCB,
|
||||
0x4BBE5B0D, 0x8A3084CD, 0x13D2E2CC, 0xD25C3D0C, 0xFB67288F, 0x3AE9F74F, 0xA30B914E,
|
||||
0x62854E8E, 0x5F8B7E83, 0x9E05A143, 0x07E7C742, 0xC6691882, 0xEF520D01, 0x2EDCD2C1,
|
||||
0xB73EB4C0, 0x76B06B00, 0xE5489FC6, 0x24C64006, 0xBD242607, 0x7CAAF9C7, 0x5591EC44,
|
||||
0x941F3384, 0x0DFD5585, 0xCC738A45, 0xA1A92C70, 0x6027F3B0, 0xF9C595B1, 0x384B4A71,
|
||||
0x11705FF2, 0xD0FE8032, 0x491CE633, 0x889239F3, 0x1B6ACD35, 0xDAE412F5, 0x430674F4,
|
||||
0x8288AB34, 0xABB3BEB7, 0x6A3D6177, 0xF3DF0776, 0x3251D8B6, 0x0F5FE8BB, 0xCED1377B,
|
||||
0x5733517A, 0x96BD8EBA, 0xBF869B39, 0x7E0844F9, 0xE7EA22F8, 0x2664FD38, 0xB59C09FE,
|
||||
0x7412D63E, 0xEDF0B03F, 0x2C7E6FFF, 0x05457A7C, 0xC4CBA5BC, 0x5D29C3BD, 0x9CA71C7D,
|
||||
0x2735A3A7, 0xE6BB7C67, 0x7F591A66, 0xBED7C5A6, 0x97ECD025, 0x56620FE5, 0xCF8069E4,
|
||||
0x0E0EB624, 0x9DF642E2, 0x5C789D22, 0xC59AFB23, 0x041424E3, 0x2D2F3160, 0xECA1EEA0,
|
||||
0x754388A1, 0xB4CD5761, 0x89C3676C, 0x484DB8AC, 0xD1AFDEAD, 0x1021016D, 0x391A14EE,
|
||||
0xF894CB2E, 0x6176AD2F, 0xA0F872EF, 0x33008629, 0xF28E59E9, 0x6B6C3FE8, 0xAAE2E028,
|
||||
0x83D9F5AB, 0x42572A6B, 0xDBB54C6A, 0x1A3B93AA,
|
||||
],
|
||||
[
|
||||
0x00000000, 0x9BA54C6F, 0xEC3B9E9F, 0x779ED2F0, 0x03063B7F, 0x98A37710, 0xEF3DA5E0,
|
||||
0x7498E98F, 0x060C76FE, 0x9DA93A91, 0xEA37E861, 0x7192A40E, 0x050A4D81, 0x9EAF01EE,
|
||||
0xE931D31E, 0x72949F71, 0x0C18EDFC, 0x97BDA193, 0xE0237363, 0x7B863F0C, 0x0F1ED683,
|
||||
0x94BB9AEC, 0xE325481C, 0x78800473, 0x0A149B02, 0x91B1D76D, 0xE62F059D, 0x7D8A49F2,
|
||||
0x0912A07D, 0x92B7EC12, 0xE5293EE2, 0x7E8C728D, 0x1831DBF8, 0x83949797, 0xF40A4567,
|
||||
0x6FAF0908, 0x1B37E087, 0x8092ACE8, 0xF70C7E18, 0x6CA93277, 0x1E3DAD06, 0x8598E169,
|
||||
0xF2063399, 0x69A37FF6, 0x1D3B9679, 0x869EDA16, 0xF10008E6, 0x6AA54489, 0x14293604,
|
||||
0x8F8C7A6B, 0xF812A89B, 0x63B7E4F4, 0x172F0D7B, 0x8C8A4114, 0xFB1493E4, 0x60B1DF8B,
|
||||
0x122540FA, 0x89800C95, 0xFE1EDE65, 0x65BB920A, 0x11237B85, 0x8A8637EA, 0xFD18E51A,
|
||||
0x66BDA975, 0x3063B7F0, 0xABC6FB9F, 0xDC58296F, 0x47FD6500, 0x33658C8F, 0xA8C0C0E0,
|
||||
0xDF5E1210, 0x44FB5E7F, 0x366FC10E, 0xADCA8D61, 0xDA545F91, 0x41F113FE, 0x3569FA71,
|
||||
0xAECCB61E, 0xD95264EE, 0x42F72881, 0x3C7B5A0C, 0xA7DE1663, 0xD040C493, 0x4BE588FC,
|
||||
0x3F7D6173, 0xA4D82D1C, 0xD346FFEC, 0x48E3B383, 0x3A772CF2, 0xA1D2609D, 0xD64CB26D,
|
||||
0x4DE9FE02, 0x3971178D, 0xA2D45BE2, 0xD54A8912, 0x4EEFC57D, 0x28526C08, 0xB3F72067,
|
||||
0xC469F297, 0x5FCCBEF8, 0x2B545777, 0xB0F11B18, 0xC76FC9E8, 0x5CCA8587, 0x2E5E1AF6,
|
||||
0xB5FB5699, 0xC2658469, 0x59C0C806, 0x2D582189, 0xB6FD6DE6, 0xC163BF16, 0x5AC6F379,
|
||||
0x244A81F4, 0xBFEFCD9B, 0xC8711F6B, 0x53D45304, 0x274CBA8B, 0xBCE9F6E4, 0xCB772414,
|
||||
0x50D2687B, 0x2246F70A, 0xB9E3BB65, 0xCE7D6995, 0x55D825FA, 0x2140CC75, 0xBAE5801A,
|
||||
0xCD7B52EA, 0x56DE1E85, 0x60C76FE0, 0xFB62238F, 0x8CFCF17F, 0x1759BD10, 0x63C1549F,
|
||||
0xF86418F0, 0x8FFACA00, 0x145F866F, 0x66CB191E, 0xFD6E5571, 0x8AF08781, 0x1155CBEE,
|
||||
0x65CD2261, 0xFE686E0E, 0x89F6BCFE, 0x1253F091, 0x6CDF821C, 0xF77ACE73, 0x80E41C83,
|
||||
0x1B4150EC, 0x6FD9B963, 0xF47CF50C, 0x83E227FC, 0x18476B93, 0x6AD3F4E2, 0xF176B88D,
|
||||
0x86E86A7D, 0x1D4D2612, 0x69D5CF9D, 0xF27083F2, 0x85EE5102, 0x1E4B1D6D, 0x78F6B418,
|
||||
0xE353F877, 0x94CD2A87, 0x0F6866E8, 0x7BF08F67, 0xE055C308, 0x97CB11F8, 0x0C6E5D97,
|
||||
0x7EFAC2E6, 0xE55F8E89, 0x92C15C79, 0x09641016, 0x7DFCF999, 0xE659B5F6, 0x91C76706,
|
||||
0x0A622B69, 0x74EE59E4, 0xEF4B158B, 0x98D5C77B, 0x03708B14, 0x77E8629B, 0xEC4D2EF4,
|
||||
0x9BD3FC04, 0x0076B06B, 0x72E22F1A, 0xE9476375, 0x9ED9B185, 0x057CFDEA, 0x71E41465,
|
||||
0xEA41580A, 0x9DDF8AFA, 0x067AC695, 0x50A4D810, 0xCB01947F, 0xBC9F468F, 0x273A0AE0,
|
||||
0x53A2E36F, 0xC807AF00, 0xBF997DF0, 0x243C319F, 0x56A8AEEE, 0xCD0DE281, 0xBA933071,
|
||||
0x21367C1E, 0x55AE9591, 0xCE0BD9FE, 0xB9950B0E, 0x22304761, 0x5CBC35EC, 0xC7197983,
|
||||
0xB087AB73, 0x2B22E71C, 0x5FBA0E93, 0xC41F42FC, 0xB381900C, 0x2824DC63, 0x5AB04312,
|
||||
0xC1150F7D, 0xB68BDD8D, 0x2D2E91E2, 0x59B6786D, 0xC2133402, 0xB58DE6F2, 0x2E28AA9D,
|
||||
0x489503E8, 0xD3304F87, 0xA4AE9D77, 0x3F0BD118, 0x4B933897, 0xD03674F8, 0xA7A8A608,
|
||||
0x3C0DEA67, 0x4E997516, 0xD53C3979, 0xA2A2EB89, 0x3907A7E6, 0x4D9F4E69, 0xD63A0206,
|
||||
0xA1A4D0F6, 0x3A019C99, 0x448DEE14, 0xDF28A27B, 0xA8B6708B, 0x33133CE4, 0x478BD56B,
|
||||
0xDC2E9904, 0xABB04BF4, 0x3015079B, 0x428198EA, 0xD924D485, 0xAEBA0675, 0x351F4A1A,
|
||||
0x4187A395, 0xDA22EFFA, 0xADBC3D0A, 0x36197165,
|
||||
],
|
||||
[
|
||||
0x00000000, 0xDD96D985, 0x605CB54B, 0xBDCA6CCE, 0xC0B96A96, 0x1D2FB313, 0xA0E5DFDD,
|
||||
0x7D730658, 0x5A03D36D, 0x87950AE8, 0x3A5F6626, 0xE7C9BFA3, 0x9ABAB9FB, 0x472C607E,
|
||||
0xFAE60CB0, 0x2770D535, 0xB407A6DA, 0x69917F5F, 0xD45B1391, 0x09CDCA14, 0x74BECC4C,
|
||||
0xA92815C9, 0x14E27907, 0xC974A082, 0xEE0475B7, 0x3392AC32, 0x8E58C0FC, 0x53CE1979,
|
||||
0x2EBD1F21, 0xF32BC6A4, 0x4EE1AA6A, 0x937773EF, 0xB37E4BF5, 0x6EE89270, 0xD322FEBE,
|
||||
0x0EB4273B, 0x73C72163, 0xAE51F8E6, 0x139B9428, 0xCE0D4DAD, 0xE97D9898, 0x34EB411D,
|
||||
0x89212DD3, 0x54B7F456, 0x29C4F20E, 0xF4522B8B, 0x49984745, 0x940E9EC0, 0x0779ED2F,
|
||||
0xDAEF34AA, 0x67255864, 0xBAB381E1, 0xC7C087B9, 0x1A565E3C, 0xA79C32F2, 0x7A0AEB77,
|
||||
0x5D7A3E42, 0x80ECE7C7, 0x3D268B09, 0xE0B0528C, 0x9DC354D4, 0x40558D51, 0xFD9FE19F,
|
||||
0x2009381A, 0xBD8D91AB, 0x601B482E, 0xDDD124E0, 0x0047FD65, 0x7D34FB3D, 0xA0A222B8,
|
||||
0x1D684E76, 0xC0FE97F3, 0xE78E42C6, 0x3A189B43, 0x87D2F78D, 0x5A442E08, 0x27372850,
|
||||
0xFAA1F1D5, 0x476B9D1B, 0x9AFD449E, 0x098A3771, 0xD41CEEF4, 0x69D6823A, 0xB4405BBF,
|
||||
0xC9335DE7, 0x14A58462, 0xA96FE8AC, 0x74F93129, 0x5389E41C, 0x8E1F3D99, 0x33D55157,
|
||||
0xEE4388D2, 0x93308E8A, 0x4EA6570F, 0xF36C3BC1, 0x2EFAE244, 0x0EF3DA5E, 0xD36503DB,
|
||||
0x6EAF6F15, 0xB339B690, 0xCE4AB0C8, 0x13DC694D, 0xAE160583, 0x7380DC06, 0x54F00933,
|
||||
0x8966D0B6, 0x34ACBC78, 0xE93A65FD, 0x944963A5, 0x49DFBA20, 0xF415D6EE, 0x29830F6B,
|
||||
0xBAF47C84, 0x6762A501, 0xDAA8C9CF, 0x073E104A, 0x7A4D1612, 0xA7DBCF97, 0x1A11A359,
|
||||
0xC7877ADC, 0xE0F7AFE9, 0x3D61766C, 0x80AB1AA2, 0x5D3DC327, 0x204EC57F, 0xFDD81CFA,
|
||||
0x40127034, 0x9D84A9B1, 0xA06A2517, 0x7DFCFC92, 0xC036905C, 0x1DA049D9, 0x60D34F81,
|
||||
0xBD459604, 0x008FFACA, 0xDD19234F, 0xFA69F67A, 0x27FF2FFF, 0x9A354331, 0x47A39AB4,
|
||||
0x3AD09CEC, 0xE7464569, 0x5A8C29A7, 0x871AF022, 0x146D83CD, 0xC9FB5A48, 0x74313686,
|
||||
0xA9A7EF03, 0xD4D4E95B, 0x094230DE, 0xB4885C10, 0x691E8595, 0x4E6E50A0, 0x93F88925,
|
||||
0x2E32E5EB, 0xF3A43C6E, 0x8ED73A36, 0x5341E3B3, 0xEE8B8F7D, 0x331D56F8, 0x13146EE2,
|
||||
0xCE82B767, 0x7348DBA9, 0xAEDE022C, 0xD3AD0474, 0x0E3BDDF1, 0xB3F1B13F, 0x6E6768BA,
|
||||
0x4917BD8F, 0x9481640A, 0x294B08C4, 0xF4DDD141, 0x89AED719, 0x54380E9C, 0xE9F26252,
|
||||
0x3464BBD7, 0xA713C838, 0x7A8511BD, 0xC74F7D73, 0x1AD9A4F6, 0x67AAA2AE, 0xBA3C7B2B,
|
||||
0x07F617E5, 0xDA60CE60, 0xFD101B55, 0x2086C2D0, 0x9D4CAE1E, 0x40DA779B, 0x3DA971C3,
|
||||
0xE03FA846, 0x5DF5C488, 0x80631D0D, 0x1DE7B4BC, 0xC0716D39, 0x7DBB01F7, 0xA02DD872,
|
||||
0xDD5EDE2A, 0x00C807AF, 0xBD026B61, 0x6094B2E4, 0x47E467D1, 0x9A72BE54, 0x27B8D29A,
|
||||
0xFA2E0B1F, 0x875D0D47, 0x5ACBD4C2, 0xE701B80C, 0x3A976189, 0xA9E01266, 0x7476CBE3,
|
||||
0xC9BCA72D, 0x142A7EA8, 0x695978F0, 0xB4CFA175, 0x0905CDBB, 0xD493143E, 0xF3E3C10B,
|
||||
0x2E75188E, 0x93BF7440, 0x4E29ADC5, 0x335AAB9D, 0xEECC7218, 0x53061ED6, 0x8E90C753,
|
||||
0xAE99FF49, 0x730F26CC, 0xCEC54A02, 0x13539387, 0x6E2095DF, 0xB3B64C5A, 0x0E7C2094,
|
||||
0xD3EAF911, 0xF49A2C24, 0x290CF5A1, 0x94C6996F, 0x495040EA, 0x342346B2, 0xE9B59F37,
|
||||
0x547FF3F9, 0x89E92A7C, 0x1A9E5993, 0xC7088016, 0x7AC2ECD8, 0xA754355D, 0xDA273305,
|
||||
0x07B1EA80, 0xBA7B864E, 0x67ED5FCB, 0x409D8AFE, 0x9D0B537B, 0x20C13FB5, 0xFD57E630,
|
||||
0x8024E068, 0x5DB239ED, 0xE0785523, 0x3DEE8CA6,
|
||||
],
|
||||
[
|
||||
0x00000000, 0x9D0FE176, 0xE16EC4AD, 0x7C6125DB, 0x19AC8F1B, 0x84A36E6D, 0xF8C24BB6,
|
||||
0x65CDAAC0, 0x33591E36, 0xAE56FF40, 0xD237DA9B, 0x4F383BED, 0x2AF5912D, 0xB7FA705B,
|
||||
0xCB9B5580, 0x5694B4F6, 0x66B23C6C, 0xFBBDDD1A, 0x87DCF8C1, 0x1AD319B7, 0x7F1EB377,
|
||||
0xE2115201, 0x9E7077DA, 0x037F96AC, 0x55EB225A, 0xC8E4C32C, 0xB485E6F7, 0x298A0781,
|
||||
0x4C47AD41, 0xD1484C37, 0xAD2969EC, 0x3026889A, 0xCD6478D8, 0x506B99AE, 0x2C0ABC75,
|
||||
0xB1055D03, 0xD4C8F7C3, 0x49C716B5, 0x35A6336E, 0xA8A9D218, 0xFE3D66EE, 0x63328798,
|
||||
0x1F53A243, 0x825C4335, 0xE791E9F5, 0x7A9E0883, 0x06FF2D58, 0x9BF0CC2E, 0xABD644B4,
|
||||
0x36D9A5C2, 0x4AB88019, 0xD7B7616F, 0xB27ACBAF, 0x2F752AD9, 0x53140F02, 0xCE1BEE74,
|
||||
0x988F5A82, 0x0580BBF4, 0x79E19E2F, 0xE4EE7F59, 0x8123D599, 0x1C2C34EF, 0x604D1134,
|
||||
0xFD42F042, 0x41B9F7F1, 0xDCB61687, 0xA0D7335C, 0x3DD8D22A, 0x581578EA, 0xC51A999C,
|
||||
0xB97BBC47, 0x24745D31, 0x72E0E9C7, 0xEFEF08B1, 0x938E2D6A, 0x0E81CC1C, 0x6B4C66DC,
|
||||
0xF64387AA, 0x8A22A271, 0x172D4307, 0x270BCB9D, 0xBA042AEB, 0xC6650F30, 0x5B6AEE46,
|
||||
0x3EA74486, 0xA3A8A5F0, 0xDFC9802B, 0x42C6615D, 0x1452D5AB, 0x895D34DD, 0xF53C1106,
|
||||
0x6833F070, 0x0DFE5AB0, 0x90F1BBC6, 0xEC909E1D, 0x719F7F6B, 0x8CDD8F29, 0x11D26E5F,
|
||||
0x6DB34B84, 0xF0BCAAF2, 0x95710032, 0x087EE144, 0x741FC49F, 0xE91025E9, 0xBF84911F,
|
||||
0x228B7069, 0x5EEA55B2, 0xC3E5B4C4, 0xA6281E04, 0x3B27FF72, 0x4746DAA9, 0xDA493BDF,
|
||||
0xEA6FB345, 0x77605233, 0x0B0177E8, 0x960E969E, 0xF3C33C5E, 0x6ECCDD28, 0x12ADF8F3,
|
||||
0x8FA21985, 0xD936AD73, 0x44394C05, 0x385869DE, 0xA55788A8, 0xC09A2268, 0x5D95C31E,
|
||||
0x21F4E6C5, 0xBCFB07B3, 0x8373EFE2, 0x1E7C0E94, 0x621D2B4F, 0xFF12CA39, 0x9ADF60F9,
|
||||
0x07D0818F, 0x7BB1A454, 0xE6BE4522, 0xB02AF1D4, 0x2D2510A2, 0x51443579, 0xCC4BD40F,
|
||||
0xA9867ECF, 0x34899FB9, 0x48E8BA62, 0xD5E75B14, 0xE5C1D38E, 0x78CE32F8, 0x04AF1723,
|
||||
0x99A0F655, 0xFC6D5C95, 0x6162BDE3, 0x1D039838, 0x800C794E, 0xD698CDB8, 0x4B972CCE,
|
||||
0x37F60915, 0xAAF9E863, 0xCF3442A3, 0x523BA3D5, 0x2E5A860E, 0xB3556778, 0x4E17973A,
|
||||
0xD318764C, 0xAF795397, 0x3276B2E1, 0x57BB1821, 0xCAB4F957, 0xB6D5DC8C, 0x2BDA3DFA,
|
||||
0x7D4E890C, 0xE041687A, 0x9C204DA1, 0x012FACD7, 0x64E20617, 0xF9EDE761, 0x858CC2BA,
|
||||
0x188323CC, 0x28A5AB56, 0xB5AA4A20, 0xC9CB6FFB, 0x54C48E8D, 0x3109244D, 0xAC06C53B,
|
||||
0xD067E0E0, 0x4D680196, 0x1BFCB560, 0x86F35416, 0xFA9271CD, 0x679D90BB, 0x02503A7B,
|
||||
0x9F5FDB0D, 0xE33EFED6, 0x7E311FA0, 0xC2CA1813, 0x5FC5F965, 0x23A4DCBE, 0xBEAB3DC8,
|
||||
0xDB669708, 0x4669767E, 0x3A0853A5, 0xA707B2D3, 0xF1930625, 0x6C9CE753, 0x10FDC288,
|
||||
0x8DF223FE, 0xE83F893E, 0x75306848, 0x09514D93, 0x945EACE5, 0xA478247F, 0x3977C509,
|
||||
0x4516E0D2, 0xD81901A4, 0xBDD4AB64, 0x20DB4A12, 0x5CBA6FC9, 0xC1B58EBF, 0x97213A49,
|
||||
0x0A2EDB3F, 0x764FFEE4, 0xEB401F92, 0x8E8DB552, 0x13825424, 0x6FE371FF, 0xF2EC9089,
|
||||
0x0FAE60CB, 0x92A181BD, 0xEEC0A466, 0x73CF4510, 0x1602EFD0, 0x8B0D0EA6, 0xF76C2B7D,
|
||||
0x6A63CA0B, 0x3CF77EFD, 0xA1F89F8B, 0xDD99BA50, 0x40965B26, 0x255BF1E6, 0xB8541090,
|
||||
0xC435354B, 0x593AD43D, 0x691C5CA7, 0xF413BDD1, 0x8872980A, 0x157D797C, 0x70B0D3BC,
|
||||
0xEDBF32CA, 0x91DE1711, 0x0CD1F667, 0x5A454291, 0xC74AA3E7, 0xBB2B863C, 0x2624674A,
|
||||
0x43E9CD8A, 0xDEE62CFC, 0xA2870927, 0x3F88E851,
|
||||
],
|
||||
[
|
||||
0x00000000, 0xB9FBDBE8, 0xA886B191, 0x117D6A79, 0x8A7C6563, 0x3387BE8B, 0x22FAD4F2,
|
||||
0x9B010F1A, 0xCF89CC87, 0x7672176F, 0x670F7D16, 0xDEF4A6FE, 0x45F5A9E4, 0xFC0E720C,
|
||||
0xED731875, 0x5488C39D, 0x44629F4F, 0xFD9944A7, 0xECE42EDE, 0x551FF536, 0xCE1EFA2C,
|
||||
0x77E521C4, 0x66984BBD, 0xDF639055, 0x8BEB53C8, 0x32108820, 0x236DE259, 0x9A9639B1,
|
||||
0x019736AB, 0xB86CED43, 0xA911873A, 0x10EA5CD2, 0x88C53E9E, 0x313EE576, 0x20438F0F,
|
||||
0x99B854E7, 0x02B95BFD, 0xBB428015, 0xAA3FEA6C, 0x13C43184, 0x474CF219, 0xFEB729F1,
|
||||
0xEFCA4388, 0x56319860, 0xCD30977A, 0x74CB4C92, 0x65B626EB, 0xDC4DFD03, 0xCCA7A1D1,
|
||||
0x755C7A39, 0x64211040, 0xDDDACBA8, 0x46DBC4B2, 0xFF201F5A, 0xEE5D7523, 0x57A6AECB,
|
||||
0x032E6D56, 0xBAD5B6BE, 0xABA8DCC7, 0x1253072F, 0x89520835, 0x30A9D3DD, 0x21D4B9A4,
|
||||
0x982F624C, 0xCAFB7B7D, 0x7300A095, 0x627DCAEC, 0xDB861104, 0x40871E1E, 0xF97CC5F6,
|
||||
0xE801AF8F, 0x51FA7467, 0x0572B7FA, 0xBC896C12, 0xADF4066B, 0x140FDD83, 0x8F0ED299,
|
||||
0x36F50971, 0x27886308, 0x9E73B8E0, 0x8E99E432, 0x37623FDA, 0x261F55A3, 0x9FE48E4B,
|
||||
0x04E58151, 0xBD1E5AB9, 0xAC6330C0, 0x1598EB28, 0x411028B5, 0xF8EBF35D, 0xE9969924,
|
||||
0x506D42CC, 0xCB6C4DD6, 0x7297963E, 0x63EAFC47, 0xDA1127AF, 0x423E45E3, 0xFBC59E0B,
|
||||
0xEAB8F472, 0x53432F9A, 0xC8422080, 0x71B9FB68, 0x60C49111, 0xD93F4AF9, 0x8DB78964,
|
||||
0x344C528C, 0x253138F5, 0x9CCAE31D, 0x07CBEC07, 0xBE3037EF, 0xAF4D5D96, 0x16B6867E,
|
||||
0x065CDAAC, 0xBFA70144, 0xAEDA6B3D, 0x1721B0D5, 0x8C20BFCF, 0x35DB6427, 0x24A60E5E,
|
||||
0x9D5DD5B6, 0xC9D5162B, 0x702ECDC3, 0x6153A7BA, 0xD8A87C52, 0x43A97348, 0xFA52A8A0,
|
||||
0xEB2FC2D9, 0x52D41931, 0x4E87F0BB, 0xF77C2B53, 0xE601412A, 0x5FFA9AC2, 0xC4FB95D8,
|
||||
0x7D004E30, 0x6C7D2449, 0xD586FFA1, 0x810E3C3C, 0x38F5E7D4, 0x29888DAD, 0x90735645,
|
||||
0x0B72595F, 0xB28982B7, 0xA3F4E8CE, 0x1A0F3326, 0x0AE56FF4, 0xB31EB41C, 0xA263DE65,
|
||||
0x1B98058D, 0x80990A97, 0x3962D17F, 0x281FBB06, 0x91E460EE, 0xC56CA373, 0x7C97789B,
|
||||
0x6DEA12E2, 0xD411C90A, 0x4F10C610, 0xF6EB1DF8, 0xE7967781, 0x5E6DAC69, 0xC642CE25,
|
||||
0x7FB915CD, 0x6EC47FB4, 0xD73FA45C, 0x4C3EAB46, 0xF5C570AE, 0xE4B81AD7, 0x5D43C13F,
|
||||
0x09CB02A2, 0xB030D94A, 0xA14DB333, 0x18B668DB, 0x83B767C1, 0x3A4CBC29, 0x2B31D650,
|
||||
0x92CA0DB8, 0x8220516A, 0x3BDB8A82, 0x2AA6E0FB, 0x935D3B13, 0x085C3409, 0xB1A7EFE1,
|
||||
0xA0DA8598, 0x19215E70, 0x4DA99DED, 0xF4524605, 0xE52F2C7C, 0x5CD4F794, 0xC7D5F88E,
|
||||
0x7E2E2366, 0x6F53491F, 0xD6A892F7, 0x847C8BC6, 0x3D87502E, 0x2CFA3A57, 0x9501E1BF,
|
||||
0x0E00EEA5, 0xB7FB354D, 0xA6865F34, 0x1F7D84DC, 0x4BF54741, 0xF20E9CA9, 0xE373F6D0,
|
||||
0x5A882D38, 0xC1892222, 0x7872F9CA, 0x690F93B3, 0xD0F4485B, 0xC01E1489, 0x79E5CF61,
|
||||
0x6898A518, 0xD1637EF0, 0x4A6271EA, 0xF399AA02, 0xE2E4C07B, 0x5B1F1B93, 0x0F97D80E,
|
||||
0xB66C03E6, 0xA711699F, 0x1EEAB277, 0x85EBBD6D, 0x3C106685, 0x2D6D0CFC, 0x9496D714,
|
||||
0x0CB9B558, 0xB5426EB0, 0xA43F04C9, 0x1DC4DF21, 0x86C5D03B, 0x3F3E0BD3, 0x2E4361AA,
|
||||
0x97B8BA42, 0xC33079DF, 0x7ACBA237, 0x6BB6C84E, 0xD24D13A6, 0x494C1CBC, 0xF0B7C754,
|
||||
0xE1CAAD2D, 0x583176C5, 0x48DB2A17, 0xF120F1FF, 0xE05D9B86, 0x59A6406E, 0xC2A74F74,
|
||||
0x7B5C949C, 0x6A21FEE5, 0xD3DA250D, 0x8752E690, 0x3EA93D78, 0x2FD45701, 0x962F8CE9,
|
||||
0x0D2E83F3, 0xB4D5581B, 0xA5A83262, 0x1C53E98A,
|
||||
],
|
||||
[
|
||||
0x00000000, 0xAE689191, 0x87A02563, 0x29C8B4F2, 0xD4314C87, 0x7A59DD16, 0x539169E4,
|
||||
0xFDF9F875, 0x73139F4F, 0xDD7B0EDE, 0xF4B3BA2C, 0x5ADB2BBD, 0xA722D3C8, 0x094A4259,
|
||||
0x2082F6AB, 0x8EEA673A, 0xE6273E9E, 0x484FAF0F, 0x61871BFD, 0xCFEF8A6C, 0x32167219,
|
||||
0x9C7EE388, 0xB5B6577A, 0x1BDEC6EB, 0x9534A1D1, 0x3B5C3040, 0x129484B2, 0xBCFC1523,
|
||||
0x4105ED56, 0xEF6D7CC7, 0xC6A5C835, 0x68CD59A4, 0x173F7B7D, 0xB957EAEC, 0x909F5E1E,
|
||||
0x3EF7CF8F, 0xC30E37FA, 0x6D66A66B, 0x44AE1299, 0xEAC68308, 0x642CE432, 0xCA4475A3,
|
||||
0xE38CC151, 0x4DE450C0, 0xB01DA8B5, 0x1E753924, 0x37BD8DD6, 0x99D51C47, 0xF11845E3,
|
||||
0x5F70D472, 0x76B86080, 0xD8D0F111, 0x25290964, 0x8B4198F5, 0xA2892C07, 0x0CE1BD96,
|
||||
0x820BDAAC, 0x2C634B3D, 0x05ABFFCF, 0xABC36E5E, 0x563A962B, 0xF85207BA, 0xD19AB348,
|
||||
0x7FF222D9, 0x2E7EF6FA, 0x8016676B, 0xA9DED399, 0x07B64208, 0xFA4FBA7D, 0x54272BEC,
|
||||
0x7DEF9F1E, 0xD3870E8F, 0x5D6D69B5, 0xF305F824, 0xDACD4CD6, 0x74A5DD47, 0x895C2532,
|
||||
0x2734B4A3, 0x0EFC0051, 0xA09491C0, 0xC859C864, 0x663159F5, 0x4FF9ED07, 0xE1917C96,
|
||||
0x1C6884E3, 0xB2001572, 0x9BC8A180, 0x35A03011, 0xBB4A572B, 0x1522C6BA, 0x3CEA7248,
|
||||
0x9282E3D9, 0x6F7B1BAC, 0xC1138A3D, 0xE8DB3ECF, 0x46B3AF5E, 0x39418D87, 0x97291C16,
|
||||
0xBEE1A8E4, 0x10893975, 0xED70C100, 0x43185091, 0x6AD0E463, 0xC4B875F2, 0x4A5212C8,
|
||||
0xE43A8359, 0xCDF237AB, 0x639AA63A, 0x9E635E4F, 0x300BCFDE, 0x19C37B2C, 0xB7ABEABD,
|
||||
0xDF66B319, 0x710E2288, 0x58C6967A, 0xF6AE07EB, 0x0B57FF9E, 0xA53F6E0F, 0x8CF7DAFD,
|
||||
0x229F4B6C, 0xAC752C56, 0x021DBDC7, 0x2BD50935, 0x85BD98A4, 0x784460D1, 0xD62CF140,
|
||||
0xFFE445B2, 0x518CD423, 0x5CFDEDF4, 0xF2957C65, 0xDB5DC897, 0x75355906, 0x88CCA173,
|
||||
0x26A430E2, 0x0F6C8410, 0xA1041581, 0x2FEE72BB, 0x8186E32A, 0xA84E57D8, 0x0626C649,
|
||||
0xFBDF3E3C, 0x55B7AFAD, 0x7C7F1B5F, 0xD2178ACE, 0xBADAD36A, 0x14B242FB, 0x3D7AF609,
|
||||
0x93126798, 0x6EEB9FED, 0xC0830E7C, 0xE94BBA8E, 0x47232B1F, 0xC9C94C25, 0x67A1DDB4,
|
||||
0x4E696946, 0xE001F8D7, 0x1DF800A2, 0xB3909133, 0x9A5825C1, 0x3430B450, 0x4BC29689,
|
||||
0xE5AA0718, 0xCC62B3EA, 0x620A227B, 0x9FF3DA0E, 0x319B4B9F, 0x1853FF6D, 0xB63B6EFC,
|
||||
0x38D109C6, 0x96B99857, 0xBF712CA5, 0x1119BD34, 0xECE04541, 0x4288D4D0, 0x6B406022,
|
||||
0xC528F1B3, 0xADE5A817, 0x038D3986, 0x2A458D74, 0x842D1CE5, 0x79D4E490, 0xD7BC7501,
|
||||
0xFE74C1F3, 0x501C5062, 0xDEF63758, 0x709EA6C9, 0x5956123B, 0xF73E83AA, 0x0AC77BDF,
|
||||
0xA4AFEA4E, 0x8D675EBC, 0x230FCF2D, 0x72831B0E, 0xDCEB8A9F, 0xF5233E6D, 0x5B4BAFFC,
|
||||
0xA6B25789, 0x08DAC618, 0x211272EA, 0x8F7AE37B, 0x01908441, 0xAFF815D0, 0x8630A122,
|
||||
0x285830B3, 0xD5A1C8C6, 0x7BC95957, 0x5201EDA5, 0xFC697C34, 0x94A42590, 0x3ACCB401,
|
||||
0x130400F3, 0xBD6C9162, 0x40956917, 0xEEFDF886, 0xC7354C74, 0x695DDDE5, 0xE7B7BADF,
|
||||
0x49DF2B4E, 0x60179FBC, 0xCE7F0E2D, 0x3386F658, 0x9DEE67C9, 0xB426D33B, 0x1A4E42AA,
|
||||
0x65BC6073, 0xCBD4F1E2, 0xE21C4510, 0x4C74D481, 0xB18D2CF4, 0x1FE5BD65, 0x362D0997,
|
||||
0x98459806, 0x16AFFF3C, 0xB8C76EAD, 0x910FDA5F, 0x3F674BCE, 0xC29EB3BB, 0x6CF6222A,
|
||||
0x453E96D8, 0xEB560749, 0x839B5EED, 0x2DF3CF7C, 0x043B7B8E, 0xAA53EA1F, 0x57AA126A,
|
||||
0xF9C283FB, 0xD00A3709, 0x7E62A698, 0xF088C1A2, 0x5EE05033, 0x7728E4C1, 0xD9407550,
|
||||
0x24B98D25, 0x8AD11CB4, 0xA319A846, 0x0D7139D7,
|
||||
],
|
||||
];
|
|
@ -0,0 +1 @@
|
|||
{"files":{"Cargo.toml":"072c7be7fbdda0977ca4c0609066a46c590d87f8c25ef75c66be5c5a0763f237","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"852bcc033a46c62f99fb5ffd43b3241ba2c0c440c6034aa2114505f2c4f03c4a","README.md":"eadf743c720d5ee3fcc8a6faeff390d3291fa2383425dd2dccd052b4583848d7","benches/bench.rs":"5a57c0cc31e38a5e4a6db435587f877ad9d71c0dc6b999971ed071a5c7d094a4","src/bit_reverse.rs":"0c5fe0a6fbd01d36b30d53d0215208aee1e6bdf6e02f66ffe149ca3c0c67071f","src/bitstream.rs":"e8c15c517d125a45b40a4f2b06a39bcb54fa559796e40465678e5fdefeedcc01","src/chained_hash_table.rs":"8d45799a74f15cdca5b4cdb75e715f0ef218b894622132b3a5f5026de4e25db1","src/checksum.rs":"2785f88ff8c96d5cf6f6919daf1a66121da293f9d0a19ff227ae6bd40309f5db","src/compress.rs":"ce28956bc0484e977def2e4930a953c2c54932f7614dcd27ff16aa323b2c66af","src/compression_options.rs":"585183818883cf64c3c9021e48be834a07699dc05d544644ecfe837fb17f112b","src/deflate_state.rs":"302a5af101c60158dc99ac9f4e62f68928eb4de39ff484e85ed017217203346e","src/encoder_state.rs":"25db212b800bb54909763d63e727578ba077d0e0082104ded9bcb2205ad6d65e","src/huffman_lengths.rs":"ee04034f4a843022422f98f2fd4c3054447ac13e585dd83ffab2225a487b001c","src/huffman_table.rs":"378f61513d2a918aabd0ccc2e0dfd601a6b3cea1854f1690b90df83c4e2b32a1","src/input_buffer.rs":"0c92be26b993345d0c7181a0150e644543d2ec9302d13ad1fe6bb5f1186a7425","src/length_encode.rs":"e6e3b254a0ad5eecde183b67761fd45d3314f4965b96f830fe8229e91b26debc","src/lib.rs":"8d473e400ee6b452920eaef2bb7765d83cce313b6883a33583466a99f83f3c9e","src/lz77.rs":"ec77dbed67a7a81ac0cfd50aeb05fb3f9fc4604414c0748b602eee1c72e529ac","src/lzvalue.rs":"4dc4a24689c6d56d2efa8e3d67d5a100f538006b49a2e6387b37638978175954","src/matching.rs":"8ab38d8a89b286c5dd618bf067feea61c2464313ac7b70a99c9fe8bece4d9756","src/output_writer.rs":"925046352f57f6494443d60ffcb1abb23f0d0afe2d829e76f9d60ea69f290253","src/rle.rs":"8ee49e091ca975aa11412b1f24d56fc9ae92666b95e52bdc9adbb2ffa765a190","src
/stored_block.rs":"7706ffb500a98414c9bced459903242560488fd1a5f8eeeab7ad0b6ce7e62654","src/test_utils.rs":"9a81b4678a766a5488fc75d46968086e27d211014039007ab130f59a972d18c2","src/writer.rs":"3bb7cc0322aa703aa8c346cef271757fda17ab809da2aa6440f22fd141f20427","src/zlib.rs":"13675ed7e8d4d355f93ee81e351fd0633a42b3299129c421b2d15b28c58abf3c"},"package":"050ef6de42a33903b30a7497b76b40d3d58691d4d3eec355348c122444a388f0"}
|
|
@ -0,0 +1,45 @@
|
|||
"""
|
||||
cargo-raze crate build file.
|
||||
|
||||
DO NOT EDIT! Replaced on runs of cargo-raze
|
||||
"""
|
||||
package(default_visibility = [
|
||||
# Public for visibility by "@raze__crate__version//" targets.
|
||||
#
|
||||
# Prefer access through "//third_party/cargo", which limits external
|
||||
# visibility to explicit Cargo.toml dependencies.
|
||||
"//visibility:public",
|
||||
])
|
||||
|
||||
licenses([
|
||||
"notice", # "MIT,Apache-2.0"
|
||||
])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_rust//rust:rust.bzl",
|
||||
"rust_library",
|
||||
"rust_binary",
|
||||
"rust_test",
|
||||
)
|
||||
|
||||
|
||||
# Unsupported target "bench" with type "bench" omitted
|
||||
|
||||
rust_library(
|
||||
name = "deflate",
|
||||
crate_root = "src/lib.rs",
|
||||
crate_type = "lib",
|
||||
edition = "2018",
|
||||
srcs = glob(["**/*.rs"]),
|
||||
deps = [
|
||||
"//third_party/cargo/vendor/adler32-1.0.4:adler32",
|
||||
"//third_party/cargo/vendor/byteorder-1.3.4:byteorder",
|
||||
],
|
||||
rustc_flags = [
|
||||
"--cap-lints=allow",
|
||||
],
|
||||
version = "0.8.3",
|
||||
crate_features = [
|
||||
],
|
||||
)
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
edition = "2018"
|
||||
name = "deflate"
|
||||
version = "0.8.3"
|
||||
authors = ["oyvindln <oyvindln@users.noreply.github.com>"]
|
||||
exclude = ["tests/*", "fuzz/*"]
|
||||
description = "A DEFLATE, zlib and gzip encoder written in rust.\n"
|
||||
homepage = "https://github.com/image-rs/deflate-rs"
|
||||
documentation = "https://docs.rs/deflate/"
|
||||
readme = "README.md"
|
||||
keywords = ["flate", "deflate", "zlib", "compression", "gzip"]
|
||||
categories = ["compression"]
|
||||
license = "MIT/Apache-2.0"
|
||||
repository = "https://github.com/image-rs/deflate-rs"
|
||||
[package.metadata.docs.rs]
|
||||
features = ["gzip"]
|
||||
[dependencies.adler32]
|
||||
version = "1.0.4"
|
||||
|
||||
[dependencies.byteorder]
|
||||
version = "1"
|
||||
|
||||
[dependencies.gzip-header]
|
||||
version = "0.3"
|
||||
optional = true
|
||||
[dev-dependencies.miniz_oxide]
|
||||
version = "0.3.6"
|
||||
|
||||
[features]
|
||||
benchmarks = []
|
||||
gzip = ["gzip-header"]
|
||||
[badges.travis-ci]
|
||||
branch = "dev"
|
||||
repository = "image-rs/deflate-rs"
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,49 @@
|
|||
# deflate-rs
|
||||
|
||||
[![Build Status](https://travis-ci.org/image-rs/deflate-rs.svg)](https://travis-ci.org/image-rs/deflate-rs)[![Crates.io](https://img.shields.io/crates/v/deflate.svg)](https://crates.io/crates/deflate)[![Docs](https://docs.rs/deflate/badge.svg)](https://docs.rs/deflate)
|
||||
|
||||
|
||||
An implementation of a [DEFLATE](http://www.gzip.org/zlib/rfc-deflate.html) encoder in pure Rust. Not a direct port, but does take some inspiration from [zlib](http://www.zlib.net/), [miniz](https://github.com/richgel999/miniz) and [zopfli](https://github.com/google/zopfli). The API is based on the one in the [flate2](https://crates.io/crates/flate2) crate that contains bindings, zlib miniz_oxide, and miniz.
|
||||
|
||||
Deflate encoding with and without zlib and gzip metadata (zlib dictionaries are not supported) is supported. No unsafe code is used.
|
||||
|
||||
This library is now mostly in maintenance mode, focus being on the Rust backend of [flate2](https://crates.io/crates/flate2) instead.
|
||||
|
||||
# Usage:
|
||||
## Simple compression function:
|
||||
``` rust
|
||||
use deflate::deflate_bytes;
|
||||
|
||||
let data = b"Some data";
|
||||
let compressed = deflate_bytes(&data);
|
||||
```
|
||||
|
||||
## Using a writer:
|
||||
|
||||
``` rust
|
||||
use std::io::Write;
|
||||
|
||||
use deflate::Compression;
|
||||
use deflate::write::ZlibEncoder;
|
||||
|
||||
let data = b"This is some test data";
|
||||
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::Default);
|
||||
encoder.write_all(data).unwrap();
|
||||
let compressed_data = encoder.finish().unwrap();
|
||||
```
|
||||
|
||||
# Other deflate/zlib Rust projects from various people
|
||||
* [flate2](http://alexcrichton.com/flate2-rs/flate2/index.html) FLATE, Gzip, and Zlib bindings for Rust - can use miniz_oxide for a pure Rust implementation.
|
||||
* [Zopfli in Rust](https://github.com/carols10cents/zopfli) Rust port of zopfli
|
||||
* [inflate](https://github.com/PistonDevelopers/inflate) DEFLATE decoder implemented in Rust
|
||||
* [miniz-oxide](https://github.com/Frommi/miniz_oxide) Port of miniz to Rust.
|
||||
* [libflate](https://github.com/sile/libflate) Another DEFLATE/Zlib/Gzip encoder and decoder written in Rust. (Only does some very light compression).
|
||||
|
||||
# License
|
||||
deflate is distributed under the terms of both the MIT and Apache 2.0 licences.
|
||||
|
||||
bitstream.rs is © @nwin and was released under both MIT and Apache 2.0
|
||||
|
||||
Some code in length_encode.rs has been ported from the `miniz` library, which is public domain.
|
||||
|
||||
The test data (src/pg11.txt) is borrowed from [Project Gutenberg](https://www.gutenberg.org/ebooks/11) and is available under public domain, or the Project Gutenberg Licence
|
|
@ -0,0 +1,107 @@
|
|||
#![feature(test)]
|
||||
|
||||
extern crate deflate;
|
||||
extern crate flate2;
|
||||
extern crate test;
|
||||
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
|
||||
use deflate::{deflate_bytes_zlib, deflate_bytes_zlib_conf, CompressionOptions};
|
||||
use flate2::write;
|
||||
use flate2::Compression;
|
||||
use test::Bencher;
|
||||
|
||||
fn load_from_file(name: &str) -> Vec<u8> {
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
let mut input = Vec::new();
|
||||
let mut f = File::open(name).unwrap();
|
||||
|
||||
f.read_to_end(&mut input).unwrap();
|
||||
input
|
||||
}
|
||||
|
||||
fn get_test_data() -> Vec<u8> {
|
||||
use std::env;
|
||||
let path = env::var("TEST_FILE").unwrap_or_else(|_| "tests/pg11.txt".to_string());
|
||||
load_from_file(&path)
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn test_file_zlib_def(b: &mut Bencher) {
|
||||
let test_data = get_test_data();
|
||||
|
||||
b.iter(|| deflate_bytes_zlib(&test_data));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn test_file_zlib_best(b: &mut Bencher) {
|
||||
let test_data = get_test_data();
|
||||
|
||||
b.iter(|| deflate_bytes_zlib_conf(&test_data, CompressionOptions::high()));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn test_file_zlib_fast(b: &mut Bencher) {
|
||||
let test_data = get_test_data();
|
||||
|
||||
b.iter(|| deflate_bytes_zlib_conf(&test_data, CompressionOptions::fast()));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn test_file_zlib_rle(b: &mut Bencher) {
|
||||
let test_data = get_test_data();
|
||||
|
||||
b.iter(|| deflate_bytes_zlib_conf(&test_data, CompressionOptions::rle()));
|
||||
}
|
||||
|
||||
fn deflate_bytes_flate2_zlib(level: Compression, input: &[u8]) -> Vec<u8> {
|
||||
use flate2::write::ZlibEncoder;
|
||||
|
||||
let mut e = ZlibEncoder::new(Vec::with_capacity(input.len() / 3), level);
|
||||
e.write_all(input).unwrap();
|
||||
e.finish().unwrap()
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn test_file_zlib_flate2_def(b: &mut Bencher) {
|
||||
let test_data = get_test_data();
|
||||
b.iter(|| deflate_bytes_flate2_zlib(Compression::default(), &test_data));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn test_file_zlib_flate2_best(b: &mut Bencher) {
|
||||
let test_data = get_test_data();
|
||||
b.iter(|| deflate_bytes_flate2_zlib(Compression::best(), &test_data));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn test_file_zlib_flate2_fast(b: &mut Bencher) {
|
||||
let test_data = get_test_data();
|
||||
b.iter(|| deflate_bytes_flate2_zlib(Compression::fast(), &test_data));
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
struct Dummy {}
|
||||
|
||||
impl Write for Dummy {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn writer_create(b: &mut Bencher) {
|
||||
use deflate::write::DeflateEncoder;
|
||||
b.iter(|| DeflateEncoder::new(Dummy {}, CompressionOptions::fast()));
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn writer_create_flate2(b: &mut Bencher) {
|
||||
b.iter(|| write::DeflateEncoder::new(Dummy {}, Compression::fast()));
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
/// Reverse the first length bits of n.
|
||||
/// (Passing more than 16 as length will produce garbage.
|
||||
pub fn reverse_bits(mut n: u16, length: u8) -> u16 {
|
||||
debug_assert!(length <= 16);
|
||||
// Borrowed from http://aggregate.org/MAGIC/#Bit%20Reversal
|
||||
n = ((n & 0xaaaa) >> 1) | ((n & 0x5555) << 1);
|
||||
n = ((n & 0xcccc) >> 2) | ((n & 0x3333) << 2);
|
||||
n = ((n & 0xf0f0) >> 4) | ((n & 0x0f0f) << 4);
|
||||
n = ((n & 0xff00) >> 8) | ((n & 0x00ff) << 8);
|
||||
n >> (16 - length)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::reverse_bits;
|
||||
#[test]
|
||||
fn test_bit_reverse() {
|
||||
assert_eq!(reverse_bits(0b0111_0100, 8), 0b0010_1110);
|
||||
assert_eq!(
|
||||
reverse_bits(0b1100_1100_1100_1100, 16),
|
||||
0b0011_0011_0011_0011
|
||||
);
|
||||
// Check that we ignore >16 length
|
||||
// We no longer check for this.
|
||||
// assert_eq!(reverse_bits(0b11001100_11001100, 32), 0b0011001100110011);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,229 @@
|
|||
// This was originally based on code from: https://github.com/nwin/lzw
|
||||
// Copyright (c) 2015 nwin
|
||||
// which is under both Apache 2.0 and MIT
|
||||
|
||||
//! This module provides a bit writer
|
||||
use std::io::{self, Write};
|
||||
|
||||
#[cfg(target_pointer_width = "64")]
|
||||
#[macro_use]
|
||||
mod arch_dep {
|
||||
/// The data type of the accumulator.
|
||||
/// a 64-bit value allows us to store more before
|
||||
/// each push to the vector, but is sub-optimal
|
||||
/// on 32-bit platforms.
|
||||
pub type AccType = u64;
|
||||
pub const FLUSH_AT: u8 = 48;
|
||||
/// Push pending bits to vector.
|
||||
/// Using a macro here since an inline function.
|
||||
/// didn't optimise properly.
|
||||
/// TODO June 2019: See if it's still needed.
|
||||
macro_rules! push {
|
||||
($s:ident) => {
|
||||
$s.w.extend_from_slice(
|
||||
&[
|
||||
$s.acc as u8,
|
||||
($s.acc >> 8) as u8,
|
||||
($s.acc >> 16) as u8,
|
||||
($s.acc >> 24) as u8,
|
||||
($s.acc >> 32) as u8,
|
||||
($s.acc >> 40) as u8,
|
||||
][..],
|
||||
)
|
||||
};
|
||||
}
|
||||
}
|
||||
#[cfg(not(target_pointer_width = "64"))]
|
||||
#[macro_use]
|
||||
mod arch_dep {
|
||||
pub type AccType = u32;
|
||||
pub const FLUSH_AT: u8 = 16;
|
||||
macro_rules! push {
|
||||
($s:ident) => {
|
||||
// Unlike the 64-bit case, using copy_from_slice seemed to worsen performance here.
|
||||
// TODO: Needs benching on a 32-bit system to see what works best.
|
||||
$s.w.push($s.acc as u8);
|
||||
$s.w.push(($s.acc >> 8) as u8);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
use self::arch_dep::*;
|
||||
|
||||
/// Writes bits to a byte stream, LSB first.
|
||||
pub struct LsbWriter {
|
||||
// Public for now so it can be replaced after initialization.
|
||||
pub w: Vec<u8>,
|
||||
bits: u8,
|
||||
acc: AccType,
|
||||
}
|
||||
|
||||
impl LsbWriter {
|
||||
/// Creates a new bit reader
|
||||
pub fn new(writer: Vec<u8>) -> LsbWriter {
|
||||
LsbWriter {
|
||||
w: writer,
|
||||
bits: 0,
|
||||
acc: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pending_bits(&self) -> u8 {
|
||||
self.bits
|
||||
}
|
||||
|
||||
/// Buffer n number of bits, and write them to the vec if there are enough pending bits.
|
||||
pub fn write_bits(&mut self, v: u16, n: u8) {
|
||||
// NOTE: This outputs garbage data if n is 0, but v is not 0
|
||||
self.acc |= (AccType::from(v)) << self.bits;
|
||||
self.bits += n;
|
||||
// Waiting until we have FLUSH_AT bits and pushing them all in one batch.
|
||||
while self.bits >= FLUSH_AT {
|
||||
push!(self);
|
||||
self.acc >>= FLUSH_AT;
|
||||
self.bits -= FLUSH_AT;
|
||||
}
|
||||
}
|
||||
|
||||
fn write_bits_finish(&mut self, v: u16, n: u8) {
|
||||
// NOTE: This outputs garbage data if n is 0, but v is not 0
|
||||
self.acc |= (AccType::from(v)) << self.bits;
|
||||
self.bits += n % 8;
|
||||
while self.bits >= 8 {
|
||||
self.w.push(self.acc as u8);
|
||||
self.acc >>= 8;
|
||||
self.bits -= 8;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn flush_raw(&mut self) {
|
||||
let missing = FLUSH_AT - self.bits;
|
||||
// Have to test for self.bits > 0 here,
|
||||
// otherwise flush would output an extra byte when flush was called at a byte boundary
|
||||
if missing > 0 && self.bits > 0 {
|
||||
self.write_bits_finish(0, missing);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Write for LsbWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
if self.acc == 0 {
|
||||
self.w.extend_from_slice(buf)
|
||||
} else {
|
||||
for &byte in buf.iter() {
|
||||
self.write_bits(u16::from(byte), 8)
|
||||
}
|
||||
}
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
self.flush_raw();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::LsbWriter;
|
||||
|
||||
#[test]
|
||||
fn write_bits() {
|
||||
let input = [
|
||||
(3, 3),
|
||||
(10, 8),
|
||||
(88, 7),
|
||||
(0, 2),
|
||||
(0, 5),
|
||||
(0, 0),
|
||||
(238, 8),
|
||||
(126, 8),
|
||||
(161, 8),
|
||||
(10, 8),
|
||||
(238, 8),
|
||||
(174, 8),
|
||||
(126, 8),
|
||||
(174, 8),
|
||||
(65, 8),
|
||||
(142, 8),
|
||||
(62, 8),
|
||||
(10, 8),
|
||||
(1, 8),
|
||||
(161, 8),
|
||||
(78, 8),
|
||||
(62, 8),
|
||||
(158, 8),
|
||||
(206, 8),
|
||||
(10, 8),
|
||||
(64, 7),
|
||||
(0, 0),
|
||||
(24, 5),
|
||||
(0, 0),
|
||||
(174, 8),
|
||||
(126, 8),
|
||||
(193, 8),
|
||||
(174, 8),
|
||||
];
|
||||
let expected = [
|
||||
83, 192, 2, 220, 253, 66, 21, 220, 93, 253, 92, 131, 28, 125, 20, 2, 66, 157, 124, 60,
|
||||
157, 21, 128, 216, 213, 47, 216, 21,
|
||||
];
|
||||
let mut writer = LsbWriter::new(Vec::new());
|
||||
for v in input.iter() {
|
||||
writer.write_bits(v.0, v.1);
|
||||
}
|
||||
writer.flush_raw();
|
||||
assert_eq!(writer.w, expected);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(all(test, feature = "benchmarks"))]
|
||||
mod bench {
|
||||
use super::LsbWriter;
|
||||
use test_std::Bencher;
|
||||
#[bench]
|
||||
fn bit_writer(b: &mut Bencher) {
|
||||
let input = [
|
||||
(3, 3),
|
||||
(10, 8),
|
||||
(88, 7),
|
||||
(0, 2),
|
||||
(0, 5),
|
||||
(0, 0),
|
||||
(238, 8),
|
||||
(126, 8),
|
||||
(161, 8),
|
||||
(10, 8),
|
||||
(238, 8),
|
||||
(174, 8),
|
||||
(126, 8),
|
||||
(174, 8),
|
||||
(65, 8),
|
||||
(142, 8),
|
||||
(62, 8),
|
||||
(10, 8),
|
||||
(1, 8),
|
||||
(161, 8),
|
||||
(78, 8),
|
||||
(62, 8),
|
||||
(158, 8),
|
||||
(206, 8),
|
||||
(10, 8),
|
||||
(64, 7),
|
||||
(0, 0),
|
||||
(24, 5),
|
||||
(0, 0),
|
||||
(174, 8),
|
||||
(126, 8),
|
||||
(193, 8),
|
||||
(174, 8),
|
||||
];
|
||||
let mut writer = LsbWriter::new(Vec::with_capacity(100));
|
||||
b.iter(|| {
|
||||
for v in input.iter() {
|
||||
let _ = writer.write_bits(v.0, v.1);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
|
@ -0,0 +1,351 @@
|
|||
pub const WINDOW_SIZE: usize = 32768;
|
||||
pub const WINDOW_MASK: usize = WINDOW_SIZE - 1;
|
||||
#[cfg(test)]
|
||||
pub const HASH_BYTES: usize = 3;
|
||||
const HASH_SHIFT: u16 = 5;
|
||||
const HASH_MASK: u16 = WINDOW_MASK as u16;
|
||||
|
||||
/// Helper struct to let us allocate both head and prev in the same block.
|
||||
struct Tables {
|
||||
/// Starts of hash chains (in prev)
|
||||
pub head: [u16; WINDOW_SIZE],
|
||||
/// Link to previous occurence of this hash value
|
||||
pub prev: [u16; WINDOW_SIZE],
|
||||
}
|
||||
|
||||
impl Default for Tables {
|
||||
#[inline]
|
||||
fn default() -> Tables {
|
||||
Tables {
|
||||
head: [0; WINDOW_SIZE],
|
||||
prev: [0; WINDOW_SIZE],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Tables {
|
||||
#[inline]
|
||||
fn fill_prev(&mut self) {
|
||||
self.prev.copy_from_slice(&self.head);
|
||||
}
|
||||
}
|
||||
|
||||
/// Create and box the hash chains.
|
||||
fn create_tables() -> Box<Tables> {
|
||||
// Using default here is a trick to get around the lack of box syntax on stable rust.
|
||||
//
|
||||
// Box::new([0u16,n]) ends up creating an temporary array on the stack which is not optimised
|
||||
// but using default, which simply calls `box value` internally allows us to get around this.
|
||||
//
|
||||
// We could use vec instead, but using a boxed array helps the compiler optimise
|
||||
// away bounds checks as `n & WINDOW_MASK < WINDOW_SIZE` will always be true.
|
||||
let mut t: Box<Tables> = Box::default();
|
||||
|
||||
for (n, b) in t.head.iter_mut().enumerate() {
|
||||
*b = n as u16;
|
||||
}
|
||||
|
||||
t.fill_prev();
|
||||
|
||||
t
|
||||
}
|
||||
|
||||
/// Returns a new hash value based on the previous value and the next byte
|
||||
#[inline]
|
||||
pub fn update_hash(current_hash: u16, to_insert: u8) -> u16 {
|
||||
update_hash_conf(current_hash, to_insert, HASH_SHIFT, HASH_MASK)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn update_hash_conf(current_hash: u16, to_insert: u8, shift: u16, mask: u16) -> u16 {
|
||||
((current_hash << shift) ^ (u16::from(to_insert))) & mask
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn reset_array(arr: &mut [u16; WINDOW_SIZE]) {
|
||||
for (n, b) in arr.iter_mut().enumerate() {
|
||||
*b = n as u16;
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ChainedHashTable {
|
||||
// Current running hash value of the last 3 bytes
|
||||
current_hash: u16,
|
||||
// Hash chains.
|
||||
c: Box<Tables>,
|
||||
// Used for testing
|
||||
// count: DebugCounter,
|
||||
}
|
||||
|
||||
impl ChainedHashTable {
|
||||
pub fn new() -> ChainedHashTable {
|
||||
ChainedHashTable {
|
||||
current_hash: 0,
|
||||
c: create_tables(),
|
||||
//count: DebugCounter::default(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn from_starting_values(v1: u8, v2: u8) -> ChainedHashTable {
|
||||
let mut t = ChainedHashTable::new();
|
||||
t.current_hash = update_hash(t.current_hash, v1);
|
||||
t.current_hash = update_hash(t.current_hash, v2);
|
||||
t
|
||||
}
|
||||
|
||||
/// Resets the hash value and hash chains
|
||||
pub fn reset(&mut self) {
|
||||
self.current_hash = 0;
|
||||
reset_array(&mut self.c.head);
|
||||
{
|
||||
let h = self.c.head;
|
||||
let mut c = self.c.prev;
|
||||
c[..].copy_from_slice(&h[..]);
|
||||
}
|
||||
/*if cfg!(debug_assertions) {
|
||||
self.count.reset();
|
||||
}*/
|
||||
}
|
||||
|
||||
pub fn add_initial_hash_values(&mut self, v1: u8, v2: u8) {
|
||||
self.current_hash = update_hash(self.current_hash, v1);
|
||||
self.current_hash = update_hash(self.current_hash, v2);
|
||||
}
|
||||
|
||||
/// Insert a byte into the hash table
|
||||
#[inline]
|
||||
pub fn add_hash_value(&mut self, position: usize, value: u8) {
|
||||
// Check that all bytes are input in order and at the correct positions.
|
||||
// Disabled for now as it breaks when sync flushing.
|
||||
/*debug_assert_eq!(
|
||||
position & WINDOW_MASK,
|
||||
self.count.get() as usize & WINDOW_MASK
|
||||
);*/
|
||||
debug_assert!(
|
||||
position < WINDOW_SIZE * 2,
|
||||
"Position is larger than 2 * window size! {}",
|
||||
position
|
||||
);
|
||||
// Storing the hash in a temporary variable here makes the compiler avoid the
|
||||
// bounds checks in this function.
|
||||
let new_hash = update_hash(self.current_hash, value);
|
||||
|
||||
self.add_with_hash(position, new_hash);
|
||||
|
||||
// Update the stored hash value with the new hash.
|
||||
self.current_hash = new_hash;
|
||||
}
|
||||
|
||||
/// Directly set the current hash value
|
||||
#[inline]
|
||||
pub fn set_hash(&mut self, hash: u16) {
|
||||
self.current_hash = hash;
|
||||
}
|
||||
|
||||
/// Update the tables directly, providing the hash.
|
||||
#[inline]
|
||||
pub fn add_with_hash(&mut self, position: usize, hash: u16) {
|
||||
/*if cfg!(debug_assertions) {
|
||||
self.count.add(1);
|
||||
}*/
|
||||
|
||||
self.c.prev[position & WINDOW_MASK] = self.c.head[hash as usize];
|
||||
|
||||
// Ignoring any bits over 16 here is deliberate, as we only concern ourselves about
|
||||
// where in the buffer (which is 64k bytes) we are referring to.
|
||||
self.c.head[hash as usize] = position as u16;
|
||||
}
|
||||
|
||||
// Get the head of the hash chain for the current hash value
|
||||
#[cfg(test)]
|
||||
#[inline]
|
||||
pub fn current_head(&self) -> u16 {
|
||||
self.c.head[self.current_hash as usize]
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn current_hash(&self) -> u16 {
|
||||
self.current_hash
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_prev(&self, bytes: usize) -> u16 {
|
||||
self.c.prev[bytes & WINDOW_MASK]
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[inline]
|
||||
pub fn farthest_next(&self, match_pos: usize, match_len: usize) -> usize {
|
||||
let to_check = match_len.saturating_sub(2);
|
||||
|
||||
let mut n = 0;
|
||||
let mut smallest_prev = self.get_prev(match_pos);
|
||||
let mut smallest_pos = 0;
|
||||
while n < to_check {
|
||||
let prev = self.get_prev(match_pos + n);
|
||||
if prev < smallest_prev {
|
||||
smallest_prev = prev;
|
||||
smallest_pos = n;
|
||||
}
|
||||
n += 1;
|
||||
}
|
||||
smallest_pos
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn slide_value(b: u16, pos: u16, bytes: u16) -> u16 {
|
||||
if b >= bytes {
|
||||
b - bytes
|
||||
} else {
|
||||
pos
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn slide_table(table: &mut [u16; WINDOW_SIZE], bytes: u16) {
|
||||
for (n, b) in table.iter_mut().enumerate() {
|
||||
*b = ChainedHashTable::slide_value(*b, n as u16, bytes);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn slide(&mut self, bytes: usize) {
|
||||
/*if cfg!(debug_assertions) && bytes != WINDOW_SIZE {
|
||||
// This should only happen in tests in this file.
|
||||
self.count.reset();
|
||||
}*/
|
||||
ChainedHashTable::slide_table(&mut self.c.head, bytes as u16);
|
||||
ChainedHashTable::slide_table(&mut self.c.prev, bytes as u16);
|
||||
}
|
||||
}
|
||||
|
||||
/// Test helper: build a hash table seeded from the first two bytes of `data`
/// and filled with the remaining bytes, mimicking normal compressor operation.
#[cfg(test)]
pub fn filled_hash_table(data: &[u8]) -> ChainedHashTable {
    // The table can address at most two windows of input (plus the two seed bytes).
    assert!(data.len() <= (WINDOW_SIZE * 2) + 2);
    let mut table = ChainedHashTable::from_starting_values(data[0], data[1]);
    for (position, &byte) in data[2..].iter().enumerate() {
        table.add_hash_value(position, byte);
    }
    table
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::{filled_hash_table, ChainedHashTable};

    #[test]
    fn chained_hash() {
        use std::str;

        let test_string = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do \
                           eiusmod tempor. rum. incididunt ut labore et dolore magna aliqua. Ut \
                           enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi \
                           ut aliquip ex ea commodo consequat. rum. Duis aute irure dolor in \
                           reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla \
                           pariatur. Excepteur sint occaecat cupidatat non proident, sunt in \
                           culpa qui officia deserunt mollit anim id est laborum.";

        let test_data = test_string.as_bytes();

        // The final HASH_BYTES bytes of the input, whose hash chain we will walk.
        let current_bytes = &test_data[test_data.len() - super::HASH_BYTES..test_data.len()];

        let num_iters = test_string
            .matches(str::from_utf8(current_bytes).unwrap())
            .count();

        let hash_table = filled_hash_table(test_data);

        // Test that the positions in the chain are valid
        let mut prev_value = hash_table.get_prev(hash_table.current_head() as usize) as usize;
        let mut count = 0;
        let mut current = hash_table.current_head() as usize;
        // Walk the chain until a link points at itself (end of chain).
        while current != prev_value {
            count += 1;
            current = prev_value;
            prev_value = hash_table.get_prev(prev_value) as usize;
        }
        // There should be at least as many occurrences of the hash of the checked bytes as the
        // number of occurrences of the checked bytes themselves. As the hashes are not large
        // enough to store 8 * 3 = 24 bits, there could be more with different input data.
        assert!(count >= num_iters);
    }

    #[test]
    fn table_unique() {
        let mut test_data = Vec::new();
        test_data.extend(0u8..255);
        // NOTE(review): `255u8..0` is an empty range, so this `extend` adds nothing.
        // A descending half (e.g. `(1..=255u8).rev()`) was presumably intended; the
        // test still passes because the ascending part alone has unique sequences.
        // Confirm the intent before changing it.
        test_data.extend(255u8..0);
        let hash_table = filled_hash_table(&test_data);
        let prev_pos = hash_table.get_prev(hash_table.current_head() as usize);
        // Since all sequences in the input are unique, there shouldn't be any previous values.
        assert_eq!(prev_pos, hash_table.current_hash());
    }

    #[test]
    fn table_slide() {
        use std::fs::File;
        use std::io::Read;

        let window_size = super::WINDOW_SIZE;
        let window_size16 = super::WINDOW_SIZE as u16;

        let mut input = Vec::new();

        let mut f = File::open("tests/pg11.txt").unwrap();

        f.read_to_end(&mut input).unwrap();

        // Fill one full window (plus the two seed bytes)...
        let mut hash_table = filled_hash_table(&input[..window_size + 2]);

        // ...then insert the same data again one window further along.
        for (n, b) in input[2..window_size + 2].iter().enumerate() {
            hash_table.add_hash_value(n + window_size, *b);
        }

        hash_table.slide(window_size);

        {
            let max_head = hash_table.c.head.iter().max().unwrap();
            // After sliding there should be no hashes referring to values
            // higher than the window size
            assert!(*max_head < window_size16);
            assert!(*max_head > 0);
            let pos = hash_table.get_prev(hash_table.current_head() as usize);
            // There should be a previous occurrence since we inserted the data 3 times
            assert!(pos < window_size16);
            assert!(pos > 0);
        }

        // Insert half a window of the same data once more, past the slid window.
        for (n, b) in input[2..(window_size / 2)].iter().enumerate() {
            hash_table.add_hash_value(n + window_size, *b);
        }

        // There should be hashes referring to values in the upper part of the input window
        // at this point
        let max_prev = hash_table.c.prev.iter().max().unwrap();
        assert!(*max_prev > window_size16);

        let mut pos = hash_table.current_head();
        // There should be a previous occurrence since we inserted the data 3 times
        assert!(pos > window_size16);
        let end_byte = input[(window_size / 2) - 1 - 2];
        let mut iterations = 0;
        // Walk the chain (bounded to avoid an infinite loop on a broken table) and
        // check each chained position refers to the same byte.
        while pos > window_size16 && iterations < 5000 {
            assert_eq!(input[pos as usize & window_size - 1], end_byte);

            pos = hash_table.get_prev(pos as usize);
            iterations += 1;
        }
    }

    #[test]
    /// Ensure that the initial hash values are correct.
    fn initial_chains() {
        let t = ChainedHashTable::new();
        for (n, &b) in t.c.head.iter().enumerate() {
            assert_eq!(n, b as usize);
        }
        for (n, &b) in t.c.prev.iter().enumerate() {
            assert_eq!(n, b as usize);
        }
    }
}
|
|
@ -0,0 +1,71 @@
|
|||
use adler32::RollingAdler32;
|
||||
|
||||
/// A checksum that can be updated incrementally as input bytes are consumed.
pub trait RollingChecksum {
    /// Fold a single byte into the running checksum.
    fn update(&mut self, byte: u8);
    /// Fold a slice of bytes into the running checksum.
    fn update_from_slice(&mut self, data: &[u8]);
    /// The current checksum value.
    fn current_hash(&self) -> u32;
}
|
||||
|
||||
pub struct NoChecksum {}
|
||||
|
||||
impl NoChecksum {
|
||||
pub fn new() -> NoChecksum {
|
||||
NoChecksum {}
|
||||
}
|
||||
}
|
||||
|
||||
impl RollingChecksum for NoChecksum {
|
||||
fn update(&mut self, _: u8) {}
|
||||
fn update_from_slice(&mut self, _: &[u8]) {}
|
||||
fn current_hash(&self) -> u32 {
|
||||
1
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> RollingChecksum for &'a mut NoChecksum {
|
||||
fn update(&mut self, _: u8) {}
|
||||
fn update_from_slice(&mut self, _: &[u8]) {}
|
||||
fn current_hash(&self) -> u32 {
|
||||
1
|
||||
}
|
||||
}
|
||||
|
||||
/// A rolling Adler-32 checksum, wrapping `RollingAdler32` from the `adler32` crate.
pub struct Adler32Checksum {
    // Running checksum state.
    adler32: RollingAdler32,
}

impl Adler32Checksum {
    /// Create a checksum initialised to the Adler-32 starting value.
    pub fn new() -> Adler32Checksum {
        Adler32Checksum {
            adler32: RollingAdler32::new(),
        }
    }
}

impl RollingChecksum for Adler32Checksum {
    fn update(&mut self, byte: u8) {
        self.adler32.update(byte);
    }

    fn update_from_slice(&mut self, data: &[u8]) {
        self.adler32.update_buffer(data);
    }

    fn current_hash(&self) -> u32 {
        self.adler32.hash()
    }
}

impl<'a> RollingChecksum for &'a mut Adler32Checksum {
    fn update(&mut self, byte: u8) {
        self.adler32.update(byte);
    }

    fn update_from_slice(&mut self, data: &[u8]) {
        self.adler32.update_buffer(data);
    }

    fn current_hash(&self) -> u32 {
        self.adler32.hash()
    }
}
|
|
@ -0,0 +1,353 @@
|
|||
use std::io;
|
||||
use std::io::Write;
|
||||
|
||||
use crate::bitstream::LsbWriter;
|
||||
use crate::deflate_state::DeflateState;
|
||||
use crate::encoder_state::EncoderState;
|
||||
use crate::huffman_lengths::{gen_huffman_lengths, write_huffman_lengths, BlockType};
|
||||
use crate::lz77::{lz77_compress_block, LZ77Status};
|
||||
use crate::lzvalue::LZValue;
|
||||
use crate::stored_block::{compress_block_stored, write_stored_header, MAX_STORED_BLOCK_LENGTH};
|
||||
|
||||
// Flush the internal output buffer to the wrapped writer once it grows past
// this size (32 KiB).
const LARGEST_OUTPUT_BUF_SIZE: usize = 1024 * 32;
|
||||
|
||||
/// Flush mode to use when compressing input received in multiple steps.
///
/// (The more obscure ZLIB flush modes are not implemented.)
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub enum Flush {
    /// Simply wait for more input when we are out of input data to process.
    None,
    /// Send a "sync block", corresponding to Z_SYNC_FLUSH in zlib. This finishes compressing and
    /// outputting all pending data, and then outputs an empty stored block.
    /// (That is, the block header indicating a stored block followed by `0000FFFF`).
    Sync,
    /// Not implemented (one of the more obscure zlib flush modes).
    _Partial,
    /// Not implemented (one of the more obscure zlib flush modes).
    _Block,
    /// Not implemented (one of the more obscure zlib flush modes).
    _Full,
    /// Finish compressing and output all remaining input.
    Finish,
}
|
||||
|
||||
/// Write all the lz77 encoded data in the buffer using the specified `EncoderState`, and finish
|
||||
/// with the end of block code.
|
||||
pub fn flush_to_bitstream(buffer: &[LZValue], state: &mut EncoderState) {
|
||||
for &b in buffer {
|
||||
state.write_lzvalue(b.value());
|
||||
}
|
||||
state.write_end_of_block()
|
||||
}
|
||||
|
||||
/// Compress the input data using only fixed huffman codes.
///
/// Currently only used in tests. Returns the compressed bytes.
#[cfg(test)]
pub fn compress_data_fixed(input: &[u8]) -> Vec<u8> {
    use crate::lz77::lz77_compress;

    let mut state = EncoderState::fixed(Vec::new());
    let compressed = lz77_compress(input).unwrap();

    // We currently don't split blocks here (this function is just used for tests anyhow),
    // so a single fixed, final block is written.
    state.write_start_of_block(true, true);
    flush_to_bitstream(&compressed, &mut state);

    state.flush();
    // `reset` swaps in a fresh buffer and hands back the one holding the output.
    state.reset(Vec::new())
}
|
||||
|
||||
/// Write `input` as one or more stored (uncompressed) deflate blocks, splitting
/// it into chunks of at most `MAX_STORED_BLOCK_LENGTH` bytes.
///
/// Only the last chunk may carry the final-block flag, and only when
/// `final_block` is set. An empty `input` emits a single empty stored block
/// (used for sync flushes).
fn write_stored_block(input: &[u8], mut writer: &mut LsbWriter, final_block: bool) {
    // If the input is not zero, we write stored blocks for the input data.
    if !input.is_empty() {
        // Peekable iteration lets us detect the last chunk before writing its header.
        let mut i = input.chunks(MAX_STORED_BLOCK_LENGTH).peekable();

        while let Some(chunk) = i.next() {
            let last_chunk = i.peek().is_none();
            // Write the block header
            write_stored_header(writer, final_block && last_chunk);

            // Write the actual data.
            compress_block_stored(chunk, &mut writer).expect("Write error");
        }
    } else {
        // If the input length is zero, we output an empty block. This is used for syncing.
        write_stored_header(writer, final_block);
        compress_block_stored(&[], &mut writer).expect("Write error");
    }
}
|
||||
|
||||
/// Inner compression function used by both the writers and the simple compression functions.
///
/// Consumes data from `input`, compressing block by block into `deflate_state`'s
/// output buffer and flushing that buffer to the wrapped writer as it fills.
/// Returns the number of input bytes consumed. Returns an `Interrupted` error if
/// the internal output buffer was already full on entry and nothing could be
/// consumed.
pub fn compress_data_dynamic_n<W: Write>(
    input: &[u8],
    deflate_state: &mut DeflateState<W>,
    flush: Flush,
) -> io::Result<usize> {
    let mut bytes_written = 0;

    // Remaining, not-yet-consumed part of the input.
    let mut slice = input;

    loop {
        let output_buf_len = deflate_state.output_buf().len();
        let output_buf_pos = deflate_state.output_buf_pos;
        // If the output buffer has too much data in it already, flush it before doing anything
        // else.
        if output_buf_len > LARGEST_OUTPUT_BUF_SIZE {
            let written = deflate_state
                .inner
                .as_mut()
                .expect("Missing writer!")
                .write(&deflate_state.encoder_state.inner_vec()[output_buf_pos..])?;

            if written < output_buf_len.checked_sub(output_buf_pos).unwrap() {
                // Only some of the data was flushed, so keep track of where we were.
                deflate_state.output_buf_pos += written;
            } else {
                // If we flushed all of the output, reset the output buffer.
                deflate_state.output_buf_pos = 0;
                deflate_state.output_buf().clear();
            }

            if bytes_written == 0 {
                // If the buffer was already full when the function was called, this has to be
                // returned rather than Ok(0) to indicate that we didn't write anything, but are
                // not done yet.
                return Err(io::Error::new(
                    io::ErrorKind::Interrupted,
                    "Internal buffer full.",
                ));
            } else {
                return Ok(bytes_written);
            }
        }

        if deflate_state.lz77_state.is_last_block() {
            // The last block has already been written, so we don't have anything to compress.
            break;
        }

        let (written, status, position) = lz77_compress_block(
            slice,
            &mut deflate_state.lz77_state,
            &mut deflate_state.input_buffer,
            &mut deflate_state.lz77_writer,
            flush,
        );

        // Bytes written in this call
        bytes_written += written;
        // Total bytes written since the compression process started
        // TODO: Should we realistically have to worry about overflowing here?
        deflate_state.bytes_written += written as u64;

        if status == LZ77Status::NeedInput {
            // If we've consumed all the data input so far, and we're not
            // finishing or syncing or ending the block here, simply return
            // the number of bytes consumed so far.
            return Ok(bytes_written);
        }

        // Increment start of input data
        slice = &slice[written..];

        // We need to check if this is the last block as the header will then be
        // slightly different to indicate this.
        let last_block = deflate_state.lz77_state.is_last_block();

        let current_block_input_bytes = deflate_state.lz77_state.current_block_input_bytes();

        if cfg!(debug_assertions) {
            deflate_state
                .bytes_written_control
                .add(current_block_input_bytes);
        }

        let partial_bits = deflate_state.encoder_state.writer.pending_bits();

        // Decide which block type (dynamic/fixed/stored) is cheapest for this block,
        // generating the dynamic Huffman lengths in the process.
        let res = {
            let (l_freqs, d_freqs) = deflate_state.lz77_writer.get_frequencies();
            let (l_lengths, d_lengths) =
                deflate_state.encoder_state.huffman_table.get_lengths_mut();

            gen_huffman_lengths(
                l_freqs,
                d_freqs,
                current_block_input_bytes,
                partial_bits,
                l_lengths,
                d_lengths,
                &mut deflate_state.length_buffers,
            )
        };

        // Check if we've actually managed to compress the input, and output stored blocks
        // if not.
        match res {
            BlockType::Dynamic(header) => {
                // Write the block header.
                deflate_state
                    .encoder_state
                    .write_start_of_block(false, last_block);

                // Output the lengths of the huffman codes used in this block.
                write_huffman_lengths(
                    &header,
                    &deflate_state.encoder_state.huffman_table,
                    &deflate_state.length_buffers.length_buf,
                    &mut deflate_state.encoder_state.writer,
                );

                // Update the huffman codes that will be used to encode the
                // lz77-compressed data.
                deflate_state
                    .encoder_state
                    .huffman_table
                    .update_from_lengths();

                // Write the huffman compressed data and the end of block marker.
                flush_to_bitstream(
                    deflate_state.lz77_writer.get_buffer(),
                    &mut deflate_state.encoder_state,
                );
            }
            BlockType::Fixed => {
                // Write the block header for fixed code blocks.
                deflate_state
                    .encoder_state
                    .write_start_of_block(true, last_block);

                // Use the pre-defined static huffman codes.
                deflate_state.encoder_state.set_huffman_to_fixed();

                // Write the compressed data and the end of block marker.
                flush_to_bitstream(
                    deflate_state.lz77_writer.get_buffer(),
                    &mut deflate_state.encoder_state,
                );
            }
            BlockType::Stored => {
                // If compression fails, output a stored block instead.

                let start_pos = position.saturating_sub(current_block_input_bytes as usize);

                assert!(
                    position >= current_block_input_bytes as usize,
                    "Error! Trying to output a stored block with forgotten data!\
                     if you encounter this error, please file an issue!"
                );

                write_stored_block(
                    &deflate_state.input_buffer.get_buffer()[start_pos..position],
                    &mut deflate_state.encoder_state.writer,
                    flush == Flush::Finish && last_block,
                );
            }
        };

        // Clear the current lz77 data in the writer for the next call.
        deflate_state.lz77_writer.clear();
        // We are done with the block, so we reset the number of bytes taken
        // for the next one.
        deflate_state.lz77_state.reset_input_bytes();

        // We are done for now.
        if status == LZ77Status::Finished {
            // This flush mode means that there should be an empty stored block at the end.
            if flush == Flush::Sync {
                write_stored_block(&[], &mut deflate_state.encoder_state.writer, false);
            } else if !deflate_state.lz77_state.is_last_block() {
                // Make sure a block with the last block header has been output.
                // Not sure this can actually happen, but we make sure to finish properly
                // if it somehow does.
                // An empty fixed block is the shortest.
                let es = &mut deflate_state.encoder_state;
                es.set_huffman_to_fixed();
                es.write_start_of_block(true, true);
                es.write_end_of_block();
            }
            break;
        }
    }

    // If we reach this point, the remaining data in the buffers is to be flushed.
    deflate_state.encoder_state.flush();
    // Make sure we've output everything, and return the number of bytes written if everything
    // went well.
    let output_buf_pos = deflate_state.output_buf_pos;
    let written_to_writer = deflate_state
        .inner
        .as_mut()
        .expect("Missing writer!")
        .write(&deflate_state.encoder_state.inner_vec()[output_buf_pos..])?;
    if written_to_writer
        < deflate_state
            .output_buf()
            .len()
            .checked_sub(output_buf_pos)
            .unwrap()
    {
        // Partial write: remember how far we got for the next call.
        deflate_state.output_buf_pos += written_to_writer;
    } else {
        // If we successfully wrote all the data, we can clear the output buffer.
        deflate_state.output_buf_pos = 0;
        deflate_state.output_buf().clear();
    }
    Ok(bytes_written)
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::test_utils::{decompress_to_end, get_test_data};
|
||||
|
||||
#[test]
|
||||
/// Test compressing a short string using fixed encoding.
|
||||
fn fixed_string_mem() {
|
||||
let test_data = String::from(" GNU GENERAL PUBLIC LICENSE").into_bytes();
|
||||
let compressed = compress_data_fixed(&test_data);
|
||||
|
||||
let result = decompress_to_end(&compressed);
|
||||
|
||||
assert_eq!(test_data, result);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fixed_data() {
|
||||
let data = vec![190u8; 400];
|
||||
let compressed = compress_data_fixed(&data);
|
||||
let result = decompress_to_end(&compressed);
|
||||
|
||||
assert_eq!(data, result);
|
||||
}
|
||||
|
||||
/// Test deflate example.
|
||||
///
|
||||
/// Check if the encoder produces the same code as the example given by Mark Adler here:
|
||||
/// https://stackoverflow.com/questions/17398931/deflate-encoding-with-static-huffman-codes/17415203
|
||||
#[test]
|
||||
fn fixed_example() {
|
||||
let test_data = b"Deflate late";
|
||||
// let check =
|
||||
// [0x73, 0x49, 0x4d, 0xcb, 0x49, 0x2c, 0x49, 0x55, 0xc8, 0x49, 0x2c, 0x49, 0x5, 0x0];
|
||||
let check = [
|
||||
0x73, 0x49, 0x4d, 0xcb, 0x49, 0x2c, 0x49, 0x55, 0x00, 0x11, 0x00,
|
||||
];
|
||||
let compressed = compress_data_fixed(test_data);
|
||||
assert_eq!(&compressed, &check);
|
||||
let decompressed = decompress_to_end(&compressed);
|
||||
assert_eq!(&decompressed, test_data)
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Test compression from a file.
|
||||
fn fixed_string_file() {
|
||||
let input = get_test_data();
|
||||
|
||||
let compressed = compress_data_fixed(&input);
|
||||
println!("Fixed codes compressed len: {}", compressed.len());
|
||||
let result = decompress_to_end(&compressed);
|
||||
|
||||
assert_eq!(input.len(), result.len());
|
||||
// Not using assert_eq here deliberately to avoid massive amounts of output spam.
|
||||
assert!(input == result);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,196 @@
|
|||
//! This module contains the various options to tweak how compression is performed.
|
||||
//!
|
||||
//! Note that due to the nature of the `DEFLATE` format, lower compression levels
|
||||
//! may for some data compress better than higher compression levels.
|
||||
//!
|
||||
//! For applications where a maximum level of compression (irrespective of compression
//! speed) is required, consider using the [`Zopfli`](https://crates.io/crates/zopfli)
//! compressor, which uses a specialised (but slow) algorithm to figure out the maximum
//! amount of compression achievable for the provided data.
|
||||
//!
|
||||
use crate::lz77::MatchingType;
|
||||
use std::convert::From;
|
||||
|
||||
/// `max_hash_checks` used by the `high()` (best-compression) profile.
pub const HIGH_MAX_HASH_CHECKS: u16 = 1768;
/// `lazy_if_less_than` used by the `high()` (best-compression) profile.
pub const HIGH_LAZY_IF_LESS_THAN: u16 = 128;
/// The maximum number of hash checks that make sense as this is the length
/// of the hash chain.
pub const MAX_HASH_CHECKS: u16 = 32 * 1024;
/// `max_hash_checks` used by the default profile.
pub const DEFAULT_MAX_HASH_CHECKS: u16 = 128;
/// `lazy_if_less_than` used by the default profile.
pub const DEFAULT_LAZY_IF_LESS_THAN: u16 = 32;
|
||||
|
||||
/// An enum describing the level of compression to be used by the encoder
///
/// Higher compression ratios will take longer to encode.
///
/// This is a simplified interface to specify a compression level.
///
/// [See also `CompressionOptions`](./struct.CompressionOptions.html) which provides for
/// tweaking the settings more finely.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub enum Compression {
    /// Fast minimal compression (`CompressionOptions::fast()`).
    Fast,
    /// Default level (`CompressionOptions::default()`).
    Default,
    /// Higher compression level (`CompressionOptions::high()`).
    ///
    /// Best in this context isn't actually the highest possible level
    /// the encoder can do, but is meant to emulate the `Best` setting in the `Flate2`
    /// library.
    Best,
}

impl Default for Compression {
    /// `Compression::Default` is, as the name suggests, the default level.
    fn default() -> Compression {
        Compression::Default
    }
}
|
||||
|
||||
/// Enum allowing some special options (not implemented yet)!
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum SpecialOptions {
    /// Compress normally.
    Normal,
    /// Force fixed Huffman tables. (Unimplemented!).
    _ForceFixed,
    /// Force stored (uncompressed) blocks only. (Unimplemented!).
    _ForceStored,
}

impl Default for SpecialOptions {
    /// Normal compression is the default.
    fn default() -> SpecialOptions {
        SpecialOptions::Normal
    }
}
|
||||
|
||||
/// The compression options used by `CompressionOptions::default()`
/// (and thus by `Compression::Default`).
pub const DEFAULT_OPTIONS: CompressionOptions = CompressionOptions {
    max_hash_checks: DEFAULT_MAX_HASH_CHECKS,
    lazy_if_less_than: DEFAULT_LAZY_IF_LESS_THAN,
    matching_type: MatchingType::Lazy,
    special: SpecialOptions::Normal,
};
|
||||
|
||||
/// A struct describing the options for a compressor or compression function.
///
/// These values are not stable and still subject to change!
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct CompressionOptions {
    /// The maximum number of checks to make in the hash table for matches.
    ///
    /// Higher numbers mean slower, but better compression. Very high (say `>1024`) values
    /// will impact compression speed a lot. The maximum match length is 2^15, so values higher than
    /// this won't make any difference, and will be truncated to 2^15 by the compression
    /// function/writer.
    ///
    /// Default value: `128`
    pub max_hash_checks: u16,
    // pub _window_size: u16,
    /// Only lazy match if we have a length less than this value.
    ///
    /// Higher values degrade compression slightly, but improve compression speed.
    ///
    /// * `0`: Never lazy match. (Same effect as setting `MatchingType` to greedy, but may be slower).
    /// * `1...257`: Only check for a better match if the first match was shorter than this value.
    /// * `258`: Always lazy match.
    ///
    /// As the maximum length of a match is `258`, values higher than this will have
    /// no further effect.
    ///
    /// * Default value: `32`
    pub lazy_if_less_than: u16,

    // pub _decent_match: u16,
    /// Whether to use lazy or greedy matching.
    ///
    /// Lazy matching will provide better compression, at the expense of compression speed.
    ///
    /// As a special case, if max_hash_checks is set to 0, and matching_type is set to lazy,
    /// compression using only run-length encoding (i.e maximum match distance of 1) is performed.
    /// (This may be changed in the future but is defined like this at the moment to avoid API
    /// breakage.)
    ///
    /// [See `MatchingType`](./enum.MatchingType.html)
    ///
    /// * Default value: `MatchingType::Lazy`
    pub matching_type: MatchingType,
    /// Force fixed/stored blocks (Not implemented yet).
    /// * Default value: `SpecialOptions::Normal`
    pub special: SpecialOptions,
}
|
||||
|
||||
// Some standard profiles for the compression options.
|
||||
// Ord should be implemented at some point, but won't yet until the struct is stabilised.
|
||||
impl CompressionOptions {
|
||||
/// Returns compression settings roughly corresponding to the `HIGH(9)` setting in miniz.
|
||||
pub fn high() -> CompressionOptions {
|
||||
CompressionOptions {
|
||||
max_hash_checks: HIGH_MAX_HASH_CHECKS,
|
||||
lazy_if_less_than: HIGH_LAZY_IF_LESS_THAN,
|
||||
matching_type: MatchingType::Lazy,
|
||||
special: SpecialOptions::Normal,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a fast set of compression settings
|
||||
///
|
||||
/// Ideally this should roughly correspond to the `FAST(1)` setting in miniz.
|
||||
/// However, that setting makes miniz use a somewhat different algorithm,
|
||||
/// so currently hte fast level in this library is slower and better compressing
|
||||
/// than the corresponding level in miniz.
|
||||
pub fn fast() -> CompressionOptions {
|
||||
CompressionOptions {
|
||||
max_hash_checks: 1,
|
||||
lazy_if_less_than: 0,
|
||||
matching_type: MatchingType::Greedy,
|
||||
special: SpecialOptions::Normal,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a set of compression settings that makes the compressor only compress using
|
||||
/// Huffman coding. (Ignoring any length/distance matching)
|
||||
///
|
||||
/// This will normally have the worst compression ratio (besides only using uncompressed data),
|
||||
/// but may be the fastest method in some cases.
|
||||
pub fn huffman_only() -> CompressionOptions {
|
||||
CompressionOptions {
|
||||
max_hash_checks: 0,
|
||||
lazy_if_less_than: 0,
|
||||
matching_type: MatchingType::Greedy,
|
||||
special: SpecialOptions::Normal,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a set of compression settings that makes the compressor compress only using
|
||||
/// run-length encoding (i.e only looking for matches one byte back).
|
||||
///
|
||||
/// This is very fast, but tends to compress worse than looking for more matches using hash
|
||||
/// chains that the slower settings do.
|
||||
/// Works best on data that has runs of equivalent bytes, like binary or simple images,
|
||||
/// less good for text.
|
||||
pub fn rle() -> CompressionOptions {
|
||||
CompressionOptions {
|
||||
max_hash_checks: 0,
|
||||
lazy_if_less_than: 0,
|
||||
matching_type: MatchingType::Lazy,
|
||||
special: SpecialOptions::Normal,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CompressionOptions {
    /// Returns the options describing the default compression level
    /// (`DEFAULT_OPTIONS`).
    fn default() -> CompressionOptions {
        DEFAULT_OPTIONS
    }
}
|
||||
|
||||
// Map the simplified `Compression` levels onto the corresponding full option sets.
impl From<Compression> for CompressionOptions {
    fn from(compression: Compression) -> CompressionOptions {
        match compression {
            Compression::Fast => CompressionOptions::fast(),
            Compression::Default => CompressionOptions::default(),
            Compression::Best => CompressionOptions::high(),
        }
    }
}
|
|
@ -0,0 +1,145 @@
|
|||
use std::io::Write;
|
||||
use std::{cmp, io, mem};
|
||||
|
||||
use crate::compress::Flush;
|
||||
use crate::compression_options::{CompressionOptions, MAX_HASH_CHECKS};
|
||||
use crate::encoder_state::EncoderState;
|
||||
pub use crate::huffman_table::MAX_MATCH;
|
||||
use crate::huffman_table::NUM_LITERALS_AND_LENGTHS;
|
||||
use crate::input_buffer::InputBuffer;
|
||||
use crate::length_encode::{EncodedLength, LeafVec};
|
||||
use crate::lz77::LZ77State;
|
||||
use crate::output_writer::DynamicWriter;
|
||||
|
||||
/// A counter used for checking values in debug mode.
/// Does nothing when debug assertions are disabled.
#[derive(Default)]
pub struct DebugCounter {
    // The field only exists at all in debug builds.
    #[cfg(debug_assertions)]
    count: u64,
}

impl DebugCounter {
    /// Current counter value.
    #[cfg(debug_assertions)]
    pub fn get(&self) -> u64 {
        self.count
    }

    /// Release builds always report 0.
    #[cfg(not(debug_assertions))]
    pub fn get(&self) -> u64 {
        0
    }

    /// Reset the counter to zero.
    #[cfg(debug_assertions)]
    pub fn reset(&mut self) {
        self.count = 0;
    }

    /// No-op in release builds.
    /// NOTE(review): takes `&self` here but `&mut self` in debug builds;
    /// callers must pass `&mut` to compile in both configurations.
    #[cfg(not(debug_assertions))]
    pub fn reset(&self) {}

    /// Add `val` to the counter.
    #[cfg(debug_assertions)]
    pub fn add(&mut self, val: u64) {
        self.count += val;
    }

    /// No-op in release builds.
    #[cfg(not(debug_assertions))]
    pub fn add(&self, _: u64) {}
}
|
||||
|
||||
/// Reusable scratch buffers for Huffman code-length generation, kept between
/// blocks to avoid reallocating for every block.
pub struct LengthBuffers {
    // Scratch space for Huffman tree leaves.
    pub leaf_buf: LeafVec,
    // Scratch space for encoded code lengths.
    pub length_buf: Vec<EncodedLength>,
}

impl LengthBuffers {
    /// Create buffers pre-sized for the literal/length alphabet and a
    /// 19-entry length buffer (matching the capacities used here).
    #[inline]
    fn new() -> LengthBuffers {
        LengthBuffers {
            leaf_buf: Vec::with_capacity(NUM_LITERALS_AND_LENGTHS),
            length_buf: Vec::with_capacity(19),
        }
    }
}
|
||||
|
||||
/// A struct containing all the stored state used for the encoder.
pub struct DeflateState<W: Write> {
    /// State of lz77 compression.
    pub lz77_state: LZ77State,
    /// Buffer the lz77 compression reads input from.
    pub input_buffer: InputBuffer,
    /// The options this state was created with.
    pub compression_options: CompressionOptions,
    /// State the Huffman part of the compression and the output buffer.
    pub encoder_state: EncoderState,
    /// The buffer containing the raw output of the lz77-encoding.
    pub lz77_writer: DynamicWriter,
    /// Buffers used when generating Huffman code lengths.
    pub length_buffers: LengthBuffers,
    /// Total number of bytes consumed/written to the input buffer.
    pub bytes_written: u64,
    /// Wrapped writer.
    /// Option is used to allow us to implement `Drop` and `finish()` at the same time for the
    /// writer structs.
    pub inner: Option<W>,
    /// The position in the output buffer where data should be flushed from, to keep track of
    /// what data has been output in case not all data is output when writing to the wrapped
    /// writer.
    pub output_buf_pos: usize,
    /// Flush mode currently in effect.
    pub flush_mode: Flush,
    /// Number of bytes written as calculated by sum of block input lengths.
    /// Used to check that they are correct when `debug_assertions` are enabled.
    pub bytes_written_control: DebugCounter,
}
|
||||
|
||||
impl<W: Write> DeflateState<W> {
    /// Create a fresh compression state wrapping `writer`, configured from
    /// `compression_options`.
    pub fn new(compression_options: CompressionOptions, writer: W) -> DeflateState<W> {
        DeflateState {
            input_buffer: InputBuffer::empty(),
            lz77_state: LZ77State::new(
                compression_options.max_hash_checks,
                // NOTE(review): the clamp to MAX_HASH_CHECKS (the hash-chain length)
                // is applied to `lazy_if_less_than` here rather than to
                // `max_hash_checks` — confirm this is intentional.
                cmp::min(compression_options.lazy_if_less_than, MAX_HASH_CHECKS),
                compression_options.matching_type,
            ),
            encoder_state: EncoderState::new(Vec::with_capacity(1024 * 32)),
            lz77_writer: DynamicWriter::new(),
            length_buffers: LengthBuffers::new(),
            compression_options,
            bytes_written: 0,
            inner: Some(writer),
            output_buf_pos: 0,
            flush_mode: Flush::None,
            bytes_written_control: DebugCounter::default(),
        }
    }

    /// The output buffer, which lives inside the encoder state.
    #[inline]
    pub fn output_buf(&mut self) -> &mut Vec<u8> {
        self.encoder_state.inner_vec()
    }

    /// Resets the status of the encoder, leaving the compression options intact
    ///
    /// If flushing the current writer succeeds, it is replaced with the provided one,
    /// buffers and status (except compression options) are reset and the old writer
    /// is returned.
    ///
    /// If flushing fails, the rest of the writer is not cleared.
    pub fn reset(&mut self, writer: W) -> io::Result<W> {
        // Flush the encoder state, then drain the output buffer into the old writer.
        self.encoder_state.flush();
        self.inner
            .as_mut()
            .expect("Missing writer!")
            .write_all(self.encoder_state.inner_vec())?;
        self.encoder_state.inner_vec().clear();
        self.input_buffer = InputBuffer::empty();
        self.lz77_writer.clear();
        self.lz77_state.reset();
        self.bytes_written = 0;
        self.output_buf_pos = 0;
        self.flush_mode = Flush::None;
        if cfg!(debug_assertions) {
            self.bytes_written_control.reset();
        }
        // Swap in the new writer and hand the old one back to the caller.
        mem::replace(&mut self.inner, Some(writer))
            .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Missing writer"))
    }
}
|
|
@ -0,0 +1,130 @@
|
|||
use crate::bitstream::LsbWriter;
|
||||
use crate::huffman_table::HuffmanTable;
|
||||
use crate::lzvalue::LZType;
|
||||
#[cfg(test)]
|
||||
use std::mem;
|
||||
|
||||
// The first bits of each block, which describe the type of the block
// `-TTF` - TT = type, 00 = stored, 01 = fixed, 10 = dynamic, 11 = reserved, F - 1 if final block
// (The final-block flag F occupies the lowest bit of these constants.)
const FIXED_FIRST_BYTE: u16 = 0b010;
const FIXED_FIRST_BYTE_FINAL: u16 = 0b011;
const DYNAMIC_FIRST_BYTE: u16 = 0b100;
const DYNAMIC_FIRST_BYTE_FINAL: u16 = 0b101;

/// The two-bit DEFLATE block type codes.
#[allow(dead_code)]
pub enum BType {
    NoCompression = 0b00,
    FixedHuffman = 0b01,
    DynamicHuffman = 0b10, // Reserved = 0b11, //Error
}
|
||||
|
||||
/// A struct wrapping a writer that writes data compressed using the provided Huffman table
pub struct EncoderState {
    /// Huffman table used to encode literal/length and distance values.
    pub huffman_table: HuffmanTable,
    /// Bit writer the encoded symbols are emitted to.
    pub writer: LsbWriter,
}
|
||||
|
||||
impl EncoderState {
|
||||
/// Creates a new encoder state using the provided Huffman table and writer
|
||||
pub fn new(writer: Vec<u8>) -> EncoderState {
|
||||
EncoderState {
|
||||
huffman_table: HuffmanTable::empty(),
|
||||
writer: LsbWriter::new(writer),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
/// Creates a new encoder state using the fixed Huffman table
|
||||
pub fn fixed(writer: Vec<u8>) -> EncoderState {
|
||||
EncoderState {
|
||||
huffman_table: HuffmanTable::fixed_table(),
|
||||
writer: LsbWriter::new(writer),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn inner_vec(&mut self) -> &mut Vec<u8> {
|
||||
&mut self.writer.w
|
||||
}
|
||||
|
||||
/// Encodes a literal value to the writer
|
||||
fn write_literal(&mut self, value: u8) {
|
||||
let code = self.huffman_table.get_literal(value);
|
||||
debug_assert!(code.length > 0);
|
||||
self.writer.write_bits(code.code, code.length);
|
||||
}
|
||||
|
||||
/// Write a LZvalue to the contained writer, returning Err if the write operation fails
|
||||
pub fn write_lzvalue(&mut self, value: LZType) {
|
||||
match value {
|
||||
LZType::Literal(l) => self.write_literal(l),
|
||||
LZType::StoredLengthDistance(l, d) => {
|
||||
let (code, extra_bits_code) = self.huffman_table.get_length_huffman(l);
|
||||
debug_assert!(
|
||||
code.length != 0,
|
||||
format!("Code: {:?}, Value: {:?}", code, value)
|
||||
);
|
||||
self.writer.write_bits(code.code, code.length);
|
||||
self.writer
|
||||
.write_bits(extra_bits_code.code, extra_bits_code.length);
|
||||
|
||||
let (code, extra_bits_code) = self.huffman_table.get_distance_huffman(d);
|
||||
debug_assert!(
|
||||
code.length != 0,
|
||||
format!("Code: {:?}, Value: {:?}", code, value)
|
||||
);
|
||||
|
||||
self.writer.write_bits(code.code, code.length);
|
||||
self.writer
|
||||
.write_bits(extra_bits_code.code, extra_bits_code.length)
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Write the start of a block, returning Err if the write operation fails.
|
||||
pub fn write_start_of_block(&mut self, fixed: bool, final_block: bool) {
|
||||
if final_block {
|
||||
// The final block has one bit flipped to indicate it's
|
||||
// the final one
|
||||
if fixed {
|
||||
self.writer.write_bits(FIXED_FIRST_BYTE_FINAL, 3)
|
||||
} else {
|
||||
self.writer.write_bits(DYNAMIC_FIRST_BYTE_FINAL, 3)
|
||||
}
|
||||
} else if fixed {
|
||||
self.writer.write_bits(FIXED_FIRST_BYTE, 3)
|
||||
} else {
|
||||
self.writer.write_bits(DYNAMIC_FIRST_BYTE, 3)
|
||||
}
|
||||
}
|
||||
|
||||
/// Write the end of block code
|
||||
pub fn write_end_of_block(&mut self) {
|
||||
let code = self.huffman_table.get_end_of_block();
|
||||
self.writer.write_bits(code.code, code.length)
|
||||
}
|
||||
|
||||
/// Flush the contained writer and it's bitstream wrapper.
|
||||
pub fn flush(&mut self) {
|
||||
self.writer.flush_raw()
|
||||
}
|
||||
|
||||
pub fn set_huffman_to_fixed(&mut self) {
|
||||
self.huffman_table.set_to_fixed()
|
||||
}
|
||||
|
||||
/// Reset the encoder state with a new writer, returning the old one if flushing
|
||||
/// succeeds.
|
||||
#[cfg(test)]
|
||||
pub fn reset(&mut self, writer: Vec<u8>) -> Vec<u8> {
|
||||
// Make sure the writer is flushed
|
||||
// Ideally this should be done before this function is called, but we
|
||||
// do it here just in case.
|
||||
self.flush();
|
||||
// Reset the huffman table
|
||||
// This probably isn't needed, but again, we do it just in case to avoid leaking any data
|
||||
// If this turns out to be a performance issue, it can probably be ignored later.
|
||||
self.huffman_table = HuffmanTable::empty();
|
||||
mem::replace(&mut self.writer.w, writer)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,385 @@
|
|||
use crate::bitstream::LsbWriter;
|
||||
use crate::deflate_state::LengthBuffers;
|
||||
use crate::huffman_table::{
|
||||
create_codes_in_place, num_extra_bits_for_distance_code, num_extra_bits_for_length_code,
|
||||
HuffmanTable, FIXED_CODE_LENGTHS, LENGTH_BITS_START, MAX_CODE_LENGTH, NUM_DISTANCE_CODES,
|
||||
NUM_LITERALS_AND_LENGTHS,
|
||||
};
|
||||
use crate::length_encode::{
|
||||
encode_lengths_m, huffman_lengths_from_frequency_m, EncodedLength, COPY_PREVIOUS,
|
||||
REPEAT_ZERO_3_BITS, REPEAT_ZERO_7_BITS,
|
||||
};
|
||||
use crate::output_writer::FrequencyType;
|
||||
use crate::stored_block::MAX_STORED_BLOCK_LENGTH;
|
||||
|
||||
use std::cmp;
|
||||
|
||||
/// The minimum number of literal/length values
pub const MIN_NUM_LITERALS_AND_LENGTHS: usize = 257;
/// The minimum number of distances
pub const MIN_NUM_DISTANCES: usize = 1;

/// Number of codes in the Huffman table that describes the other code lengths.
const NUM_HUFFMAN_LENGTHS: usize = 19;

/// The output ordering of the lengths for the Huffman codes used to encode the lengths
/// used to build the full Huffman tree for length/literal codes.
/// http://www.gzip.org/zlib/rfc-deflate.html#dyn
const HUFFMAN_LENGTH_ORDER: [u8; NUM_HUFFMAN_LENGTHS] = [
    16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15,
];

// Number of bits used for the values specifying the number of codes
// (HLIT, HDIST and HCLEN header fields respectively).
const HLIT_BITS: u8 = 5;
const HDIST_BITS: u8 = 5;
const HCLEN_BITS: u8 = 4;

/// The longest a Huffman code describing another Huffman length can be
const MAX_HUFFMAN_CODE_LENGTH: usize = 7;

// How many bytes (not including padding and the 3-bit block type) the stored block header takes up.
const STORED_BLOCK_HEADER_LENGTH: u64 = 4;
// Number of bits in the block-type marker written at the start of each block.
const BLOCK_MARKER_LENGTH: u8 = 3;
|
||||
|
||||
/// Creates a new slice from the input slice that stops at the final non-zero
/// value, while keeping at least `min_length` elements.
pub fn remove_trailing_zeroes<T: From<u8> + PartialEq>(input: &[T], min_length: usize) -> &[T] {
    let zero = T::from(0);
    // Count the run of zeroes at the end of the slice.
    let trailing = input.iter().rev().take_while(|&v| *v == zero).count();
    // Keep everything up to the last non-zero value, but never fewer than
    // `min_length` elements.
    let keep = (input.len() - trailing).max(min_length);
    &input[..keep]
}
|
||||
|
||||
/// How many extra bits the Huffman length code uses to represent a value.
fn extra_bits_for_huffman_length_code(code: u8) -> u8 {
    // Codes 16 and 17 carry a 3-bit repeat count, code 18 a 7-bit one;
    // plain length codes (0-15) carry no extra bits.
    if code == 16 || code == 17 {
        3
    } else if code == 18 {
        7
    } else {
        0
    }
}
|
||||
|
||||
/// Calculate how many bits the Huffman-encoded Huffman lengths will use.
|
||||
fn calculate_huffman_length(frequencies: &[FrequencyType], code_lengths: &[u8]) -> u64 {
|
||||
frequencies
|
||||
.iter()
|
||||
.zip(code_lengths)
|
||||
.enumerate()
|
||||
.fold(0, |acc, (n, (&f, &l))| {
|
||||
acc + (u64::from(f)
|
||||
* (u64::from(l) + u64::from(extra_bits_for_huffman_length_code(n as u8))))
|
||||
})
|
||||
}
|
||||
|
||||
/// Calculate how many bits data with the given frequencies will use when compressed with dynamic
/// code lengths (first return value) and static code lengths (second return value).
///
/// Parameters:
/// Frequencies, length of dynamic codes, and a function to get how many extra bits in addition
/// to the length of the Huffman code the symbol will use.
fn calculate_block_length<F>(
    frequencies: &[FrequencyType],
    dyn_code_lengths: &[u8],
    get_num_extra_bits: &F,
) -> (u64, u64)
where
    F: Fn(usize) -> u64,
{
    // Length of data represented by dynamic codes.
    let mut d_ll_length = 0u64;
    // length of data represented by static codes.
    let mut s_ll_length = 0u64;

    // Pair each symbol frequency with its dynamic and fixed (static) code length.
    let iter = frequencies
        .iter()
        .zip(dyn_code_lengths.iter().zip(FIXED_CODE_LENGTHS.iter()))
        .enumerate();

    // This could maybe be optimised a bit by splitting the iteration of codes using extra bits and
    // codes not using extra bits, but the extra complexity may not be worth it.
    for (c, (&f, (&l, &fl))) in iter {
        // Frequency
        let f = u64::from(f);
        // How many extra bits the current code number needs.
        let extra_bits_for_code = get_num_extra_bits(c);

        // Each occurrence of the symbol costs its code length plus extra bits.
        d_ll_length += f * (u64::from(l) + extra_bits_for_code);
        s_ll_length += f * (u64::from(fl) + extra_bits_for_code);
    }

    (d_ll_length, s_ll_length)
}
|
||||
|
||||
/// Get how extra padding bits after a block start header a stored block would use.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if `pending_bits > 8`
|
||||
fn stored_padding(pending_bits: u8) -> u64 {
|
||||
assert!(pending_bits <= 8);
|
||||
let free_space = 8 - pending_bits;
|
||||
if free_space >= BLOCK_MARKER_LENGTH {
|
||||
// There is space in the current byte for the header.
|
||||
free_space - BLOCK_MARKER_LENGTH
|
||||
} else {
|
||||
// The header will require an extra byte.
|
||||
8 - (BLOCK_MARKER_LENGTH - free_space)
|
||||
}
|
||||
.into()
|
||||
}
|
||||
|
||||
/// Calculate the number of bits storing the data in stored blocks will take up, excluding the
|
||||
/// first block start code and potential padding bits. As stored blocks have a maximum length,
|
||||
/// (as opposed to fixed and dynamic ones), multiple blocks may have to be utilised.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if `input_bytes` is 0.
|
||||
fn stored_length(input_bytes: u64) -> u64 {
|
||||
// Check how many stored blocks these bytes would take up.
|
||||
// (Integer divison rounding up.)
|
||||
let num_blocks = (input_bytes
|
||||
.checked_sub(1)
|
||||
.expect("Underflow calculating stored block length!")
|
||||
/ MAX_STORED_BLOCK_LENGTH as u64)
|
||||
+ 1;
|
||||
// The length will be the input length and the headers for each block. (Excluding the start
|
||||
// of block code for the first one)
|
||||
(input_bytes + (STORED_BLOCK_HEADER_LENGTH as u64 * num_blocks) + (num_blocks - 1)) * 8
|
||||
}
|
||||
|
||||
/// The block type chosen for output, picked to give the shortest encoding.
pub enum BlockType {
    /// Uncompressed (stored) block(s).
    Stored,
    /// Block using the pre-defined fixed Huffman codes.
    Fixed,
    /// Block using Huffman codes generated from the data, described by the header.
    Dynamic(DynamicBlockHeader),
}

/// A struct containing the different data needed to write the header for a dynamic block.
///
/// The code lengths are stored directly in the `HuffmanTable` struct.
/// TODO: Do the same for other things here.
pub struct DynamicBlockHeader {
    /// Length of the run-length encoding symbols.
    pub huffman_table_lengths: Vec<u8>,
    /// Number of lengths for values describing the Huffman table that encodes the length values
    /// of the main Huffman tables.
    pub used_hclens: usize,
}
|
||||
|
||||
/// Generate the lengths of the Huffman codes we will be using, using the
/// frequency of the different symbols/lengths/distances, and determine what block type will give
/// the shortest representation.
/// TODO: This needs a test
///
/// Parameters:
/// - `l_freqs`/`d_freqs`: symbol frequencies for the literal/length and distance tables.
/// - `num_input_bytes`: number of input bytes covered by this block.
/// - `pending_bits`: bits already pending in the bit writer (used to cost stored-block padding).
/// - `l_lengths`/`d_lengths`: output slots for the generated code lengths.
/// - `length_buffers`: scratch buffers reused between calls.
pub fn gen_huffman_lengths(
    l_freqs: &[FrequencyType],
    d_freqs: &[FrequencyType],
    num_input_bytes: u64,
    pending_bits: u8,
    l_lengths: &mut [u8; 288],
    d_lengths: &mut [u8; 32],
    length_buffers: &mut LengthBuffers,
) -> BlockType {
    // Avoid corner cases and issues if this is called for an empty block.
    // For blocks this short, a fixed block will be the shortest.
    // TODO: Find the minimum value it's worth doing calculations for.
    if num_input_bytes <= 4 {
        return BlockType::Fixed;
    };

    let l_freqs = remove_trailing_zeroes(l_freqs, MIN_NUM_LITERALS_AND_LENGTHS);
    let d_freqs = remove_trailing_zeroes(d_freqs, MIN_NUM_DISTANCES);

    // The huffman spec allows us to exclude zeroes at the end of the
    // table of huffman lengths, since a frequency of 0 will give a huffman
    // length of 0. We strip off the trailing zeroes before even
    // generating the lengths to save some work.
    // There is however a minimum number of values we have to keep
    // according to the deflate spec.
    // TODO: We could probably compute some of this in parallel.
    huffman_lengths_from_frequency_m(
        l_freqs,
        MAX_CODE_LENGTH,
        &mut length_buffers.leaf_buf,
        l_lengths,
    );
    huffman_lengths_from_frequency_m(
        d_freqs,
        MAX_CODE_LENGTH,
        &mut length_buffers.leaf_buf,
        d_lengths,
    );

    let used_lengths = l_freqs.len();
    let used_distances = d_freqs.len();

    // Run-length encode the literal/length and distance code lengths,
    // collecting the frequencies of the 19 run-length symbols as we go.
    let mut freqs = [0u16; 19];
    encode_lengths_m(
        l_lengths[..used_lengths]
            .iter()
            .chain(&d_lengths[..used_distances]),
        &mut length_buffers.length_buf,
        &mut freqs,
    );

    // Create huffman lengths for the length/distance code lengths
    let mut huffman_table_lengths = vec![0; freqs.len()];
    huffman_lengths_from_frequency_m(
        &freqs,
        MAX_HUFFMAN_CODE_LENGTH,
        &mut length_buffers.leaf_buf,
        huffman_table_lengths.as_mut_slice(),
    );

    // Count how many of these lengths we use (trailing zeroes in the special
    // HCLEN output order can be omitted from the header).
    let used_hclens = HUFFMAN_LENGTH_ORDER.len()
        - HUFFMAN_LENGTH_ORDER
            .iter()
            .rev()
            .take_while(|&&n| huffman_table_lengths[n as usize] == 0)
            .count();

    // There has to be at least 4 hclens, so if there isn't, something went wrong.
    debug_assert!(used_hclens >= 4);

    // Calculate how many bits of space this block will take up with the different block types
    // (excluding the 3-bit block header since it's used in all block types).

    // Total length of the compressed literals/lengths.
    let (d_ll_length, s_ll_length) = calculate_block_length(l_freqs, l_lengths, &|c| {
        num_extra_bits_for_length_code(c.saturating_sub(LENGTH_BITS_START as usize) as u8).into()
    });

    // Total length of the compressed distances.
    let (d_dist_length, s_dist_length) = calculate_block_length(d_freqs, d_lengths, &|c| {
        num_extra_bits_for_distance_code(c as u8).into()
    });

    // Total length of the compressed huffman code lengths.
    let huff_table_length = calculate_huffman_length(&freqs, &huffman_table_lengths);

    // For dynamic blocks the huffman tables takes up some extra space:
    // 3 bits per used hclen plus the HLIT/HDIST/HCLEN header fields.
    let dynamic_length = d_ll_length
        + d_dist_length
        + huff_table_length
        + (used_hclens as u64 * 3)
        + u64::from(HLIT_BITS)
        + u64::from(HDIST_BITS)
        + u64::from(HCLEN_BITS);

    // Static blocks don't have any extra header data.
    let static_length = s_ll_length + s_dist_length;

    // Calculate how many bits it will take to store the data in uncompressed (stored) block(s).
    let stored_length = stored_length(num_input_bytes) + stored_padding(pending_bits % 8);

    let used_length = cmp::min(cmp::min(dynamic_length, static_length), stored_length);

    // Check if the block is actually compressed. If using a dynamic block
    // increases the length of the block (for instance if the input data is mostly random or
    // already compressed), we want to output a stored(uncompressed) block instead to avoid wasting
    // space.
    if used_length == static_length {
        BlockType::Fixed
    } else if used_length == stored_length {
        BlockType::Stored
    } else {
        BlockType::Dynamic(DynamicBlockHeader {
            huffman_table_lengths,
            used_hclens,
        })
    }
}
|
||||
|
||||
/// Write the specified Huffman lengths to the bit writer
///
/// Emits the dynamic-block header: the HLIT/HDIST/HCLEN counts, the 3-bit
/// lengths of the code-length Huffman table, and finally the run-length
/// encoded main table lengths themselves.
pub fn write_huffman_lengths(
    header: &DynamicBlockHeader,
    huffman_table: &HuffmanTable,
    encoded_lengths: &[EncodedLength],
    writer: &mut LsbWriter,
) {
    // Ignore trailing zero lengths as allowed by the deflate spec.
    let (literal_len_lengths, distance_lengths) = huffman_table.get_lengths();
    let literal_len_lengths =
        remove_trailing_zeroes(literal_len_lengths, MIN_NUM_LITERALS_AND_LENGTHS);
    let distance_lengths = remove_trailing_zeroes(distance_lengths, MIN_NUM_DISTANCES);
    let huffman_table_lengths = &header.huffman_table_lengths;
    let used_hclens = header.used_hclens;

    assert!(literal_len_lengths.len() <= NUM_LITERALS_AND_LENGTHS);
    assert!(literal_len_lengths.len() >= MIN_NUM_LITERALS_AND_LENGTHS);
    assert!(distance_lengths.len() <= NUM_DISTANCE_CODES);
    assert!(distance_lengths.len() >= MIN_NUM_DISTANCES);

    // Number of length codes - 257.
    let hlit = (literal_len_lengths.len() - MIN_NUM_LITERALS_AND_LENGTHS) as u16;
    writer.write_bits(hlit, HLIT_BITS);
    // Number of distance codes - 1.
    let hdist = (distance_lengths.len() - MIN_NUM_DISTANCES) as u16;
    writer.write_bits(hdist, HDIST_BITS);

    // Number of huffman table lengths - 4.
    let hclen = used_hclens.saturating_sub(4);

    // Write HCLEN.
    // Casting to u16 is safe since the length can never be more than the length of
    // `HUFFMAN_LENGTH_ORDER` anyhow.
    writer.write_bits(hclen as u16, HCLEN_BITS);

    // Write the lengths for the huffman table describing the huffman table
    // Each length is 3 bits
    for n in &HUFFMAN_LENGTH_ORDER[..used_hclens] {
        writer.write_bits(u16::from(huffman_table_lengths[usize::from(*n)]), 3);
    }

    // Generate codes for the main huffman table using the lengths we just wrote
    let mut codes = [0u16; NUM_HUFFMAN_LENGTHS];
    create_codes_in_place(&mut codes[..], huffman_table_lengths);

    // Write the actual huffman lengths
    for v in encoded_lengths {
        match *v {
            EncodedLength::Length(n) => {
                let (c, l) = (codes[usize::from(n)], huffman_table_lengths[usize::from(n)]);
                writer.write_bits(c, l);
            }
            EncodedLength::CopyPrevious(n) => {
                let (c, l) = (codes[COPY_PREVIOUS], huffman_table_lengths[COPY_PREVIOUS]);
                writer.write_bits(c, l);
                // Repeat counts 3-6 are stored as (count - 3) in 2 extra bits.
                debug_assert!(n >= 3);
                debug_assert!(n <= 6);
                writer.write_bits((n - 3).into(), 2);
            }
            EncodedLength::RepeatZero3Bits(n) => {
                let (c, l) = (
                    codes[REPEAT_ZERO_3_BITS],
                    huffman_table_lengths[REPEAT_ZERO_3_BITS],
                );
                writer.write_bits(c, l);
                // Short zero runs are stored as (count - 3) in 3 extra bits.
                debug_assert!(n >= 3);
                writer.write_bits((n - 3).into(), 3);
            }
            EncodedLength::RepeatZero7Bits(n) => {
                let (c, l) = (
                    codes[REPEAT_ZERO_7_BITS],
                    huffman_table_lengths[REPEAT_ZERO_7_BITS],
                );
                writer.write_bits(c, l);
                // Long zero runs (11-138) are stored as (count - 11) in 7 extra bits.
                debug_assert!(n >= 11);
                debug_assert!(n <= 138);
                writer.write_bits((n - 11).into(), 7);
            }
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::stored_padding;

    #[test]
    fn padding() {
        // (pending bits in the writer, expected padding bits) for every
        // bit position within a byte.
        let cases: [(u8, u64); 8] = [
            (0, 5),
            (1, 4),
            (2, 3),
            (3, 2),
            (4, 1),
            (5, 0),
            (6, 7),
            (7, 6),
        ];
        for &(pending, expected) in &cases {
            assert_eq!(stored_padding(pending), expected);
        }
    }
}
|
|
@ -0,0 +1,539 @@
|
|||
use crate::bit_reverse::reverse_bits;
|
||||
use crate::lzvalue::StoredLength;
|
||||
use std::fmt;
|
||||
|
||||
/// The number of length codes in the Huffman table
pub const NUM_LENGTH_CODES: usize = 29;

/// The number of distance codes in the distance Huffman table
// NOTE: two more codes are actually used when constructing codes
pub const NUM_DISTANCE_CODES: usize = 30;

/// Combined number of literal and length codes
// NOTE: two more codes are actually used when constructing codes
pub const NUM_LITERALS_AND_LENGTHS: usize = 286;

/// The maximum length of a Huffman code
pub const MAX_CODE_LENGTH: usize = 15;

/// The minimum and maximum lengths for a match according to the DEFLATE specification
pub const MIN_MATCH: u16 = 3;
pub const MAX_MATCH: u16 = 258;

/// The minimum and maximum match distances allowed by DEFLATE.
pub const MIN_DISTANCE: u16 = 1;
pub const MAX_DISTANCE: u16 = 32768;

/// The position in the literal/length table of the end of block symbol
pub const END_OF_BLOCK_POSITION: usize = 256;
|
||||
|
||||
/// Bit lengths for literal and length codes in the fixed Huffman table
/// The Huffman codes are generated from this and the distance bit length table
// (288 entries: the 286 used codes plus two extra construction-only codes.)
pub static FIXED_CODE_LENGTHS: [u8; NUM_LITERALS_AND_LENGTHS + 2] = [
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8,
];

/// The number of extra bits for the length codes
const LENGTH_EXTRA_BITS_LENGTH: [u8; NUM_LENGTH_CODES] = [
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0,
];
|
||||
|
||||
/// Table used to get a code from a length value (see `get_length_code_and_extra_bits`)
// Indexed by (length - MIN_MATCH), which always fits in a byte.
const LENGTH_CODE: [u8; 256] = [
    0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
    14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18,
    18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
    20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22,
    22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
    23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
    24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
    25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
    26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28,
];

/// Base values to calculate the value of the bits in length codes
const BASE_LENGTH: [u8; NUM_LENGTH_CODES] = [
    0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128,
    160, 192, 224, 255,
]; // 255 = 258 - MIN_MATCH

/// What number in the literal/length table the lengths start at
pub const LENGTH_BITS_START: u16 = 257;
|
||||
|
||||
/// Lengths for the distance codes in the pre-defined/fixed Huffman table
/// (All distance codes are 5 bits long)
pub const FIXED_CODE_LENGTHS_DISTANCE: [u8; NUM_DISTANCE_CODES + 2] = [5; NUM_DISTANCE_CODES + 2];

// Table mapping distances to distance codes: the first 256 entries cover
// distances 1-256 directly; the remaining entries are indexed by the top
// bits of larger distances (see `get_distance_code`).
const DISTANCE_CODES: [u8; 512] = [
    0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11,
    11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
    22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
    24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
    26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
];

/// Number of extra bits following the distance codes
#[cfg(test)]
const DISTANCE_EXTRA_BITS: [u8; NUM_DISTANCE_CODES] = [
    0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13,
    13,
];

// Base distance (minus one) for each distance code; the extra bits encode
// the offset from this base (see `get_distance_code_and_extra_bits`).
const DISTANCE_BASE: [u16; NUM_DISTANCE_CODES] = [
    0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536,
    2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576,
];
|
||||
|
||||
pub fn num_extra_bits_for_length_code(code: u8) -> u8 {
|
||||
LENGTH_EXTRA_BITS_LENGTH[code as usize]
|
||||
}
|
||||
|
||||
/// Get the number of extra bits used for a distance code.
/// (Code numbers above `NUM_DISTANCE_CODES` will give some garbage
/// value.)
pub fn num_extra_bits_for_distance_code(code: u8) -> u8 {
    // This can be easily calculated without a lookup: codes 0-3 carry no
    // extra bits, and each subsequent pair of codes carries one more bit.
    (code >> 1).saturating_sub(1)
}
|
||||
|
||||
/// A struct representing the data needed to generate the bit codes for
/// a given value and Huffman table.
#[derive(Copy, Clone)]
struct ExtraBits {
    /// The position of the length in the Huffman table.
    pub code_number: u16,
    /// Number of extra bits following the code.
    pub num_bits: u8,
    /// The value of the extra bits, which together with the length/distance code
    /// allow us to calculate the exact length/distance.
    pub value: u16,
}
|
||||
|
||||
/// Get the length code that corresponds to the length value
/// Panics if length is out of range.
// NOTE(review): with the wrapping subtraction and `as u8` cast below, the
// index can never go out of bounds, so out-of-range lengths yield a garbage
// code rather than a panic — confirm which behaviour is intended.
pub fn get_length_code(length: u16) -> usize {
    // Going via an u8 here helps the compiler evade bounds checking.
    usize::from(LENGTH_CODE[(length.wrapping_sub(MIN_MATCH)) as u8 as usize])
        + LENGTH_BITS_START as usize
}
|
||||
|
||||
/// Get the code for the Huffman table and the extra bits for the requested length.
fn get_length_code_and_extra_bits(length: StoredLength) -> ExtraBits {
    // Length values are stored as unsigned bytes, where the actual length is the value - 3
    // The `StoredLength` struct takes care of this conversion for us.
    let n = LENGTH_CODE[length.stored_length() as usize];

    // We can then get the base length from the base length table,
    // which we use to calculate the value of the extra bits.
    let base = BASE_LENGTH[n as usize];
    let num_bits = num_extra_bits_for_length_code(n);
    ExtraBits {
        // Length codes sit after the literals in the combined table, hence
        // the LENGTH_BITS_START offset.
        code_number: u16::from(n) + LENGTH_BITS_START,
        num_bits,
        value: (length.stored_length() - base).into(),
    }
}
|
||||
|
||||
/// Get the spot in the Huffman table for distances `distance` corresponds to
|
||||
/// Returns 255 if the distance is invalid.
|
||||
/// Avoiding option here for simplicity and performance) as this being called with an invalid
|
||||
/// value would be a bug.
|
||||
pub fn get_distance_code(distance: u16) -> u8 {
|
||||
let distance = distance as usize;
|
||||
|
||||
match distance {
|
||||
// Since the array starts at 0, we need to subtract 1 to get the correct code number.
|
||||
1..=256 => DISTANCE_CODES[distance - 1],
|
||||
// Due to the distrubution of the distance codes above 256, we can get away with only
|
||||
// using the top bits to determine the code, rather than having a 32k long table of
|
||||
// distance codes.
|
||||
257..=32768 => DISTANCE_CODES[256 + ((distance - 1) >> 7)],
|
||||
_ => 0,
|
||||
}
|
||||
}
|
||||
|
||||
fn get_distance_code_and_extra_bits(distance: u16) -> ExtraBits {
|
||||
let distance_code = get_distance_code(distance);
|
||||
let extra = num_extra_bits_for_distance_code(distance_code);
|
||||
// FIXME: We should add 1 to the values in distance_base to avoid having to add one here
|
||||
let base = DISTANCE_BASE[distance_code as usize] + 1;
|
||||
ExtraBits {
|
||||
code_number: distance_code.into(),
|
||||
num_bits: extra,
|
||||
value: distance - base,
|
||||
}
|
||||
}
|
||||
|
||||
/// A Huffman code: the code bits plus how many of them are used.
#[derive(Copy, Clone, Default)]
pub struct HuffmanCode {
    /// The bits making up the code.
    pub code: u16,
    /// Number of bits used by `code`.
    pub length: u8,
}
|
||||
|
||||
impl fmt::Debug for HuffmanCode {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"HuffmanCode {{ code: {:b}, length: {}}}",
|
||||
self.code, self.length
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl HuffmanCode {
    /// Create a Huffman code value from a code and length.
    #[inline]
    fn new(code: u16, length: u8) -> HuffmanCode {
        Self { code, length }
    }
}
|
||||
|
||||
// Bundle of the Huffman codes and extra bits for a length/distance pair.
// Only used by tests to inspect encoder output.
#[cfg(test)]
pub struct LengthAndDistanceBits {
    pub length_code: HuffmanCode,
    pub length_extra_bits: HuffmanCode,
    pub distance_code: HuffmanCode,
    pub distance_extra_bits: HuffmanCode,
}
|
||||
|
||||
/// Counts the number of values of each length.
|
||||
/// Returns a tuple containing the longest length value in the table, it's position,
|
||||
/// and fills in lengths in the `len_counts` slice.
|
||||
/// Returns an error if `table` is empty, or if any of the lengths exceed 15.
|
||||
fn build_length_count_table(table: &[u8], len_counts: &mut [u16; 16]) -> (usize, usize) {
|
||||
// TODO: Validate the length table properly in debug mode.
|
||||
let max_length = (*table.iter().max().expect("BUG! Empty lengths!")).into();
|
||||
|
||||
assert!(max_length <= MAX_CODE_LENGTH);
|
||||
|
||||
let mut max_length_pos = 0;
|
||||
|
||||
for (n, &length) in table.iter().enumerate() {
|
||||
// TODO: Make sure we don't have more of one length than we can make
|
||||
// codes for
|
||||
if length > 0 {
|
||||
len_counts[usize::from(length)] += 1;
|
||||
max_length_pos = n;
|
||||
}
|
||||
}
|
||||
(max_length, max_length_pos)
|
||||
}
|
||||
|
||||
/// Generates canonical Huffman codes from a table of bit lengths, writing them into
/// `code_table`.
///
/// Panics (via `build_length_count_table`) if `length_table` is empty or contains
/// lengths greater than 15.
pub fn create_codes_in_place(code_table: &mut [u16], length_table: &[u8]) {
    // Count how many codes there are of each bit length.
    let mut len_counts = [0; 16];
    let (max_length, max_length_pos) = build_length_count_table(length_table, &mut len_counts);
    let lengths = len_counts;

    // Compute the first code value for each bit length (canonical Huffman construction).
    let mut code = 0u16;
    let mut next_code = Vec::with_capacity(length_table.len());
    next_code.push(code);

    for bits in 1..=max_length {
        code = (code + lengths[bits - 1]) << 1;
        next_code.push(code);
    }

    // Assign codes in symbol order; symbols past the last non-zero length keep code 0.
    for n in 0..=max_length_pos {
        let length = usize::from(length_table[n]);
        if length != 0 {
            // The algorithm generates the code in the reverse bit order, so we need to reverse them
            // to get the correct codes.
            code_table[n] = reverse_bits(next_code[length], length as u8);
            // We use wrapping here as we would otherwise overflow on the last code
            // This should be okay as we exit the loop after this so the value is ignored
            next_code[length] = next_code[length].wrapping_add(1);
        }
    }
}
|
||||
|
||||
/// A structure containing the tables of Huffman codes for lengths, literals and distances.
///
/// `codes[i]` and `code_lengths[i]` describe the same symbol; likewise for the
/// distance arrays.
pub struct HuffmanTable {
    // Literal, end of block and length codes
    codes: [u16; 288],
    code_lengths: [u8; 288],
    // Distance codes
    distance_codes: [u16; 32],
    distance_code_lengths: [u8; 32],
}
|
||||
|
||||
impl HuffmanTable {
    /// Create a table with all codes and lengths zeroed out.
    pub fn empty() -> HuffmanTable {
        HuffmanTable {
            codes: [0; 288],
            code_lengths: [0; 288],
            distance_codes: [0; 32],
            distance_code_lengths: [0; 32],
        }
    }

    /// Build a table from pre-computed length tables, generating the codes from them.
    #[cfg(test)]
    pub fn from_length_tables(
        literals_and_lengths: &[u8; 288],
        distances: &[u8; 32],
    ) -> HuffmanTable {
        let mut table = HuffmanTable {
            codes: [0; 288],
            code_lengths: *literals_and_lengths,
            distance_codes: [0; 32],
            distance_code_lengths: *distances,
        };

        table.update_from_lengths();
        table
    }

    /// Get references to the lengths of the current Huffman codes.
    #[inline]
    pub fn get_lengths(&self) -> (&[u8; 288], &[u8; 32]) {
        (&self.code_lengths, &self.distance_code_lengths)
    }

    /// Get mutable references to the lengths of the current Huffman codes.
    ///
    /// Used for updating the lengths in place.
    #[inline]
    pub fn get_lengths_mut(&mut self) -> (&mut [u8; 288], &mut [u8; 32]) {
        (&mut self.code_lengths, &mut self.distance_code_lengths)
    }

    /// Update the Huffman codes using the existing length values in the Huffman table.
    pub fn update_from_lengths(&mut self) {
        create_codes_in_place(self.codes.as_mut(), &self.code_lengths[..]);
        create_codes_in_place(
            self.distance_codes.as_mut(),
            &self.distance_code_lengths[..],
        );
    }

    /// Replace the current lengths with the fixed DEFLATE lengths and regenerate the codes.
    pub fn set_to_fixed(&mut self) {
        self.code_lengths = FIXED_CODE_LENGTHS;
        self.distance_code_lengths = FIXED_CODE_LENGTHS_DISTANCE;
        self.update_from_lengths();
    }

    /// Create a `HuffmanTable` using the fixed tables specified in the DEFLATE format specification.
    #[cfg(test)]
    pub fn fixed_table() -> HuffmanTable {
        // `from_length_tables` validates internally; if it were to panic the fixed
        // tables are wrong, and tests should catch it.
        HuffmanTable::from_length_tables(&FIXED_CODE_LENGTHS, &FIXED_CODE_LENGTHS_DISTANCE)
    }

    // Look up the code and length for a literal/length symbol index.
    #[inline]
    fn get_ll_huff(&self, value: usize) -> HuffmanCode {
        HuffmanCode::new(self.codes[value], self.code_lengths[value])
    }

    /// Get the Huffman code from the corresponding literal value
    #[inline]
    pub fn get_literal(&self, value: u8) -> HuffmanCode {
        let index = usize::from(value);
        HuffmanCode::new(self.codes[index], self.code_lengths[index])
    }

    /// Get the Huffman code for the end of block value
    #[inline]
    pub fn get_end_of_block(&self) -> HuffmanCode {
        self.get_ll_huff(END_OF_BLOCK_POSITION)
    }

    /// Get the Huffman code and extra bits for the specified length.
    ///
    /// Returns `(length code, extra bits packed as a HuffmanCode)`.
    #[inline]
    pub fn get_length_huffman(&self, length: StoredLength) -> (HuffmanCode, HuffmanCode) {
        let length_data = get_length_code_and_extra_bits(length);

        let length_huffman_code = self.get_ll_huff(length_data.code_number as usize);

        (
            length_huffman_code,
            HuffmanCode {
                code: length_data.value,
                length: length_data.num_bits,
            },
        )
    }

    /// Get the Huffman code and extra bits for the specified distance.
    ///
    /// `distance` must be in the range `MIN_DISTANCE..=MAX_DISTANCE`; this is only
    /// checked with a debug assertion, so callers must uphold it.
    #[inline]
    pub fn get_distance_huffman(&self, distance: u16) -> (HuffmanCode, HuffmanCode) {
        debug_assert!(distance >= MIN_DISTANCE && distance <= MAX_DISTANCE);

        let distance_data = get_distance_code_and_extra_bits(distance);

        let distance_huffman_code = self.distance_codes[distance_data.code_number as usize];
        let distance_huffman_length =
            self.distance_code_lengths[distance_data.code_number as usize];

        (
            HuffmanCode {
                code: distance_huffman_code,
                length: distance_huffman_length,
            },
            HuffmanCode {
                code: distance_data.value,
                length: distance_data.num_bits,
            },
        )
    }

    // Test helper: fetch all four code/extra-bits values for a length-distance pair.
    #[cfg(test)]
    pub fn get_length_distance_code(&self, length: u16, distance: u16) -> LengthAndDistanceBits {
        assert!(length >= MIN_MATCH && length < MAX_DISTANCE);
        let l_codes = self.get_length_huffman(StoredLength::from_actual_length(length));
        let d_codes = self.get_distance_huffman(distance);
        LengthAndDistanceBits {
            length_code: l_codes.0,
            length_extra_bits: l_codes.1,
            distance_code: d_codes.0,
            distance_extra_bits: d_codes.1,
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use super::{
        build_length_count_table, get_distance_code_and_extra_bits, get_length_code_and_extra_bits,
    };

    use crate::lzvalue::StoredLength;

    // Shorthand for building a StoredLength from an actual match length.
    fn l(length: u16) -> StoredLength {
        StoredLength::from_actual_length(length)
    }

    #[test]
    fn test_get_length_code() {
        let extra_bits = get_length_code_and_extra_bits(l(4));
        assert_eq!(extra_bits.code_number, 258);
        assert_eq!(extra_bits.num_bits, 0);
        assert_eq!(extra_bits.value, 0);

        let extra_bits = get_length_code_and_extra_bits(l(165));
        assert_eq!(extra_bits.code_number, 282);
        assert_eq!(extra_bits.num_bits, 5);
        assert_eq!(extra_bits.value, 2);

        let extra_bits = get_length_code_and_extra_bits(l(257));
        assert_eq!(extra_bits.code_number, 284);
        assert_eq!(extra_bits.num_bits, 5);
        assert_eq!(extra_bits.value, 30);

        // 258 is the maximum match length and has its own code with no extra bits.
        let extra_bits = get_length_code_and_extra_bits(l(258));
        assert_eq!(extra_bits.code_number, 285);
        assert_eq!(extra_bits.num_bits, 0);
    }

    #[test]
    fn test_distance_code() {
        assert_eq!(get_distance_code(1), 0);
        // Using 0 for None at the moment
        assert_eq!(get_distance_code(0), 0);
        assert_eq!(get_distance_code(50000), 0);
        assert_eq!(get_distance_code(6146), 25);
        assert_eq!(get_distance_code(256), 15);
        assert_eq!(get_distance_code(4733), 24);
        assert_eq!(get_distance_code(257), 16);
    }

    #[test]
    fn test_distance_extra_bits() {
        let extra = get_distance_code_and_extra_bits(527);
        assert_eq!(extra.value, 0b1110);
        assert_eq!(extra.code_number, 18);
        assert_eq!(extra.num_bits, 8);
        let extra = get_distance_code_and_extra_bits(256);
        assert_eq!(extra.code_number, 15);
        assert_eq!(extra.num_bits, 6);
        let extra = get_distance_code_and_extra_bits(4733);
        assert_eq!(extra.code_number, 24);
        assert_eq!(extra.num_bits, 11);
    }

    // The fixed length table must pass validation without panicking.
    #[test]
    fn test_length_table_fixed() {
        let _ = build_length_count_table(&FIXED_CODE_LENGTHS, &mut [0; 16]);
    }

    // Lengths above 15 are invalid and must trigger the assertion.
    #[test]
    #[should_panic]
    fn test_length_table_max_length() {
        let table = [16u8; 288];
        build_length_count_table(&table, &mut [0; 16]);
    }

    // An empty table is a bug and must panic.
    #[test]
    #[should_panic]
    fn test_empty_table() {
        let table = [];
        build_length_count_table(&table, &mut [0; 16]);
    }

    // Spot-check the generated fixed table against values from the DEFLATE spec.
    #[test]
    fn make_table_fixed() {
        let table = HuffmanTable::fixed_table();
        assert_eq!(table.codes[0], 0b00001100);
        assert_eq!(table.codes[143], 0b11111101);
        assert_eq!(table.codes[144], 0b000010011);
        assert_eq!(table.codes[255], 0b111111111);
        assert_eq!(table.codes[256], 0b0000000);
        assert_eq!(table.codes[279], 0b1110100);
        assert_eq!(table.codes[280], 0b00000011);
        assert_eq!(table.codes[287], 0b11100011);

        assert_eq!(table.distance_codes[0], 0);
        assert_eq!(table.distance_codes[5], 20);

        let ld = table.get_length_distance_code(4, 5);

        assert_eq!(ld.length_code.code, 0b00100000);
        assert_eq!(ld.distance_code.code, 0b00100);
        assert_eq!(ld.distance_extra_bits.length, 1);
        assert_eq!(ld.distance_extra_bits.code, 0);
    }

    // The computed extra-bit counts must agree with the reference table.
    #[test]
    fn extra_bits_distance() {
        use std::mem::size_of;
        for i in 0..NUM_DISTANCE_CODES {
            assert_eq!(
                num_extra_bits_for_distance_code(i as u8),
                DISTANCE_EXTRA_BITS[i]
            );
        }
        println!("Size of huffmanCode struct: {}", size_of::<HuffmanCode>());
    }
}
|
|
@ -0,0 +1,147 @@
|
|||
use std::cmp;
|
||||
|
||||
use crate::chained_hash_table::WINDOW_SIZE;
|
||||
|
||||
// Maximum match length, re-exported locally as `usize` for convenient indexing.
const MAX_MATCH: usize = crate::huffman_table::MAX_MATCH as usize;

/// The maximum size of the buffer: two windows plus room for a full-length lookahead match.
pub const BUFFER_SIZE: usize = (WINDOW_SIZE * 2) + MAX_MATCH;
|
||||
|
||||
/// Buffer holding the input data being compressed (two windows plus lookahead).
pub struct InputBuffer {
    // Backing storage; `add_data` never lets it grow beyond BUFFER_SIZE.
    buffer: Vec<u8>,
}
|
||||
|
||||
impl InputBuffer {
    // Test helper: create a buffer pre-filled with `data`, returning any data
    // that did not fit.
    #[cfg(test)]
    pub fn new<'a>(data: &'a [u8]) -> (InputBuffer, Option<&[u8]>) {
        let mut b = InputBuffer::empty();
        let rem = b.add_data(data);
        (b, rem)
    }

    /// Create an empty buffer with capacity for `BUFFER_SIZE` bytes.
    pub fn empty() -> InputBuffer {
        InputBuffer {
            buffer: Vec::with_capacity(BUFFER_SIZE),
        }
    }

    /// Add data to the buffer.
    ///
    /// Returns a slice of the data that was not added (including the lookahead if any).
    pub fn add_data<'a>(&mut self, data: &'a [u8]) -> Option<&'a [u8]> {
        debug_assert!(self.current_end() <= BUFFER_SIZE);
        if self.current_end() + data.len() > BUFFER_SIZE {
            // Add data and return how much was left.
            let consumed = {
                let space_left = BUFFER_SIZE - self.buffer.len();
                self.buffer.extend_from_slice(&data[..space_left]);
                space_left
            };
            Some(&data[consumed..])
        } else {
            // There's space for all of the data.
            self.buffer.extend_from_slice(data);
            None
        }
    }

    /// Get the current amount of data in the buffer.
    pub fn current_end(&self) -> usize {
        self.buffer.len()
    }

    /// Slide the input window and add new data.
    ///
    /// Moves the upper window into the lower half, moves the lookahead to the start
    /// of the upper half, then fills the rest of the upper half from `data`.
    ///
    /// Returns a slice containing the data that did not fit, or `None` if all data was consumed.
    pub fn slide<'a>(&mut self, data: &'a [u8]) -> Option<&'a [u8]> {
        // This should only be used when the buffer is full
        assert!(self.buffer.len() > WINDOW_SIZE * 2);

        // Do this in a block to end the borrow of buffer before truncating it.
        let (final_len, upper_len, end) = {
            // Split into lower window and upper window + lookahead
            let (lower, upper) = self.buffer.split_at_mut(WINDOW_SIZE);
            // Copy the upper window to the lower window
            lower.copy_from_slice(&upper[..WINDOW_SIZE]);
            let lookahead_len = {
                // Copy the lookahead to the start of the upper window
                let (upper_2, lookahead) = upper.split_at_mut(WINDOW_SIZE);
                let lookahead_len = lookahead.len();
                debug_assert!(lookahead_len <= MAX_MATCH);
                upper_2[..lookahead_len].copy_from_slice(lookahead);
                lookahead_len
            };

            // Length of the upper window minus the lookahead bytes
            let upper_len = upper.len() - lookahead_len;
            let end = cmp::min(data.len(), upper_len);
            // Place the new data directly after the relocated lookahead.
            upper[lookahead_len..lookahead_len + end].copy_from_slice(&data[..end]);
            // Remove unused data if any.
            (lower.len() + lookahead_len + end, upper_len, end)
        };
        // Remove unused space.
        self.buffer.truncate(final_len);

        if data.len() > upper_len {
            // Return a slice of the data that was not added
            Some(&data[end..])
        } else {
            None
        }
    }

    /// Get a mutable slice of the used part of the buffer.
    pub fn get_buffer(&mut self) -> &mut [u8] {
        &mut self.buffer
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::MAX_MATCH;
    use super::*;
    use crate::chained_hash_table::WINDOW_SIZE;

    // Filling past capacity must hand back exactly the overflowing bytes.
    #[test]
    pub fn buffer_add_full() {
        let data = [10u8; BUFFER_SIZE + 10];
        let (mut buf, extra) = InputBuffer::new(&data[..]);
        assert!(extra.unwrap() == &[10; 10]);
        let to_add = [2, 5, 3];
        let not_added = buf.add_data(&to_add);
        assert_eq!(not_added.unwrap(), to_add);
    }

    // When there is room, add_data consumes everything; later adds overflow
    // only once the remaining space is used up.
    #[test]
    pub fn buffer_add_not_full() {
        let data = [10u8; BUFFER_SIZE - 5];
        let (mut buf, extra) = InputBuffer::new(&data[..]);
        assert_eq!(buf.current_end(), data.len());
        assert_eq!(extra, None);
        let to_add = [2, 5, 3];
        {
            let not_added = buf.add_data(&to_add);
            assert!(not_added.is_none());
        }
        let not_added = buf.add_data(&to_add);
        assert_eq!(not_added.unwrap()[0], 3);
    }

    // After a slide, the buffer holds the old upper window + lookahead followed
    // by the newly added bytes.
    #[test]
    fn slide() {
        let data = [10u8; BUFFER_SIZE];
        let (mut buf, extra) = InputBuffer::new(&data[..]);
        assert_eq!(extra, None);
        let to_add = [5; 5];
        let rem = buf.slide(&to_add);
        assert!(rem.is_none());
        {
            let slice = buf.get_buffer();
            assert!(slice[..WINDOW_SIZE + MAX_MATCH] == data[WINDOW_SIZE..]);
            assert_eq!(
                slice[WINDOW_SIZE + MAX_MATCH..WINDOW_SIZE + MAX_MATCH + 5],
                to_add
            );
        }
        assert_eq!(buf.current_end(), WINDOW_SIZE + MAX_MATCH + to_add.len());
    }
}
|
|
@ -0,0 +1,657 @@
|
|||
use std::clone::Clone;
|
||||
use std::iter::Iterator;
|
||||
|
||||
/// An enum representing the different types in the run-length encoded data used to encode
/// Huffman table lengths.
#[derive(Debug, PartialEq, Eq)]
pub enum EncodedLength {
    // An actual length value (0-15).
    Length(u8),
    // Copy the previous value n times.
    CopyPrevious(u8),
    // Repeat zero n times (with n represented by 3 bits).
    RepeatZero3Bits(u8),
    // Repeat zero n times (with n represented by 7 bits).
    RepeatZero7Bits(u8),
}
|
||||
|
||||
impl EncodedLength {
|
||||
fn from_prev_and_repeat(prev: u8, repeat: u8) -> EncodedLength {
|
||||
match prev {
|
||||
0 => {
|
||||
if repeat <= 10 {
|
||||
EncodedLength::RepeatZero3Bits(repeat)
|
||||
} else {
|
||||
EncodedLength::RepeatZero7Bits(repeat)
|
||||
}
|
||||
}
|
||||
1..=15 => EncodedLength::CopyPrevious(repeat),
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Positions of the three run-length codes in the 19-entry code-length alphabet
// (used as indices into the frequency table).
pub const COPY_PREVIOUS: usize = 16;
pub const REPEAT_ZERO_3_BITS: usize = 17;
pub const REPEAT_ZERO_7_BITS: usize = 18;

// The minimum number of repetitions required before a repeat code is emitted.
const MIN_REPEAT: u8 = 3;
|
||||
|
||||
/// Push an `EncodedLength` to the vector and update the frequency table.
fn update_out_and_freq(
    encoded: EncodedLength,
    output: &mut Vec<EncodedLength>,
    frequencies: &mut [u16; 19],
) {
    // Map the encoded value to its slot in the 19-entry code-length alphabet:
    // literal lengths use their own value, repeat codes use the fixed slots 16-18.
    let index = match encoded {
        EncodedLength::Length(l) => usize::from(l),
        EncodedLength::CopyPrevious(_) => COPY_PREVIOUS,
        EncodedLength::RepeatZero3Bits(_) => REPEAT_ZERO_3_BITS,
        EncodedLength::RepeatZero7Bits(_) => REPEAT_ZERO_7_BITS,
    };

    frequencies[index] += 1;

    output.push(encoded);
}
|
||||
|
||||
/// Convenience function to check if the repeat counter should be incremented further.
fn not_max_repetitions(length_value: u8, repeats: u8) -> bool {
    // A run of zeroes may repeat up to 138 times; any other length up to 6.
    if length_value == 0 {
        repeats < 138
    } else {
        repeats < 6
    }
}
|
||||
|
||||
/// Convenience version for unit tests.
#[cfg(test)]
pub fn encode_lengths<'a, I>(lengths: I) -> (Vec<EncodedLength>, [u16; 19])
where
    I: Iterator<Item = &'a u8> + Clone,
{
    let mut frequencies = [0u16; 19];
    let mut output = Vec::new();
    encode_lengths_m(lengths, &mut output, &mut frequencies);
    (output, frequencies)
}
|
||||
|
||||
/// Run-length encodes the lengths of the values in `lengths` according to the deflate
/// specification. This is used for writing the code lengths for the Huffman tables for
/// the deflate stream.
///
/// Populates the supplied array with the frequency of the different encoded length values
/// The frequency array is taken as a parameter rather than returned to avoid
/// excessive `memcpy`-ing.
pub fn encode_lengths_m<'a, I>(
    lengths: I,
    mut out: &mut Vec<EncodedLength>,
    mut frequencies: &mut [u16; 19],
) where
    I: Iterator<Item = &'a u8> + Clone,
{
    out.clear();
    // Number of repetitions of the current value
    let mut repeat = 0;
    let mut iter = lengths.clone().enumerate().peekable();
    // Previous value
    // We set it to the complement of the first value to simplify the code.
    let mut prev = !iter.peek().expect("No length values!").1;

    while let Some((n, &l)) = iter.next() {
        if l == prev && not_max_repetitions(l, repeat) {
            repeat += 1;
        }
        // Emit output when the run breaks: value changed, input ended, or the
        // run hit the maximum encodable repeat count.
        if l != prev || iter.peek().is_none() || !not_max_repetitions(l, repeat) {
            if repeat >= MIN_REPEAT {
                // The previous value has been repeated enough times to write out a repeat code.

                let val = EncodedLength::from_prev_and_repeat(prev, repeat);
                update_out_and_freq(val, &mut out, &mut frequencies);
                repeat = 0;
                // If we have a new length value, output l unless the last value is 0 or l is the
                // last byte.
                if l != prev {
                    if l != 0 || iter.peek().is_none() {
                        update_out_and_freq(EncodedLength::Length(l), &mut out, &mut frequencies);
                        repeat = 0;
                    } else {
                        // If we have a zero, we start repeat at one instead of outputting, as
                        // there are separate codes for repeats of zero so we don't need a literal
                        // to define what byte to repeat.
                        repeat = 1;
                    }
                }
            } else {
                // There haven't been enough repetitions of the previous value,
                // so we just output the lengths directly.

                // If we are at the end, and we have a value that is repeated, we need to
                // skip a byte and output the last one.
                let extra_skip = if iter.peek().is_none() && l == prev {
                    1
                } else {
                    0
                };

                // Get to the position of the next byte to output by starting at zero and skipping.
                let b_iter = lengths.clone().skip(n + extra_skip - repeat as usize);

                // As repeats of zeroes have separate codes, we don't need to output a literal here
                // if we have a zero (unless we are at the end).
                let extra = if l != 0 || iter.peek().is_none() {
                    1
                } else {
                    0
                };

                for &i in b_iter.take(repeat as usize + extra) {
                    update_out_and_freq(EncodedLength::Length(i), &mut out, &mut frequencies);
                }

                // If the current byte is zero we start repeat at 1 as we didn't output the literal
                // directly.
                repeat = 1 - extra as u8;
            }
        }
        prev = l;
    }
}
|
||||
|
||||
/// Generate canonical Huffman code lengths from `frequencies`, capped at `max_len`.
/// Test-only convenience wrapper around `in_place::gen_lengths`.
#[cfg(test)]
pub fn huffman_lengths_from_frequency(frequencies: &[u16], max_len: usize) -> Vec<u8> {
    in_place::gen_lengths(frequencies, max_len)
}
|
||||
|
||||
// Reusable buffer of leaf nodes for the length-generation algorithm.
pub type LeafVec = Vec<in_place::Node>;

/// Generate a set of canonical huffman lengths from the given frequencies, with a maximum length
/// of `max_len`. The lengths are put in the lens slice parameter. Unused lengths are set to 0.
///
/// The leaf buffer is passed in to avoid allocating it every time this function is called.
/// The existing data contained in it is not preserved.
pub fn huffman_lengths_from_frequency_m(
    frequencies: &[u16],
    max_len: usize,
    leaf_buffer: &mut LeafVec,
    lens: &mut [u8],
) {
    in_place::in_place_lengths(frequencies, max_len, leaf_buffer, lens);
}
|
||||
|
||||
mod in_place {
    // Node weights/lengths; u32 is sufficient as frequencies are u16.
    type WeightType = u32;

    /// Check that the generated lengths satisfy the Kraft inequality
    /// (sum of 2^-length over all used codes must not exceed 1).
    pub fn validate_lengths(lengths: &[u8]) -> bool {
        // Avoid issue with floating point on mips: https://github.com/image-rs/deflate-rs/issues/23
        if cfg!(any(
            target_arch = "mips",
            target_arch = "mipsel",
            target_arch = "mips64",
            target_arch = "mipsel64"
        )) {
            true
        } else {
            let v = lengths.iter().fold(0f64, |acc, &n| {
                acc + if n != 0 {
                    2f64.powi(-(i32::from(n)))
                } else {
                    0f64
                }
            });

            match v.partial_cmp(&1.0) {
                Some(std::cmp::Ordering::Greater) => false,
                _ => true,
            }
        }
    }

    /// A leaf in the code-length computation.
    // NOTE(review): `value` is reused across the algorithm's phases — it holds the
    // frequency weight initially, then parent indices/depths during step_1/step_2.
    #[derive(Eq, PartialEq, Debug)]
    pub struct Node {
        value: WeightType,
        symbol: u16,
    }

    // First phase of the in-place algorithm: combine the weight-sorted leaves into an
    // implicit tree, storing parent links/accumulated weights back into `value`.
    fn step_1(leaves: &mut [Node]) {
        // If there are less than 2 non-zero frequencies, this function should not have been
        // called and we should not have gotten to this point.
        debug_assert!(leaves.len() >= 2);
        let mut root = 0;
        let mut leaf = 2;

        leaves[0].value += leaves[1].value;

        for next in 1..leaves.len() - 1 {
            if (leaf >= leaves.len()) || (leaves[root].value < leaves[leaf].value) {
                leaves[next].value = leaves[root].value;
                leaves[root].value = next as WeightType;
                root += 1;
            } else {
                leaves[next].value = leaves[leaf].value;
                leaf += 1;
            }

            if (leaf >= leaves.len()) || (root < next && (leaves[root].value < leaves[leaf].value))
            {
                leaves[next].value += leaves[root].value;
                leaves[root].value = next as WeightType;
                root += 1;
            } else {
                leaves[next].value += leaves[leaf].value;
                leaf += 1;
            }
        }
    }

    // Second phase: convert the parent links produced by step_1 into node depths,
    // which are the code lengths.
    fn step_2(leaves: &mut [Node]) {
        debug_assert!(leaves.len() >= 2);
        let n = leaves.len();

        leaves[n - 2].value = 0;
        for t in (0..(n + 1 - 3)).rev() {
            leaves[t].value = leaves[leaves[t].value as usize].value + 1;
        }

        let mut available = 1 as usize;
        let mut used = 0;
        let mut depth = 0;
        let mut root = n as isize - 2;
        let mut next = n as isize - 1;

        while available > 0 {
            // Count internal nodes at the current depth.
            while root >= 0 && leaves[root as usize].value == depth {
                used += 1;
                root -= 1;
            }
            // Assign the current depth to the remaining available leaves.
            while available > used {
                leaves[next as usize].value = depth;
                next -= 1;
                available -= 1;
            }
            available = 2 * used;
            depth += 1;
            used = 0;
        }
    }

    // 32 codes is enough for both the literal/length (max length 15) and
    // distance alphabets.
    const MAX_NUMBER_OF_CODES: usize = 32;
    const NUM_CODES_LENGTH: usize = MAX_NUMBER_OF_CODES + 1;

    /// Checks if any of the lengths exceed `max_len`, and if that is the case, alters the length
    /// table so that no codes exceed `max_len`.
    /// This is ported from miniz (which is released as public domain by Rich Geldreich
    /// https://github.com/richgel999/miniz/blob/master/miniz.c)
    ///
    /// This will not generate optimal (minimum-redundancy) codes, however in most cases
    /// this won't make a large difference.
    pub fn enforce_max_code_lengths(
        num_codes: &mut [u16; NUM_CODES_LENGTH],
        num_used: usize,
        max_len: usize,
    ) {
        debug_assert!(max_len <= 15);

        if num_used <= 1 {
            return;
        } else {
            // Move all counts of lengths longer than max_len down to max_len.
            let mut num_above_max = 0u16;
            for &l in num_codes[(max_len as usize + 1)..].iter() {
                num_above_max += l;
            }

            num_codes[max_len] += num_above_max;

            let mut total = 0u32;
            for i in (1..=max_len).rev() {
                // This should be safe as max_len won't be higher than 15, and num_codes[i] can't
                // be higher than 288,
                // and 288 << 15 will not be anywhere close to overflowing 32 bits
                total += (u32::from(num_codes[i])) << (max_len - i);
            }

            // miniz uses unsigned long here. 32-bits should be sufficient though,
            // as max_len won't be longer than 15 anyhow.
            // Rebalance until the Kraft sum (scaled by 2^max_len) is exactly 1 << max_len.
            while total != 1u32 << max_len {
                num_codes[max_len] -= 1;
                for i in (1..max_len).rev() {
                    if num_codes[i] != 0 {
                        num_codes[i] -= 1;
                        num_codes[i + 1] += 2;
                        break;
                    }
                }
                total -= 1;
            }
        }
    }

    #[cfg(test)]
    /// Convenience wrapper for tests.
    pub fn gen_lengths(frequencies: &[u16], max_len: usize) -> Vec<u8> {
        let mut lens = vec![0u8; frequencies.len()];
        let mut leaves = Vec::new();
        in_place_lengths(frequencies, max_len, &mut leaves, lens.as_mut_slice());
        lens
    }

    /// Generate huffman code lengths, using the algorithm described by
    /// Moffat and Katajainen in In-Place Calculation of Minimum-Redundancy Codes
    /// http://people.eng.unimelb.edu.au/ammoffat/abstracts/mk95wads.html
    /// and it's implementation.
    ///
    /// This is significantly faster, and seems to generally create lengths that result in length
    /// tables that are better compressible than the algorithm used previously. The downside of this
    /// algorithm is that it's not length-limited, so if too long code lengths are generated,
    /// it might result in a sub-optimal tables as the length-restricting function isn't optimal.
    pub fn in_place_lengths(
        frequencies: &[u16],
        max_len: usize,
        mut leaves: &mut Vec<Node>,
        lengths: &mut [u8],
    ) {
        debug_assert!(lengths.len() >= frequencies.len());

        // Reset output; unused symbols keep length 0.
        for l in lengths.iter_mut() {
            *l = 0;
        }

        // Clear any previous leaves in the leaf buffer.
        leaves.clear();

        // Discard zero length nodes as they won't be given a code and thus don't need to
        // participate in code length generation and create a new vec of the remaining
        // symbols and weights.
        leaves.extend(frequencies.iter().enumerate().filter_map(|(n, f)| {
            if *f > 0 {
                Some(Node {
                    value: u32::from(*f),
                    symbol: n as u16,
                })
            } else {
                None
            }
        }));

        // Special cases with zero or 1 value having a non-zero frequency
        if leaves.len() == 1 {
            lengths[leaves[0].symbol as usize] = 1;
            return;
        } else if leaves.is_empty() {
            return;
        }

        // Sort the leaves by value. As the sort in the standard library is stable, we don't
        // have to worry about the symbol code here.
        leaves.sort_by(|a, b| a.value.cmp(&b.value));

        step_1(&mut leaves);
        step_2(&mut leaves);

        // Count how many codes of each length used, for usage in the next section.
        let mut num_codes = [0u16; NUM_CODES_LENGTH];
        for l in leaves.iter() {
            num_codes[l.value as usize] += 1;
        }

        // As the algorithm used here doesn't limit the maximum length that can be generated
        // we need to make sure none of the lengths exceed `max_len`
        enforce_max_code_lengths(&mut num_codes, leaves.len(), max_len);

        // Output the actual lengths
        let mut leaf_it = leaves.iter().rev();
        // Start at 1 since the length table is already filled with zeroes.
        for (&n_codes, i) in num_codes[1..=max_len].iter().zip(1..=(max_len as u8)) {
            for _ in 0..n_codes {
                lengths[leaf_it.next().unwrap().symbol as usize] = i;
            }
        }

        debug_assert_eq!(leaf_it.next(), None);
        debug_assert!(
            validate_lengths(lengths),
            "The generated length codes were not valid!"
        );
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::huffman_table::NUM_LITERALS_AND_LENGTHS;
|
||||
use std::u16;
|
||||
|
||||
fn lit(value: u8) -> EncodedLength {
|
||||
EncodedLength::Length(value)
|
||||
}
|
||||
|
||||
fn zero(repeats: u8) -> EncodedLength {
|
||||
match repeats {
|
||||
0..=1 => EncodedLength::Length(0),
|
||||
2..=10 => EncodedLength::RepeatZero3Bits(repeats),
|
||||
_ => EncodedLength::RepeatZero7Bits(repeats),
|
||||
}
|
||||
}
|
||||
|
||||
fn copy(copies: u8) -> EncodedLength {
|
||||
EncodedLength::CopyPrevious(copies)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_encode_lengths() {
|
||||
use crate::huffman_table::FIXED_CODE_LENGTHS;
|
||||
let enc = encode_lengths(FIXED_CODE_LENGTHS.iter());
|
||||
// There are no lengths lower than 6 in the fixed table
|
||||
assert_eq!(enc.1[0..7], [0, 0, 0, 0, 0, 0, 0]);
|
||||
// Neither are there any lengths above 9
|
||||
assert_eq!(enc.1[10..16], [0, 0, 0, 0, 0, 0]);
|
||||
// Also there are no zero-length codes so there shouldn't be any repetitions of zero
|
||||
assert_eq!(enc.1[17..19], [0, 0]);
|
||||
|
||||
let test_lengths = [0, 0, 5, 0, 15, 1, 0, 0, 0, 2, 4, 4, 4, 4, 3, 5, 5, 5, 5];
|
||||
let enc = encode_lengths(test_lengths.iter()).0;
|
||||
assert_eq!(
|
||||
enc,
|
||||
vec![
|
||||
lit(0),
|
||||
lit(0),
|
||||
lit(5),
|
||||
lit(0),
|
||||
lit(15),
|
||||
lit(1),
|
||||
zero(3),
|
||||
lit(2),
|
||||
lit(4),
|
||||
copy(3),
|
||||
lit(3),
|
||||
lit(5),
|
||||
copy(3),
|
||||
]
|
||||
);
|
||||
let test_lengths = [0, 0, 0, 5, 2, 3, 0, 0, 0];
|
||||
let enc = encode_lengths(test_lengths.iter()).0;
|
||||
assert_eq!(enc, vec![zero(3), lit(5), lit(2), lit(3), zero(3)]);
|
||||
|
||||
let test_lengths = [0, 0, 0, 3, 3, 3, 5, 4, 4, 4, 4, 0, 0];
|
||||
let enc = encode_lengths(test_lengths.iter()).0;
|
||||
assert_eq!(
|
||||
enc,
|
||||
vec![
|
||||
zero(3),
|
||||
lit(3),
|
||||
lit(3),
|
||||
lit(3),
|
||||
lit(5),
|
||||
lit(4),
|
||||
copy(3),
|
||||
lit(0),
|
||||
lit(0),
|
||||
]
|
||||
);
|
||||
|
||||
let lens = [
|
||||
0, 0, 4, 0, 0, 4, 0, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
|
||||
];
|
||||
|
||||
let _ = encode_lengths(lens.iter()).0;
|
||||
|
||||
let lens = [
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 6, 0, 0, 0, 8, 0, 0, 0, 0, 8, 0, 0, 7, 8, 7, 8, 6, 6, 8, 0, 7, 6, 7, 8, 7, 7,
|
||||
8, 0, 0, 0, 0, 0, 8, 8, 0, 8, 7, 0, 10, 8, 0, 8, 0, 10, 10, 8, 8, 10, 8, 0, 8, 7, 0,
|
||||
10, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7, 7, 7, 6, 7, 8, 8, 6, 0, 0, 8, 8, 7, 8, 8, 0,
|
||||
7, 6, 6, 8, 8, 8, 10, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 4,
|
||||
3, 3, 4, 4, 5, 5, 5, 5, 5, 8, 8, 6, 7, 8, 10, 10, 0, 9, /* litlen */
|
||||
0, 0, 0, 0, 0, 0, 0, 8, 8, 8, 8, 6, 6, 5, 5, 5, 5, 6, 5, 5, 4, 4, 4, 4, 4, 4, 3, 4, 3,
|
||||
4,
|
||||
];
|
||||
|
||||
let enc = encode_lengths(lens.iter()).0;
|
||||
|
||||
assert_eq!(
|
||||
&enc[..10],
|
||||
&[
|
||||
zero(10),
|
||||
lit(9),
|
||||
lit(0),
|
||||
lit(0),
|
||||
lit(9),
|
||||
zero(18),
|
||||
lit(6),
|
||||
zero(3),
|
||||
lit(8),
|
||||
zero(4)
|
||||
]
|
||||
);
|
||||
assert_eq!(
|
||||
&enc[10..20],
|
||||
&[
|
||||
lit(8),
|
||||
lit(0),
|
||||
lit(0),
|
||||
lit(7),
|
||||
lit(8),
|
||||
lit(7),
|
||||
lit(8),
|
||||
lit(6),
|
||||
lit(6),
|
||||
lit(8)
|
||||
]
|
||||
);
|
||||
|
||||
let enc = encode_lengths([1, 1, 1, 2].iter()).0;
|
||||
assert_eq!(enc, vec![lit(1), lit(1), lit(1), lit(2)]);
|
||||
let enc = encode_lengths([0, 0, 3].iter()).0;
|
||||
assert_eq!(enc, vec![lit(0), lit(0), lit(3)]);
|
||||
let enc = encode_lengths([0, 0, 0, 5, 2].iter()).0;
|
||||
assert_eq!(enc, vec![zero(3), lit(5), lit(2)]);
|
||||
|
||||
let enc = encode_lengths([0, 0, 0, 5, 0].iter()).0;
|
||||
assert!(*enc.last().unwrap() != lit(5));
|
||||
|
||||
let enc = encode_lengths([0, 4, 4, 4, 4, 0].iter()).0;
|
||||
assert_eq!(*enc.last().unwrap(), zero(0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lengths_from_frequencies() {
|
||||
let frequencies = [1, 1, 5, 7, 10, 14];
|
||||
|
||||
let expected = [4, 4, 3, 2, 2, 2];
|
||||
let res = huffman_lengths_from_frequency(&frequencies, 4);
|
||||
|
||||
assert_eq!(expected, res.as_slice());
|
||||
|
||||
let frequencies = [1, 5, 1, 7, 10, 14];
|
||||
let expected = [4, 3, 4, 2, 2, 2];
|
||||
|
||||
let res = huffman_lengths_from_frequency(&frequencies, 4);
|
||||
|
||||
assert_eq!(expected, res.as_slice());
|
||||
|
||||
let frequencies = [0, 25, 0, 10, 2, 4];
|
||||
|
||||
let res = huffman_lengths_from_frequency(&frequencies, 4);
|
||||
assert_eq!(res[0], 0);
|
||||
assert_eq!(res[2], 0);
|
||||
assert!(res[1] < 4);
|
||||
|
||||
// Only one value
|
||||
let frequencies = [0, 0, 0, 0, 0, 0, 0, 0, 55, 0, 0, 0];
|
||||
let res = huffman_lengths_from_frequency(&frequencies, 5);
|
||||
let expected = [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0];
|
||||
assert_eq!(expected, res.as_slice());
|
||||
|
||||
// No values
|
||||
let frequencies = [0; 30];
|
||||
let res = huffman_lengths_from_frequency(&frequencies, 5);
|
||||
for (a, b) in frequencies.iter().zip(res.iter()) {
|
||||
assert_eq!(*a, (*b).into());
|
||||
}
|
||||
// assert_eq!(frequencies, res.as_slice());
|
||||
|
||||
let mut frequencies = vec![3; NUM_LITERALS_AND_LENGTHS];
|
||||
frequencies[55] = u16::MAX / 3;
|
||||
frequencies[125] = u16::MAX / 3;
|
||||
|
||||
let res = huffman_lengths_from_frequency(&frequencies, 15);
|
||||
assert_eq!(res.len(), NUM_LITERALS_AND_LENGTHS);
|
||||
assert!(res[55] < 3);
|
||||
assert!(res[125] < 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Test if the bit lengths for a set of frequencies are optimal (give the best compression
|
||||
/// give the provided frequencies).
|
||||
fn optimal_lengths() {
|
||||
let freqs = [
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 68, 0, 14, 0, 0, 0, 0, 3, 7, 6, 1, 0, 12, 14, 9, 2, 6, 9, 4, 1, 1, 4, 1, 1, 0,
|
||||
0, 1, 3, 0, 6, 0, 0, 0, 4, 4, 1, 2, 5, 3, 2, 2, 9, 0, 0, 3, 1, 5, 5, 8, 0, 6, 10, 5, 2,
|
||||
0, 0, 1, 2, 0, 8, 11, 4, 0, 1, 3, 31, 13, 23, 22, 56, 22, 8, 11, 43, 0, 7, 33, 15, 45,
|
||||
40, 16, 1, 28, 37, 35, 26, 3, 7, 11, 9, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 1, 126, 114, 66, 31, 41, 25, 15, 21, 20, 16, 15, 10, 7, 5, 1, 1,
|
||||
];
|
||||
|
||||
let lens = huffman_lengths_from_frequency(&freqs, 15);
|
||||
|
||||
// Lengths produced by miniz for this frequency table for comparison
|
||||
// the number of total bits encoded with these huffman codes is 7701
|
||||
// NOTE: There can be more than one set of optimal lengths for a set of frequencies,
|
||||
// (though there may be a difference in how well the table itself can be represented)
|
||||
// so testing for a specific length table is not ideal since different algorithms
|
||||
// may produce different length tables.
|
||||
// let lens3 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
// 0, 0, 0, 0, 0,
|
||||
// 0, 0, 0, 0, 0, 0, 4, 0, 7, 0, 0, 0, 0, 9, 8, 8, 10, 0, 7, 7, 7, 10, 8, 7, 8,
|
||||
// 10, 10, 8, 10, 10, 0, 0, 10, 9, 0, 8, 0, 0, 0, 8, 8, 10, 9, 8, 9, 9, 9, 7, 0,
|
||||
// 0, 9, 10, 8, 8, 7, 0, 8, 7, 8, 9, 0, 0, 10, 9, 0, 7, 7, 8, 0, 10, 9, 6, 7, 6,
|
||||
// 6, 5, 6, 7, 7, 5, 0, 8, 5, 7, 5, 5, 6, 10, 6, 5, 5, 6, 9, 8, 7, 7, 10, 10, 0,
|
||||
// 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
// 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
// 0, 0, 10, 4, 4, 4, 5, 5, 6, 7, 6, 6, 6, 6, 7, 8, 8, 10, 10];
|
||||
//
|
||||
|
||||
let num_bits = lens
|
||||
.iter()
|
||||
.zip(freqs.iter())
|
||||
.fold(0, |a, (&f, &l)| a + (f as u16 * l));
|
||||
assert_eq!(num_bits, 7701);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,491 @@
|
|||
//! An implementation an encoder using [DEFLATE](http://www.gzip.org/zlib/rfc-deflate.html)
|
||||
//! compression algorithm in pure Rust.
|
||||
//!
|
||||
//! This library provides functions to compress data using the DEFLATE algorithm,
|
||||
//! optionally wrapped using the [zlib](https://tools.ietf.org/html/rfc1950) or
|
||||
//! [gzip](http://www.gzip.org/zlib/rfc-gzip.html) formats.
|
||||
//! The current implementation is still a bit lacking speed-wise compared to C-libraries
|
||||
//! like zlib and miniz.
|
||||
//!
|
||||
//! The deflate algorithm is an older compression algorithm that is still widely used today,
|
||||
//! by e.g html headers, the `.png` image format, the Unix `gzip` program and commonly in `.zip`
|
||||
//! files. The `zlib` and `gzip` formats are wrappers around DEFLATE-compressed data, containing
|
||||
//! some extra metadata and a checksum to validate the integrity of the raw data.
|
||||
//!
|
||||
//! The deflate algorithm does not perform as well as newer algorithms used in file formats such as
|
||||
//! `.7z`, `.rar`, `.xz` and `.bz2`, and is thus not the ideal choice for applications where
|
||||
//! the `DEFLATE` format (with or without wrappers) is not required.
|
||||
//!
|
||||
//! Support for the gzip wrapper (the wrapper that is used in `.gz` files) is disabled by default
|
||||
//! but can be enabled with the `gzip` feature.
|
||||
//!
|
||||
//! As this library is still in development, the compression output may change slightly
|
||||
//! between versions.
|
||||
//!
|
||||
//!
|
||||
//! # Examples:
|
||||
//! ## Simple compression function:
|
||||
//! ``` rust
|
||||
//! use deflate::deflate_bytes;
|
||||
//!
|
||||
//! let data = b"Some data";
|
||||
//! let compressed = deflate_bytes(data);
|
||||
//! # let _ = compressed;
|
||||
//! ```
|
||||
//!
|
||||
//! ## Using a writer:
|
||||
//! ``` rust
|
||||
//! use std::io::Write;
|
||||
//!
|
||||
//! use deflate::Compression;
|
||||
//! use deflate::write::ZlibEncoder;
|
||||
//!
|
||||
//! let data = b"This is some test data";
|
||||
//! let mut encoder = ZlibEncoder::new(Vec::new(), Compression::Default);
|
||||
//! encoder.write_all(data).expect("Write error!");
|
||||
//! let compressed_data = encoder.finish().expect("Failed to finish compression!");
|
||||
//! # let _ = compressed_data;
|
||||
//! ```
|
||||
|
||||
#![cfg_attr(all(feature = "benchmarks", test), feature(test))]
|
||||
|
||||
#[cfg(all(test, feature = "benchmarks"))]
|
||||
extern crate test as test_std;
|
||||
|
||||
#[cfg(test)]
|
||||
extern crate miniz_oxide;
|
||||
|
||||
extern crate adler32;
|
||||
extern crate byteorder;
|
||||
#[cfg(feature = "gzip")]
|
||||
extern crate gzip_header;
|
||||
|
||||
mod bit_reverse;
|
||||
mod bitstream;
|
||||
mod chained_hash_table;
|
||||
mod checksum;
|
||||
mod compress;
|
||||
mod compression_options;
|
||||
mod deflate_state;
|
||||
mod encoder_state;
|
||||
mod huffman_lengths;
|
||||
mod huffman_table;
|
||||
mod input_buffer;
|
||||
mod length_encode;
|
||||
mod lz77;
|
||||
mod lzvalue;
|
||||
mod matching;
|
||||
mod output_writer;
|
||||
mod rle;
|
||||
mod stored_block;
|
||||
#[cfg(test)]
|
||||
mod test_utils;
|
||||
mod writer;
|
||||
mod zlib;
|
||||
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
|
||||
use byteorder::BigEndian;
|
||||
#[cfg(feature = "gzip")]
|
||||
use byteorder::LittleEndian;
|
||||
#[cfg(feature = "gzip")]
|
||||
use gzip_header::Crc;
|
||||
#[cfg(feature = "gzip")]
|
||||
use gzip_header::GzBuilder;
|
||||
|
||||
use crate::checksum::RollingChecksum;
|
||||
use crate::deflate_state::DeflateState;
|
||||
|
||||
use crate::compress::Flush;
|
||||
pub use compression_options::{Compression, CompressionOptions, SpecialOptions};
|
||||
pub use lz77::MatchingType;
|
||||
|
||||
use crate::writer::compress_until_done;
|
||||
|
||||
/// Encoders implementing a `Write` interface.
|
||||
pub mod write {
|
||||
#[cfg(feature = "gzip")]
|
||||
pub use crate::writer::gzip::GzEncoder;
|
||||
pub use crate::writer::{DeflateEncoder, ZlibEncoder};
|
||||
}
|
||||
|
||||
fn compress_data_dynamic<RC: RollingChecksum, W: Write>(
|
||||
input: &[u8],
|
||||
writer: &mut W,
|
||||
mut checksum: RC,
|
||||
compression_options: CompressionOptions,
|
||||
) -> io::Result<()> {
|
||||
checksum.update_from_slice(input);
|
||||
// We use a box here to avoid putting the buffers on the stack
|
||||
// It's done here rather than in the structs themselves for now to
|
||||
// keep the data close in memory.
|
||||
let mut deflate_state = Box::new(DeflateState::new(compression_options, writer));
|
||||
compress_until_done(input, &mut deflate_state, Flush::Finish)
|
||||
}
|
||||
|
||||
/// Compress the given slice of bytes with DEFLATE compression.
|
||||
///
|
||||
/// Returns a `Vec<u8>` of the compressed data.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use deflate::{deflate_bytes_conf, Compression};
|
||||
///
|
||||
/// let data = b"This is some test data";
|
||||
/// let compressed_data = deflate_bytes_conf(data, Compression::Best);
|
||||
/// # let _ = compressed_data;
|
||||
/// ```
|
||||
pub fn deflate_bytes_conf<O: Into<CompressionOptions>>(input: &[u8], options: O) -> Vec<u8> {
|
||||
let mut writer = Vec::with_capacity(input.len() / 3);
|
||||
compress_data_dynamic(
|
||||
input,
|
||||
&mut writer,
|
||||
checksum::NoChecksum::new(),
|
||||
options.into(),
|
||||
)
|
||||
.expect("Write error!");
|
||||
writer
|
||||
}
|
||||
|
||||
/// Compress the given slice of bytes with DEFLATE compression using the default compression
|
||||
/// level.
|
||||
///
|
||||
/// Returns a `Vec<u8>` of the compressed data.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use deflate::deflate_bytes;
|
||||
///
|
||||
/// let data = b"This is some test data";
|
||||
/// let compressed_data = deflate_bytes(data);
|
||||
/// # let _ = compressed_data;
|
||||
/// ```
|
||||
pub fn deflate_bytes(input: &[u8]) -> Vec<u8> {
|
||||
deflate_bytes_conf(input, Compression::Default)
|
||||
}
|
||||
|
||||
/// Compress the given slice of bytes with DEFLATE compression, including a zlib header and trailer.
|
||||
///
|
||||
/// Returns a `Vec<u8>` of the compressed data.
|
||||
///
|
||||
/// Zlib dictionaries are not yet suppored.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use deflate::{deflate_bytes_zlib_conf, Compression};
|
||||
///
|
||||
/// let data = b"This is some test data";
|
||||
/// let compressed_data = deflate_bytes_zlib_conf(data, Compression::Best);
|
||||
/// # let _ = compressed_data;
|
||||
/// ```
|
||||
pub fn deflate_bytes_zlib_conf<O: Into<CompressionOptions>>(input: &[u8], options: O) -> Vec<u8> {
|
||||
use byteorder::WriteBytesExt;
|
||||
let mut writer = Vec::with_capacity(input.len() / 3);
|
||||
// Write header
|
||||
zlib::write_zlib_header(&mut writer, zlib::CompressionLevel::Default)
|
||||
.expect("Write error when writing zlib header!");
|
||||
|
||||
let mut checksum = checksum::Adler32Checksum::new();
|
||||
compress_data_dynamic(input, &mut writer, &mut checksum, options.into())
|
||||
.expect("Write error when writing compressed data!");
|
||||
|
||||
let hash = checksum.current_hash();
|
||||
|
||||
writer
|
||||
.write_u32::<BigEndian>(hash)
|
||||
.expect("Write error when writing checksum!");
|
||||
writer
|
||||
}
|
||||
|
||||
/// Compress the given slice of bytes with DEFLATE compression, including a zlib header and trailer,
|
||||
/// using the default compression level.
|
||||
///
|
||||
/// Returns a Vec<u8> of the compressed data.
|
||||
///
|
||||
/// Zlib dictionaries are not yet suppored.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use deflate::deflate_bytes_zlib;
|
||||
///
|
||||
/// let data = b"This is some test data";
|
||||
/// let compressed_data = deflate_bytes_zlib(data);
|
||||
/// # let _ = compressed_data;
|
||||
/// ```
|
||||
pub fn deflate_bytes_zlib(input: &[u8]) -> Vec<u8> {
|
||||
deflate_bytes_zlib_conf(input, Compression::Default)
|
||||
}
|
||||
|
||||
/// Compress the given slice of bytes with DEFLATE compression, including a gzip header and trailer
|
||||
/// using the given gzip header and compression options.
|
||||
///
|
||||
/// Returns a `Vec<u8>` of the compressed data.
|
||||
///
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// extern crate gzip_header;
|
||||
/// extern crate deflate;
|
||||
///
|
||||
/// # fn main() {
|
||||
/// use deflate::{deflate_bytes_gzip_conf, Compression};
|
||||
/// use gzip_header::GzBuilder;
|
||||
///
|
||||
/// let data = b"This is some test data";
|
||||
/// let compressed_data = deflate_bytes_gzip_conf(data, Compression::Best, GzBuilder::new());
|
||||
/// # let _ = compressed_data;
|
||||
/// # }
|
||||
/// ```
|
||||
#[cfg(feature = "gzip")]
|
||||
pub fn deflate_bytes_gzip_conf<O: Into<CompressionOptions>>(
|
||||
input: &[u8],
|
||||
options: O,
|
||||
gzip_header: GzBuilder,
|
||||
) -> Vec<u8> {
|
||||
use byteorder::WriteBytesExt;
|
||||
let mut writer = Vec::with_capacity(input.len() / 3);
|
||||
|
||||
// Write header
|
||||
writer
|
||||
.write_all(&gzip_header.into_header())
|
||||
.expect("Write error when writing header!");
|
||||
let mut checksum = checksum::NoChecksum::new();
|
||||
compress_data_dynamic(input, &mut writer, &mut checksum, options.into())
|
||||
.expect("Write error when writing compressed data!");
|
||||
|
||||
let mut crc = Crc::new();
|
||||
crc.update(input);
|
||||
|
||||
writer
|
||||
.write_u32::<LittleEndian>(crc.sum())
|
||||
.expect("Write error when writing checksum!");
|
||||
writer
|
||||
.write_u32::<LittleEndian>(crc.amt_as_u32())
|
||||
.expect("Write error when writing amt!");
|
||||
writer
|
||||
}
|
||||
|
||||
/// Compress the given slice of bytes with DEFLATE compression, including a gzip header and trailer,
|
||||
/// using the default compression level, and a gzip header with default values.
|
||||
///
|
||||
/// Returns a `Vec<u8>` of the compressed data.
|
||||
///
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use deflate::deflate_bytes_gzip;
|
||||
/// let data = b"This is some test data";
|
||||
/// let compressed_data = deflate_bytes_gzip(data);
|
||||
/// # let _ = compressed_data;
|
||||
/// ```
|
||||
#[cfg(feature = "gzip")]
|
||||
pub fn deflate_bytes_gzip(input: &[u8]) -> Vec<u8> {
|
||||
deflate_bytes_gzip_conf(input, Compression::Default, GzBuilder::new())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use std::io::Write;
|
||||
|
||||
#[cfg(feature = "gzip")]
|
||||
use test_utils::decompress_gzip;
|
||||
use test_utils::{decompress_to_end, decompress_zlib, get_test_data};
|
||||
|
||||
type CO = CompressionOptions;
|
||||
|
||||
/// Write data to the writer in chunks of chunk_size.
|
||||
fn chunked_write<W: Write>(mut writer: W, data: &[u8], chunk_size: usize) {
|
||||
for chunk in data.chunks(chunk_size) {
|
||||
writer.write_all(&chunk).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn dynamic_string_mem() {
|
||||
let test_data = String::from(" GNU GENERAL PUBLIC LICENSE").into_bytes();
|
||||
let compressed = deflate_bytes(&test_data);
|
||||
|
||||
assert!(compressed.len() < test_data.len());
|
||||
|
||||
let result = decompress_to_end(&compressed);
|
||||
assert_eq!(test_data, result);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn dynamic_string_file() {
|
||||
let input = get_test_data();
|
||||
let compressed = deflate_bytes(&input);
|
||||
|
||||
let result = decompress_to_end(&compressed);
|
||||
for (n, (&a, &b)) in input.iter().zip(result.iter()).enumerate() {
|
||||
if a != b {
|
||||
println!("First difference at {}, input: {}, output: {}", n, a, b);
|
||||
println!(
|
||||
"input: {:?}, output: {:?}",
|
||||
&input[n - 3..n + 3],
|
||||
&result[n - 3..n + 3]
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Not using assert_eq here deliberately to avoid massive amounts of output spam
|
||||
assert!(input == result);
|
||||
// Check that we actually managed to compress the input
|
||||
assert!(compressed.len() < input.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn file_rle() {
|
||||
let input = get_test_data();
|
||||
let compressed = deflate_bytes_conf(&input, CO::rle());
|
||||
|
||||
let result = decompress_to_end(&compressed);
|
||||
assert!(input == result);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn file_zlib() {
|
||||
let test_data = get_test_data();
|
||||
|
||||
let compressed = deflate_bytes_zlib(&test_data);
|
||||
// {
|
||||
// use std::fs::File;
|
||||
// use std::io::Write;
|
||||
// let mut f = File::create("out.zlib").unwrap();
|
||||
// f.write_all(&compressed).unwrap();
|
||||
// }
|
||||
|
||||
println!("file_zlib compressed(default) length: {}", compressed.len());
|
||||
|
||||
let result = decompress_zlib(&compressed);
|
||||
|
||||
assert!(&test_data == &result);
|
||||
assert!(compressed.len() < test_data.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn zlib_short() {
|
||||
let test_data = [10, 10, 10, 10, 10, 55];
|
||||
roundtrip_zlib(&test_data, CO::default());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn zlib_last_block() {
|
||||
let mut test_data = vec![22; 32768];
|
||||
test_data.extend(&[5, 2, 55, 11, 12]);
|
||||
roundtrip_zlib(&test_data, CO::default());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deflate_short() {
|
||||
let test_data = [10, 10, 10, 10, 10, 55];
|
||||
let compressed = deflate_bytes(&test_data);
|
||||
|
||||
let result = decompress_to_end(&compressed);
|
||||
assert_eq!(&test_data, result.as_slice());
|
||||
// If block type and compression is selected correctly, this should only take 5 bytes.
|
||||
assert_eq!(compressed.len(), 5);
|
||||
}
|
||||
|
||||
#[cfg(feature = "gzip")]
|
||||
#[test]
|
||||
fn gzip() {
|
||||
let data = get_test_data();
|
||||
let comment = b"Test";
|
||||
let compressed = deflate_bytes_gzip_conf(
|
||||
&data,
|
||||
Compression::Default,
|
||||
GzBuilder::new().comment(&comment[..]),
|
||||
);
|
||||
let (dec, decompressed) = decompress_gzip(&compressed);
|
||||
assert_eq!(dec.comment().unwrap(), comment);
|
||||
assert!(data == decompressed);
|
||||
}
|
||||
|
||||
fn chunk_test(chunk_size: usize, level: CompressionOptions) {
|
||||
let mut compressed = Vec::with_capacity(32000);
|
||||
let data = get_test_data();
|
||||
{
|
||||
let mut compressor = write::ZlibEncoder::new(&mut compressed, level);
|
||||
chunked_write(&mut compressor, &data, chunk_size);
|
||||
compressor.finish().unwrap();
|
||||
}
|
||||
let compressed2 = deflate_bytes_zlib_conf(&data, level);
|
||||
let res = decompress_zlib(&compressed);
|
||||
assert!(res == data);
|
||||
assert_eq!(compressed.len(), compressed2.len());
|
||||
assert!(compressed == compressed2);
|
||||
}
|
||||
|
||||
fn writer_chunks_level(level: CompressionOptions) {
|
||||
use input_buffer::BUFFER_SIZE;
|
||||
let ct = |n| chunk_test(n, level);
|
||||
ct(1);
|
||||
ct(50);
|
||||
ct(400);
|
||||
ct(32768);
|
||||
ct(BUFFER_SIZE);
|
||||
ct(50000);
|
||||
ct((32768 * 2) + 258);
|
||||
}
|
||||
|
||||
#[ignore]
|
||||
#[test]
|
||||
/// Test the writer by inputing data in one chunk at the time.
|
||||
fn zlib_writer_chunks() {
|
||||
writer_chunks_level(CompressionOptions::default());
|
||||
writer_chunks_level(CompressionOptions::fast());
|
||||
writer_chunks_level(CompressionOptions::rle());
|
||||
}
|
||||
|
||||
/// Check that the frequency values don't overflow.
|
||||
#[test]
|
||||
fn frequency_overflow() {
|
||||
let _ = deflate_bytes_conf(
|
||||
&vec![5; 100000],
|
||||
compression_options::CompressionOptions::default(),
|
||||
);
|
||||
}
|
||||
|
||||
fn roundtrip_zlib(data: &[u8], level: CompressionOptions) {
|
||||
let compressed = deflate_bytes_zlib_conf(data, level);
|
||||
let res = decompress_zlib(&compressed);
|
||||
if data.len() <= 32 {
|
||||
assert_eq!(res, data, "Failed with level: {:?}", level);
|
||||
} else {
|
||||
assert!(res == data, "Failed with level: {:?}", level);
|
||||
}
|
||||
}
|
||||
|
||||
fn check_zero(level: CompressionOptions) {
|
||||
roundtrip_zlib(&[], level);
|
||||
}
|
||||
|
||||
/// Compress with an empty slice.
|
||||
#[test]
|
||||
fn empty_input() {
|
||||
check_zero(CompressionOptions::default());
|
||||
check_zero(CompressionOptions::fast());
|
||||
check_zero(CompressionOptions::rle());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn one_and_two_values() {
|
||||
let one = &[1][..];
|
||||
roundtrip_zlib(one, CO::rle());
|
||||
roundtrip_zlib(one, CO::fast());
|
||||
roundtrip_zlib(one, CO::default());
|
||||
let two = &[5, 6, 7, 8][..];
|
||||
roundtrip_zlib(two, CO::rle());
|
||||
roundtrip_zlib(two, CO::fast());
|
||||
roundtrip_zlib(two, CO::default());
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,120 @@
|
|||
#[cfg(test)]
|
||||
use crate::huffman_table::MAX_MATCH;
|
||||
use crate::huffman_table::{MAX_DISTANCE, MIN_MATCH};
|
||||
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
|
||||
pub struct StoredLength {
|
||||
length: u8,
|
||||
}
|
||||
|
||||
impl StoredLength {
|
||||
#[cfg(test)]
|
||||
pub fn from_actual_length(length: u16) -> StoredLength {
|
||||
assert!(length <= MAX_MATCH && length >= MIN_MATCH);
|
||||
StoredLength {
|
||||
length: (length - MIN_MATCH) as u8,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(stored_length: u8) -> StoredLength {
|
||||
StoredLength {
|
||||
length: stored_length,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn stored_length(&self) -> u8 {
|
||||
self.length
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn actual_length(&self) -> u16 {
|
||||
u16::from(self.length) + MIN_MATCH
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
|
||||
pub enum LZType {
|
||||
Literal(u8),
|
||||
StoredLengthDistance(StoredLength, u16),
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
|
||||
pub struct LZValue {
|
||||
litlen: u8,
|
||||
distance: u16,
|
||||
}
|
||||
|
||||
impl LZValue {
|
||||
#[inline]
|
||||
pub fn literal(value: u8) -> LZValue {
|
||||
LZValue {
|
||||
litlen: value,
|
||||
distance: 0,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn length_distance(length: u16, distance: u16) -> LZValue {
|
||||
debug_assert!(distance > 0 && distance <= MAX_DISTANCE);
|
||||
let stored_length = (length - MIN_MATCH) as u8;
|
||||
LZValue {
|
||||
litlen: stored_length,
|
||||
distance,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn value(&self) -> LZType {
|
||||
if self.distance != 0 {
|
||||
LZType::StoredLengthDistance(StoredLength::new(self.litlen), self.distance)
|
||||
} else {
|
||||
LZType::Literal(self.litlen)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn lit(l: u8) -> LZValue {
|
||||
LZValue::literal(l)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn ld(l: u16, d: u16) -> LZValue {
|
||||
LZValue::length_distance(l, d)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::huffman_table::{MAX_DISTANCE, MAX_MATCH, MIN_DISTANCE, MIN_MATCH};
|
||||
#[test]
|
||||
fn lzvalue() {
|
||||
for i in 0..255 as usize + 1 {
|
||||
let v = LZValue::literal(i as u8);
|
||||
if let LZType::Literal(n) = v.value() {
|
||||
assert_eq!(n as usize, i);
|
||||
} else {
|
||||
panic!();
|
||||
}
|
||||
}
|
||||
|
||||
for i in MIN_MATCH..MAX_MATCH + 1 {
|
||||
let v = LZValue::length_distance(i, 5);
|
||||
if let LZType::StoredLengthDistance(l, _) = v.value() {
|
||||
assert_eq!(l.actual_length(), i);
|
||||
} else {
|
||||
panic!();
|
||||
}
|
||||
}
|
||||
|
||||
for i in MIN_DISTANCE..MAX_DISTANCE + 1 {
|
||||
let v = LZValue::length_distance(5, i);
|
||||
|
||||
if let LZType::StoredLengthDistance(_, d) = v.value() {
|
||||
assert_eq!(d, i);
|
||||
} else {
|
||||
panic!("Failed to get distance {}", i);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,414 @@
|
|||
use std::cmp;
|
||||
|
||||
use crate::chained_hash_table::{ChainedHashTable, WINDOW_SIZE};
|
||||
|
||||
const MAX_MATCH: usize = crate::huffman_table::MAX_MATCH as usize;
|
||||
#[cfg(test)]
|
||||
const MIN_MATCH: usize = crate::huffman_table::MIN_MATCH as usize;
|
||||
|
||||
/// Get the length of the checked match
|
||||
/// The function returns number of bytes at and including `current_pos` that are the same as the
|
||||
/// ones at `pos_to_check`
|
||||
#[inline]
|
||||
pub fn get_match_length(data: &[u8], current_pos: usize, pos_to_check: usize) -> usize {
|
||||
// Unsafe version using unaligned loads for comparison.
|
||||
// Faster when benching the matching function alone,
|
||||
// but not as significant when running the full thing.
|
||||
/*
|
||||
type Comp = u64;
|
||||
|
||||
use std::mem::size_of;
|
||||
|
||||
let max = cmp::min(data.len() - current_pos, MAX_MATCH);
|
||||
let mut left = max;
|
||||
let s = size_of::<Comp>();
|
||||
|
||||
unsafe {
|
||||
let mut cur = data.as_ptr().offset(current_pos as isize);
|
||||
let mut tc = data.as_ptr().offset(pos_to_check as isize);
|
||||
while left >= s &&
|
||||
(*(cur as *const Comp) == *(tc as *const Comp)) {
|
||||
left -= s;
|
||||
cur = cur.offset(s as isize);
|
||||
tc = tc.offset(s as isize);
|
||||
}
|
||||
while left > 0 && *cur == *tc {
|
||||
left -= 1;
|
||||
cur = cur.offset(1);
|
||||
tc = tc.offset(1);
|
||||
}
|
||||
}
|
||||
|
||||
max - left
|
||||
*/
|
||||
|
||||
// Slightly faster than naive in single bench.
|
||||
// Does not use unaligned loads.
|
||||
// let l = cmp::min(MAX_MATCH, data.len() - current_pos);
|
||||
|
||||
// let a = unsafe{&data.get_unchecked(current_pos..current_pos + l)};
|
||||
// let b = unsafe{&data.get_unchecked(pos_to_check..)};
|
||||
|
||||
// let mut len = 0;
|
||||
|
||||
// for (l, r) in a
|
||||
// .iter()
|
||||
// .zip(b.iter()) {
|
||||
// if *l == *r {
|
||||
// len += 1;
|
||||
// continue;
|
||||
// } else {
|
||||
// break;
|
||||
// }
|
||||
// }
|
||||
// len as usize
|
||||
|
||||
// Naive version
|
||||
data[current_pos..]
|
||||
.iter()
|
||||
.zip(data[pos_to_check..].iter())
|
||||
.take(MAX_MATCH)
|
||||
.take_while(|&(&a, &b)| a == b)
|
||||
.count()
|
||||
}
|
||||
|
||||
/// Try finding the position and length of the longest match in the input data.
///
/// # Returns
/// `(length, distance from position)`.
/// If no match is found that was better than `prev_length`, or no match is found at all,
/// or we are at the start, `(0, 0)` is returned.
///
/// # Arguments:
/// `data`: The data to search in.
/// `hash_table`: Hash table to use for searching.
/// `position`: The position in the data to match against.
/// `prev_length`: The length of the previous `longest_match` check to compare against.
/// `max_hash_checks`: The maximum number of matching hash chain positions to check.
pub fn longest_match(
    data: &[u8],
    hash_table: &ChainedHashTable,
    position: usize,
    prev_length: usize,
    max_hash_checks: u16,
) -> (usize, usize) {
    // debug_assert_eq!(position, hash_table.current_head() as usize);

    // If we already have a match at the maximum length,
    // or we can't grow further, we stop here.
    if prev_length >= MAX_MATCH || position + prev_length >= data.len() {
        return (0, 0);
    }

    // Positions before `limit` are outside the back-reference window and must
    // not be matched against.
    let limit = if position > WINDOW_SIZE {
        position - WINDOW_SIZE
    } else {
        0
    };

    // Make sure the length is at least one to simplify the matching code, as
    // otherwise the matching code might underflow.
    let prev_length = cmp::max(prev_length, 1);

    // A match can never extend past the end of the input.
    let max_length = cmp::min(data.len() - position, MAX_MATCH);

    // The position in the hash chain we are currently checking.
    let mut current_head = position;

    // The best match length we've found so far, and its distance.
    let mut best_length = prev_length;
    let mut best_distance = 0;

    // The position of the previous value in the hash chain.
    let mut prev_head;

    for _ in 0..max_hash_checks {
        prev_head = current_head;
        current_head = hash_table.get_prev(current_head) as usize;
        if current_head >= prev_head || current_head < limit {
            // If the current hash chain value refers to itself, or is referring to
            // a value that's higher (we only move backwards through the chain),
            // we are at the end and can stop.
            break;
        }

        // We only check further if the match length can actually increase.
        // Checking if the end byte and the potential next byte matches is generally
        // more likely to give a quick answer rather than checking from the start first, given
        // that the hashes match.
        // If there is no previous match, best_length will be 1 and the two first bytes will
        // be checked instead.
        // Since we've made sure best_length is always at least 1, this shouldn't underflow.
        if data[position + best_length - 1..=position + best_length]
            == data[current_head + best_length - 1..=current_head + best_length]
        {
            // Actually check how many bytes match.
            // At the moment this will check the two bytes we just checked again,
            // though adding code for skipping these bytes may not result in any speed
            // gain due to the added complexity.
            let length = get_match_length(data, position, current_head);
            if length > best_length {
                best_length = length;
                best_distance = position - current_head;
                if length == max_length {
                    // We are at the max length, so there is no point
                    // searching any longer.
                    break;
                }
            }
        }
    }

    // Only report a match if it actually improved on `prev_length`.
    if best_length > prev_length {
        (best_length, best_distance)
    } else {
        (0, 0)
    }
}
|
||||
|
||||
/// Try finding the position and length of the longest match in the input data using fast zlib
/// hash skipping algorithm.
///
/// # Returns
/// `(length, distance from position)`.
/// If no match is found that was better than `prev_length`, or no match is found at all,
/// or we are at the start, `(0, 0)` is returned.
///
/// # Arguments:
/// `data`: The data to search in.
/// `hash_table`: Hash table to use for searching.
/// `position`: The position in the data to match against.
/// `prev_length`: The length of the previous `longest_match` check to compare against.
/// `max_hash_checks`: The maximum number of matching hash chain positions to check.
#[cfg(test)]
pub fn longest_match_fast(
    data: &[u8],
    hash_table: &ChainedHashTable,
    position: usize,
    prev_length: usize,
    max_hash_checks: u16,
) -> (usize, usize) {
    // debug_assert_eq!(position, hash_table.current_head() as usize);

    // If we already have a match at the maximum length,
    // or we can't grow further, we stop here.
    if prev_length >= MAX_MATCH || position + prev_length >= data.len() {
        return (0, 0);
    }

    // Positions before `limit` are outside the back-reference window and must
    // not be matched against.
    let limit = if position > WINDOW_SIZE {
        position - WINDOW_SIZE
    } else {
        0
    };

    // Make sure the length is at least one to simplify the matching code, as
    // otherwise the matching code might underflow.
    let prev_length = cmp::max(prev_length, 1);

    // A match can never extend past the end of the input.
    let max_length = cmp::min(data.len() - position, MAX_MATCH);

    // The position in the hash chain we are currently checking.
    let mut current_head = position;

    // The best match length we've found so far, and its distance.
    let mut best_length = prev_length;
    let mut best_distance = 0;
    // The offset from the start of the match of the hash chain we are traversing.
    let mut offset = 0;

    // The position of the previous value in the hash chain.
    let mut prev_head;

    for _ in 0..max_hash_checks {
        prev_head = current_head;
        current_head = hash_table.get_prev(current_head) as usize;
        if current_head >= prev_head || current_head < limit + offset {
            // If the current hash chain value refers to itself, or is referring to
            // a value that's higher (we only move backwards through the chain),
            // we are at the end and can stop.
            break;
        }

        // Translate the chain position back to the would-be start of the match.
        let offset_head = current_head - offset;

        // We only check further if the match length can actually increase.
        // Checking if the end byte and the potential next byte matches is generally
        // more likely to give a quick answer rather than checking from the start first, given
        // that the hashes match.
        // If there is no previous match, best_length will be 1 and the two first bytes will
        // be checked instead.
        // Since we've made sure best_length is always at least 1, this shouldn't underflow.
        if data[position + best_length - 1..position + best_length + 1]
            == data[offset_head + best_length - 1..offset_head + best_length + 1]
        {
            // Actually check how many bytes match.
            // At the moment this will check the two bytes we just checked again,
            // though adding code for skipping these bytes may not result in any speed
            // gain due to the added complexity.
            let length = get_match_length(data, position, offset_head);
            if length > best_length {
                best_length = length;
                best_distance = position - offset_head;
                if length == max_length {
                    // We are at the max length, so there is no point
                    // searching any longer.
                    break;
                }

                // Find the position in the match where the next has position is the furthest away.
                // By moving to a different hash chain we can potentially skip a lot of checks,
                // saving time.
                // We avoid doing this for matches that extend past the starting position, as
                // those will contain positions that are not in the hash table yet.
                if best_distance > best_length {
                    offset = hash_table.farthest_next(offset_head, length);
                    current_head = offset_head + offset;
                }
            }
        }
    }

    // Only report a match if it actually improved on `prev_length`.
    if best_length > prev_length {
        (best_length, best_distance)
    } else {
        (0, 0)
    }
}
|
||||
|
||||
/// Convenience wrapper: run `longest_match` against the hash table's current
/// head position, using the default maximum number of hash checks and a
/// previous length just below the minimum match length (so any valid match wins).
#[inline]
#[cfg(test)]
pub fn longest_match_current(data: &[u8], hash_table: &ChainedHashTable) -> (usize, usize) {
    use crate::compression_options::MAX_HASH_CHECKS;

    let position = hash_table.current_head() as usize;
    let prev_length = MIN_MATCH as usize - 1;
    longest_match(data, hash_table, position, prev_length, MAX_HASH_CHECKS)
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::{get_match_length, longest_match, longest_match_fast};
    use crate::chained_hash_table::{filled_hash_table, ChainedHashTable, HASH_BYTES};

    /// Test that match lengths are calculated correctly
    #[test]
    fn match_length() {
        let test_arr = [5u8, 5, 5, 5, 5, 9, 9, 2, 3, 5, 5, 5, 5, 5];
        // The run of 5s starting at index 9 matches the run at index 0 for 5 bytes.
        let l = get_match_length(&test_arr, 9, 0);
        assert_eq!(l, 5);
        // Bytes at 9 and 7 differ immediately: no match.
        let l2 = get_match_length(&test_arr, 9, 7);
        assert_eq!(l2, 0);
        // Starting one byte later only leaves 4 matching bytes before the end.
        let l3 = get_match_length(&test_arr, 10, 0);
        assert_eq!(l3, 4);
    }

    /// Test that we get the longest of the matches
    #[test]
    fn get_longest_match() {
        let test_data = b"xTest data, Test_data,zTest data";
        let hash_table = filled_hash_table(&test_data[..23 + 1 + HASH_BYTES - 1]);

        let (length, distance) = super::longest_match_current(test_data, &hash_table);

        // We check that we get the longest match, rather than the shorter, but closer one.
        assert_eq!(distance, 22);
        assert_eq!(length, 9);
        let test_arr2 = [
            10u8, 10, 10, 10, 10, 10, 10, 10, 2, 3, 5, 10, 10, 10, 10, 10,
        ];
        let hash_table = filled_hash_table(&test_arr2[..HASH_BYTES + 1 + 1 + 2]);
        let (length, distance) = super::longest_match_current(&test_arr2, &hash_table);

        assert_eq!(distance, 1);
        assert_eq!(length, 4);
    }

    /// Make sure we can get a match at index zero
    #[test]
    fn match_index_zero() {
        let test_data = b"AAAAAAA";

        let mut hash_table = ChainedHashTable::from_starting_values(test_data[0], test_data[1]);
        for (n, &b) in test_data[2..5].iter().enumerate() {
            hash_table.add_hash_value(n, b);
        }

        let (match_length, match_dist) = longest_match(test_data, &hash_table, 1, 0, 4096);

        assert_eq!(match_dist, 1);
        assert!(match_length == 6);
    }

    /// Test for fast_zlib algorithm.
    /// Check that it doesn't give worse matches than the default one.
    /// ignored by default as it's slow, and best ran in release mode.
    #[ignore]
    #[test]
    fn fast_match_at_least_equal() {
        use crate::test_utils::get_test_data;
        for start_pos in 10000..50000 {
            const NUM_CHECKS: u16 = 400;
            let data = get_test_data();
            let hash_table = filled_hash_table(&data[..start_pos + 1]);
            let pos = hash_table.current_head() as usize;

            let naive_match = longest_match(&data[..], &hash_table, pos, 0, NUM_CHECKS);
            let fast_match = longest_match_fast(&data[..], &hash_table, pos, 0, NUM_CHECKS);

            if fast_match.0 > naive_match.0 {
                println!("Fast match found better match!");
            }

            assert!(
                fast_match.0 >= naive_match.0,
                "naive match had better length! start_pos: {}, naive: {:?}, fast {:?}",
                start_pos,
                naive_match,
                fast_match
            );
            assert!(
                fast_match.1 >= naive_match.1,
                "naive match had better dist! start_pos: {} naive {:?}, fast {:?}",
                start_pos,
                naive_match,
                fast_match
            );
        }
    }
}
|
||||
|
||||
#[cfg(all(test, feature = "benchmarks"))]
mod bench {
    use super::{longest_match, longest_match_fast};
    // Use `crate::`-prefixed paths for in-crate modules, consistent with the
    // rest of the file (the bare 2015-edition paths used previously do not
    // resolve under the 2018 edition).
    use crate::chained_hash_table::filled_hash_table;
    use crate::test_utils::get_test_data;
    use test_std::Bencher;

    /// Benchmark the default (naive-chain) matcher at a fixed position.
    #[bench]
    fn matching(b: &mut Bencher) {
        const POS: usize = 29000;
        let data = get_test_data();
        let hash_table = filled_hash_table(&data[..POS + 1]);
        let pos = hash_table.current_head() as usize;
        // Print the match once so the result can be sanity-checked.
        println!(
            "M: {:?}",
            longest_match(&data[..], &hash_table, pos, 0, 4096)
        );
        b.iter(|| longest_match(&data[..], &hash_table, pos, 0, 4096));
    }

    /// Benchmark the fast zlib hash-skipping matcher at the same position.
    #[bench]
    fn fast_matching(b: &mut Bencher) {
        const POS: usize = 29000;
        let data = get_test_data();
        let hash_table = filled_hash_table(&data[..POS + 1]);
        let pos = hash_table.current_head() as usize;
        println!(
            "M: {:?}",
            longest_match_fast(&data[..], &hash_table, pos, 0, 4096)
        );
        b.iter(|| longest_match_fast(&data[..], &hash_table, pos, 0, 4096));
    }
}
|
|
@ -0,0 +1,147 @@
|
|||
use std::u16;
|
||||
|
||||
use crate::huffman_table::{
|
||||
get_distance_code, get_length_code, END_OF_BLOCK_POSITION, NUM_DISTANCE_CODES,
|
||||
NUM_LITERALS_AND_LENGTHS,
|
||||
};
|
||||
use crate::lzvalue::LZValue;
|
||||
|
||||
/// The type used for representing how many times a literal, length or distance code has been output
/// to the current buffer.
/// As we are limiting the blocks to be at most 2^16 bytes long, we can represent frequencies using
/// 16-bit values.
pub type FrequencyType = u16;

/// The maximum number of literals/lengths in the buffer, which in practice also means the maximum
/// number of literals/lengths output before a new block is started.
/// This should not be larger than the maximum value `FrequencyType` can represent to prevent
/// overflowing (which would degrade, or in the worst case break compression).
pub const MAX_BUFFER_LENGTH: usize = 1024 * 31;

/// Whether the lz77 output buffer has reached `MAX_BUFFER_LENGTH`.
// A fieldless two-variant flag: deriving `Copy`/`Clone`/`Eq` as well as the
// original `Debug`/`PartialEq` is free and lets callers pass it by value
// without moves.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BufferStatus {
    NotFull,
    Full,
}
|
||||
|
||||
/// Struct that buffers lz77 data and keeps track of the usage of different codes
pub struct DynamicWriter {
    // Buffered lz77 values (literals and length/distance pairs) for the current block.
    buffer: Vec<LZValue>,
    // How often each literal/length code has been used in the current block.
    // The two last length codes are not actually used, but only participates in code construction
    // Therefore, we ignore them to get the correct number of lengths
    frequencies: [FrequencyType; NUM_LITERALS_AND_LENGTHS],
    // How often each distance code has been used in the current block.
    distance_frequencies: [FrequencyType; NUM_DISTANCE_CODES],
}
|
||||
|
||||
impl DynamicWriter {
    /// Report whether the buffer has reached `MAX_BUFFER_LENGTH`.
    #[inline]
    pub fn check_buffer_length(&self) -> BufferStatus {
        if self.buffer.len() >= MAX_BUFFER_LENGTH {
            BufferStatus::Full
        } else {
            BufferStatus::NotFull
        }
    }

    /// Buffer one literal byte, update its frequency count, and report the
    /// resulting buffer status.
    #[inline]
    pub fn write_literal(&mut self, literal: u8) -> BufferStatus {
        debug_assert!(self.buffer.len() < MAX_BUFFER_LENGTH);
        self.buffer.push(LZValue::literal(literal));
        self.frequencies[usize::from(literal)] += 1;
        self.check_buffer_length()
    }

    /// Buffer one length/distance pair, update the length-code and
    /// distance-code frequency counts, and report the resulting buffer status.
    #[inline]
    pub fn write_length_distance(&mut self, length: u16, distance: u16) -> BufferStatus {
        self.buffer.push(LZValue::length_distance(length, distance));
        let l_code_num = get_length_code(length);
        // As we limit the buffer to 2^16 values, this should be safe from overflowing.
        self.frequencies[l_code_num] += 1;

        let d_code_num = get_distance_code(distance);
        // The compiler seems to be able to evade the bounds check here somehow.
        self.distance_frequencies[usize::from(d_code_num)] += 1;
        self.check_buffer_length()
    }

    /// Number of lz77 values currently buffered.
    pub fn buffer_length(&self) -> usize {
        self.buffer.len()
    }

    /// Borrow the buffered lz77 values.
    pub fn get_buffer(&self) -> &[LZValue] {
        &self.buffer
    }

    /// Create an empty writer with the buffer preallocated to `MAX_BUFFER_LENGTH`.
    pub fn new() -> DynamicWriter {
        let mut w = DynamicWriter {
            buffer: Vec::with_capacity(MAX_BUFFER_LENGTH),
            frequencies: [0; NUM_LITERALS_AND_LENGTHS],
            distance_frequencies: [0; NUM_DISTANCE_CODES],
        };
        // This will always be 1,
        // since there will always only be one end of block marker in each block
        w.frequencies[END_OF_BLOCK_POSITION] = 1;
        w
    }

    /// Special output function used with RLE compression
    /// that avoids bothering to lookup a distance code.
    ///
    /// The distance is always 1 for RLE runs, so only the
    /// distance-code-0 counter needs updating.
    #[inline]
    pub fn write_length_rle(&mut self, length: u16) -> BufferStatus {
        self.buffer.push(LZValue::length_distance(length, 1));
        let l_code_num = get_length_code(length);
        // As we limit the buffer to 2^16 values, this should be safe from overflowing.
        if cfg!(debug_assertions) {
            self.frequencies[l_code_num] += 1;
        } else {
            // #Safety
            // None of the values in the table of length code numbers will give a value
            // that is out of bounds.
            // There is a test to ensure that these functions won't produce too large values.
            unsafe {
                *self.frequencies.get_unchecked_mut(l_code_num) += 1;
            }
        }
        self.distance_frequencies[0] += 1;
        self.check_buffer_length()
    }

    /// Borrow the (literal/length, distance) frequency tables for code construction.
    pub fn get_frequencies(&self) -> (&[u16], &[u16]) {
        (&self.frequencies, &self.distance_frequencies)
    }

    /// Reset all frequency counts, restoring the always-present
    /// end-of-block marker count.
    pub fn clear_frequencies(&mut self) {
        self.frequencies = [0; NUM_LITERALS_AND_LENGTHS];
        self.distance_frequencies = [0; NUM_DISTANCE_CODES];
        self.frequencies[END_OF_BLOCK_POSITION] = 1;
    }

    /// Discard the buffered lz77 values (capacity is retained).
    pub fn clear_data(&mut self) {
        self.buffer.clear()
    }

    /// Reset both the frequency counts and the buffered data.
    pub fn clear(&mut self) {
        self.clear_frequencies();
        self.clear_data();
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use crate::huffman_table::{get_distance_code, get_length_code};
    #[test]
    /// Ensure that these functions won't produce values that would overflow the output_writer
    /// tables since we use some unsafe indexing.
    fn array_bounds() {
        let w = DynamicWriter::new();

        // Use inclusive ranges: the previous `0..u16::max_value()` loops
        // stopped at 65534 and never exercised the largest possible value.
        for i in 0..=u16::max_value() {
            assert!(get_length_code(i) < w.frequencies.len());
        }

        for i in 0..=u16::max_value() {
            assert!(get_distance_code(i) < w.distance_frequencies.len() as u8);
        }
    }
}
|
|
@ -0,0 +1,105 @@
|
|||
use crate::lz77::{buffer_full, ProcessStatus};
|
||||
use crate::output_writer::{BufferStatus, DynamicWriter};
|
||||
|
||||
use std::cmp;
|
||||
use std::ops::Range;
|
||||
|
||||
// Local `usize` copies of the huffman table's match-length bounds, to avoid
// repeated casts in the slice arithmetic below.
const MIN_MATCH: usize = crate::huffman_table::MIN_MATCH as usize;
const MAX_MATCH: usize = crate::huffman_table::MAX_MATCH as usize;
|
||||
|
||||
/// Simple match function for run-length encoding.
|
||||
///
|
||||
/// Checks how many of the next bytes from the start of the slice `data` matches prev.
|
||||
fn get_match_length_rle(data: &[u8], prev: u8) -> usize {
|
||||
data.iter()
|
||||
.take(MAX_MATCH)
|
||||
.take_while(|&&b| b == prev)
|
||||
.count()
|
||||
}
|
||||
|
||||
/// LZ77-compress data using the RLE (run-length encoding) strategy.
///
/// This function simply looks for runs of data of at least length 3.
///
/// Returns `(overlap, status)`: `overlap` is how far the last emitted run
/// extended past `iterated_data.end` (0 if it didn't), and `status` reports
/// whether the output buffer filled up before the chunk was fully processed.
pub fn process_chunk_greedy_rle(
    data: &[u8],
    iterated_data: &Range<usize>,
    writer: &mut DynamicWriter,
) -> (usize, ProcessStatus) {
    if data.is_empty() {
        return (0, ProcessStatus::Ok);
    };

    let end = cmp::min(data.len(), iterated_data.end);
    // Start on at least byte 1, since a run is always encoded relative to the previous byte.
    let start = cmp::max(iterated_data.start, 1);
    // The previous byte.
    let mut prev = data[start - 1];
    // Iterate through the requested range, but avoid going off the end.
    let current_chunk = &data[cmp::min(start, end)..end];
    let mut insert_it = current_chunk.iter().enumerate();
    let mut overlap = 0;
    // Make sure to output the first byte, which can never be part of a run.
    if iterated_data.start == 0 && !data.is_empty() {
        write_literal!(writer, data[0], 1);
    }

    while let Some((n, &b)) = insert_it.next() {
        // `n` is relative to `current_chunk`; translate back to a position in `data`.
        let position = n + start;
        let match_len = if prev == b {
            //TODO: Avoid comparing with self here.
            // Would use as_slice() but that doesn't work on an enumerated iterator.
            get_match_length_rle(&data[position..], prev)
        } else {
            0
        };
        if match_len >= MIN_MATCH {
            // Remember how far this run reaches past the requested range, so the
            // caller can skip the already-encoded bytes on the next call.
            if position + match_len > end {
                overlap = position + match_len - end;
            };
            let b_status = writer.write_length_rle(match_len as u16);
            if b_status == BufferStatus::Full {
                return (overlap, buffer_full(position + match_len));
            }
            // Skip the bytes covered by the run (`nth` consumes match_len - 1 items
            // beyond the current one).
            insert_it.nth(match_len - 2);
        } else {
            write_literal!(writer, b, position + 1);
        }
        prev = b;
    }

    (overlap, ProcessStatus::Ok)
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use crate::lzvalue::{ld, lit, LZValue};

    // Shorthand: build a literal LZValue from a char.
    fn l(c: char) -> LZValue {
        lit(c as u8)
    }

    #[test]
    fn rle_compress() {
        let input = b"textaaaaaaaaatext";
        let mut w = DynamicWriter::new();
        let r = 0..input.len();
        let (overlap, _) = process_chunk_greedy_rle(input, &r, &mut w);
        // The run of nine 'a's should become one literal 'a' followed by a
        // length-8, distance-1 back-reference.
        let expected = [
            l('t'),
            l('e'),
            l('x'),
            l('t'),
            l('a'),
            ld(8, 1),
            l('t'),
            l('e'),
            l('x'),
            l('t'),
        ];
        //println!("expected: {:?}", expected);
        //println!("actual: {:?}", w.get_buffer());
        assert!(w.get_buffer() == expected);
        // The whole range was requested, so no run extends past it.
        assert_eq!(overlap, 0);
    }
}
|
|
@ -0,0 +1,97 @@
|
|||
use crate::bitstream::LsbWriter;
|
||||
use byteorder::{LittleEndian, WriteBytesExt};
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use std::u16;
|
||||
|
||||
// Block size used when splitting data into stored blocks in the test helper below.
#[cfg(test)]
const BLOCK_SIZE: u16 = 32000;

// First byte of a non-final stored block: BFINAL bit clear
// (only 3 bits of this byte are actually emitted — see `write_stored_header`).
const STORED_FIRST_BYTE: u8 = 0b0000_0000;
// First byte of the final stored block: BFINAL bit set.
pub const STORED_FIRST_BYTE_FINAL: u8 = 0b0000_0001;
pub const MAX_STORED_BLOCK_LENGTH: usize = (u16::MAX as usize) / 2;
|
||||
|
||||
pub fn write_stored_header(writer: &mut LsbWriter, final_block: bool) {
|
||||
let header = if final_block {
|
||||
STORED_FIRST_BYTE_FINAL
|
||||
} else {
|
||||
STORED_FIRST_BYTE
|
||||
};
|
||||
// Write the block header
|
||||
writer.write_bits(header.into(), 3);
|
||||
// Flush the writer to make sure we are aligned to the byte boundary.
|
||||
writer.flush_raw();
|
||||
}
|
||||
|
||||
// Compress one stored block (excluding the header)
/// Write one stored (uncompressed) block body to `writer`: the two-byte
/// little-endian length, its ones' complement, then the raw input bytes.
/// Returns the number of data bytes written on success.
///
/// # Errors
/// Returns `InvalidInput` if `input` is longer than a stored block allows
/// (the length must fit in a `u16`), or propagates any error from `writer`.
pub fn compress_block_stored<W: Write>(input: &[u8], writer: &mut W) -> io::Result<usize> {
    if input.len() > u16::max_value() as usize {
        return Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            "Stored block too long!",
        ));
    };
    // The header is written before this function.
    // The next two bytes indicate the length.
    writer.write_all(&(input.len() as u16).to_le_bytes())?;
    // The next two after the length are the ones' complement of the length.
    writer.write_all(&(!(input.len() as u16)).to_le_bytes())?;
    // Use `write_all` rather than `write`: a plain `write` is permitted to do
    // a short write, which would silently truncate the stored data.
    writer.write_all(input)?;
    Ok(input.len())
}
|
||||
|
||||
/// Test helper: store `input` uncompressed as a sequence of stored blocks of
/// at most `BLOCK_SIZE` bytes each, marking the last one final.
#[cfg(test)]
pub fn compress_data_stored(input: &[u8]) -> Vec<u8> {
    let mut output = Vec::with_capacity(input.len() + 2);
    let mut chunks = input.chunks(BLOCK_SIZE as usize).peekable();
    while let Some(chunk) = chunks.next() {
        // First bit of the header byte marks the final chunk; the next two
        // give the compression type (none in this case).
        let first_byte = match chunks.peek() {
            Some(_) => STORED_FIRST_BYTE,
            None => STORED_FIRST_BYTE_FINAL,
        };
        output.write(&[first_byte]).unwrap();

        compress_block_stored(chunk, &mut output).unwrap();
    }
    output
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use crate::test_utils::decompress_to_end;

    /// A payload smaller than BLOCK_SIZE round-trips through a single stored block.
    #[test]
    fn no_compression_one_chunk() {
        let test_data = vec![1u8, 2, 3, 4, 5, 6, 7, 8];
        let compressed = compress_data_stored(&test_data);
        let result = decompress_to_end(&compressed);
        assert_eq!(test_data, result);
    }

    /// A payload larger than BLOCK_SIZE is split over several stored blocks
    /// and still round-trips.
    #[test]
    fn no_compression_multiple_chunks() {
        let test_data = vec![32u8; 40000];
        let compressed = compress_data_stored(&test_data);
        let result = decompress_to_end(&compressed);
        assert_eq!(test_data, result);
    }

    /// Text data round-trips unchanged.
    #[test]
    fn no_compression_string() {
        let test_data = String::from(
            "This is some text, this is some more text, this is even \
             more text, lots of text here.",
        )
        .into_bytes();
        let compressed = compress_data_stored(&test_data);
        let result = decompress_to_end(&compressed);
        assert_eq!(test_data, result);
    }
}
|
|
@ -0,0 +1,72 @@
|
|||
#![cfg(test)]
|
||||
|
||||
#[cfg(feature = "gzip")]
|
||||
use gzip_header::GzHeader;
|
||||
|
||||
/// Read the entire contents of the file at `name` into a `Vec<u8>`.
///
/// # Panics
/// Panics if the file cannot be opened or read — this is a test helper, so a
/// missing file is a bug in the test setup.
fn get_test_file_data(name: &str) -> Vec<u8> {
    // `fs::read` sizes its buffer from the file metadata and replaces the
    // manual open + read_to_end sequence.
    std::fs::read(name).unwrap()
}
|
||||
|
||||
pub fn get_test_data() -> Vec<u8> {
|
||||
use std::env;
|
||||
let path = env::var("TEST_FILE").unwrap_or("tests/pg11.txt".to_string());
|
||||
get_test_file_data(&path)
|
||||
}
|
||||
|
||||
/// Helper function to decompress into a `Vec<u8>`
|
||||
pub fn decompress_to_end(input: &[u8]) -> Vec<u8> {
|
||||
use miniz_oxide::inflate::decompress_to_vec;
|
||||
|
||||
decompress_to_vec(input).expect("Decompression failed!")
|
||||
}
|
||||
|
||||
/// Decompress a gzip stream, returning its parsed header and the decompressed
/// data. Panics if the header is invalid, decompression fails, or the trailer
/// checksum/length do not match the decompressed output.
#[cfg(feature = "gzip")]
pub fn decompress_gzip(compressed: &[u8]) -> (GzHeader, Vec<u8>) {
    use gzip_header::{read_gz_header, Crc};
    use std::io::Cursor;
    // Parse the gzip header off the front of the stream, then slice past it.
    let mut c = Cursor::new(compressed);
    let h = read_gz_header(&mut c).expect("Failed to decode gzip header!");
    let pos = c.position();
    let compressed = &c.into_inner()[pos as usize..];

    let result = miniz_oxide::inflate::decompress_to_vec(compressed).expect("Decompression failed");

    let s = compressed.len();

    // The last 8 bytes are the gzip trailer: CRC32 of the uncompressed data
    // followed by its length (both little-endian).
    let crc = u32::from_le_bytes([
        compressed[s - 8],
        compressed[s - 7],
        compressed[s - 6],
        compressed[s - 5],
    ]);
    let len = u32::from_le_bytes([
        compressed[s - 4],
        compressed[s - 3],
        compressed[s - 2],
        compressed[s - 1],
    ]);

    // Recompute the CRC over the decompressed output and compare with the trailer.
    let mut comp_crc = Crc::new();
    comp_crc.update(&result);

    assert_eq!(
        crc,
        comp_crc.sum(),
        "Checksum failed File: {}, computed: {}",
        crc,
        comp_crc.sum()
    );
    assert_eq!(len, result.len() as u32, "Length mismatch");

    (h, result)
}
|
||||
|
||||
pub fn decompress_zlib(compressed: &[u8]) -> Vec<u8> {
|
||||
miniz_oxide::inflate::decompress_to_vec_zlib(&compressed).expect("Decompression failed!")
|
||||
}
|
|
@ -0,0 +1,662 @@
|
|||
use std::io::Write;
|
||||
use std::{io, thread};
|
||||
|
||||
use byteorder::{BigEndian, WriteBytesExt};
|
||||
|
||||
use crate::checksum::{Adler32Checksum, RollingChecksum};
|
||||
use crate::compress::compress_data_dynamic_n;
|
||||
use crate::compress::Flush;
|
||||
use crate::compression_options::CompressionOptions;
|
||||
use crate::deflate_state::DeflateState;
|
||||
use crate::zlib::{write_zlib_header, CompressionLevel};
|
||||
|
||||
// The `\` line continuation strips the newline AND all leading whitespace of
// the next line, so the space before the continuation is required to keep the
// two sentences separated in the rendered message.
const ERR_STR: &str = "Error! The wrapped writer is missing. \
                       This is a bug, please file an issue.";
|
||||
|
||||
/// Keep compressing until all the input has been compressed and output or the writer returns `Err`.
///
/// `flush_mode` must not be `Flush::None` — this function is only meant for
/// flushing/finishing, where an `Ok(0)` with an empty output buffer signals completion.
pub fn compress_until_done<W: Write>(
    mut input: &[u8],
    deflate_state: &mut DeflateState<W>,
    flush_mode: Flush,
) -> io::Result<()> {
    // This should only be used for flushing.
    assert!(flush_mode != Flush::None);
    loop {
        match compress_data_dynamic_n(input, deflate_state, flush_mode) {
            Ok(0) => {
                if deflate_state.output_buf().is_empty() {
                    // Nothing consumed and nothing buffered: we are done.
                    break;
                } else {
                    // If the output buffer isn't empty, keep going until it is, as there is still
                    // data to be flushed.
                    input = &[];
                }
            }
            Ok(n) => {
                // `n` input bytes were consumed; continue with the remainder.
                if n < input.len() {
                    input = &input[n..]
                } else {
                    input = &[];
                }
            }
            Err(e) => {
                match e.kind() {
                    // This error means that there may still be data to flush.
                    // This could possibly get stuck if the underlying writer keeps returning this
                    // error.
                    io::ErrorKind::Interrupted => (),
                    _ => return Err(e),
                }
            }
        }
    }

    // Sanity check (debug builds only): the two byte counters must agree.
    debug_assert_eq!(
        deflate_state.bytes_written,
        deflate_state.bytes_written_control.get()
    );

    Ok(())
}
|
||||
|
||||
/// A DEFLATE encoder/compressor.
///
/// A struct implementing a [`Write`] interface that takes arbitrary data and compresses it to
/// the provided writer using DEFLATE compression.
///
/// # Examples
///
/// ```rust
/// # use std::io;
/// #
/// # fn try_main() -> io::Result<Vec<u8>> {
/// #
/// use std::io::Write;
///
/// use deflate::Compression;
/// use deflate::write::DeflateEncoder;
///
/// let data = b"This is some test data";
/// let mut encoder = DeflateEncoder::new(Vec::new(), Compression::Default);
/// encoder.write_all(data)?;
/// let compressed_data = encoder.finish()?;
/// # Ok(compressed_data)
/// #
/// # }
/// # fn main() {
/// #     try_main().unwrap();
/// # }
/// ```
/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
pub struct DeflateEncoder<W: Write> {
    // All compressor state: buffers, compression options, and the wrapped writer.
    deflate_state: DeflateState<W>,
}
|
||||
|
||||
impl<W: Write> DeflateEncoder<W> {
    /// Creates a new encoder using the provided compression options.
    pub fn new<O: Into<CompressionOptions>>(writer: W, options: O) -> DeflateEncoder<W> {
        DeflateEncoder {
            deflate_state: DeflateState::new(options.into(), writer),
        }
    }

    /// Encode all pending data to the contained writer, consume this `DeflateEncoder`,
    /// and return the contained writer if writing succeeds.
    pub fn finish(mut self) -> io::Result<W> {
        self.output_all()?;
        // We have to move the inner writer out of the encoder, and replace it with `None`
        // to let the `DeflateEncoder` drop safely.
        Ok(self.deflate_state.inner.take().expect(ERR_STR))
    }

    /// Resets the encoder (except the compression options), replacing the current writer
    /// with a new one, returning the old one.
    pub fn reset(&mut self, w: W) -> io::Result<W> {
        self.output_all()?;
        self.deflate_state.reset(w)
    }

    /// Output all pending data as if encoding is done, but without resetting anything
    fn output_all(&mut self) -> io::Result<()> {
        compress_until_done(&[], &mut self.deflate_state, Flush::Finish)
    }
}
|
||||
|
||||
impl<W: Write> io::Write for DeflateEncoder<W> {
    /// Compress `buf` using the state's current flush mode, returning the
    /// number of input bytes consumed.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let flush_mode = self.deflate_state.flush_mode;
        compress_data_dynamic_n(buf, &mut self.deflate_state, flush_mode)
    }

    /// Flush the encoder.
    ///
    /// This will flush the encoder, emulating the Sync flush method from Zlib.
    /// This essentially finishes the current block, and sends an additional empty stored block to
    /// the writer.
    fn flush(&mut self) -> io::Result<()> {
        compress_until_done(&[], &mut self.deflate_state, Flush::Sync)
    }
}
|
||||
|
||||
impl<W: Write> Drop for DeflateEncoder<W> {
    /// When the encoder is dropped, output the rest of the data.
    ///
    /// WARNING: This may silently fail if writing fails, so using this to finish encoding
    /// for writers where writing might fail is not recommended, for that call
    /// [`finish()`](#method.finish) instead.
    fn drop(&mut self) {
        // Not sure if implementing drop is a good idea or not, but we follow flate2 for now.
        // We only do this if we are not panicking, to avoid a double panic.
        // (`inner` is `None` once `finish()` has already taken the writer.)
        if self.deflate_state.inner.is_some() && !thread::panicking() {
            let _ = self.output_all();
        }
    }
}
|
||||
|
||||
/// A Zlib encoder/compressor.
///
/// A struct implementing a [`Write`] interface that takes arbitrary data and compresses it to
/// the provided writer using DEFLATE compression with Zlib headers and trailers.
///
/// # Examples
///
/// ```rust
/// # use std::io;
/// #
/// # fn try_main() -> io::Result<Vec<u8>> {
/// #
/// use std::io::Write;
///
/// use deflate::Compression;
/// use deflate::write::ZlibEncoder;
///
/// let data = b"This is some test data";
/// let mut encoder = ZlibEncoder::new(Vec::new(), Compression::Default);
/// encoder.write_all(data)?;
/// let compressed_data = encoder.finish()?;
/// # Ok(compressed_data)
/// #
/// # }
/// # fn main() {
/// #     try_main().unwrap();
/// # }
/// ```
/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
pub struct ZlibEncoder<W: Write> {
    // Deflate compression state; also owns the wrapped writer (`inner`).
    deflate_state: DeflateState<W>,
    // Running Adler-32 checksum of the uncompressed input consumed so far.
    checksum: Adler32Checksum,
    // Whether the two-byte zlib header has been emitted yet; it is written
    // lazily on the first write/flush/finish.
    header_written: bool,
}
|
||||
|
||||
impl<W: Write> ZlibEncoder<W> {
    /// Create a new `ZlibEncoder` using the provided compression options.
    pub fn new<O: Into<CompressionOptions>>(writer: W, options: O) -> ZlibEncoder<W> {
        ZlibEncoder {
            deflate_state: DeflateState::new(options.into(), writer),
            checksum: Adler32Checksum::new(),
            // The header is written lazily, on the first write/flush/finish.
            header_written: false,
        }
    }

    /// Output all pending data, including the trailer (checksum), as if encoding is done,
    /// but without resetting anything.
    fn output_all(&mut self) -> io::Result<()> {
        // Make sure the header is present even if no data was ever written.
        self.check_write_header()?;
        compress_until_done(&[], &mut self.deflate_state, Flush::Finish)?;
        self.write_trailer()
    }

    /// Encode all pending data to the contained writer, consume this `ZlibEncoder`,
    /// and return the contained writer if writing succeeds.
    pub fn finish(mut self) -> io::Result<W> {
        self.output_all()?;
        // We have to move the inner writer out of the encoder, and replace it with `None`
        // to let the `DeflateEncoder` drop safely.
        Ok(self.deflate_state.inner.take().expect(ERR_STR))
    }

    /// Resets the encoder (except the compression options), replacing the current writer
    /// with a new one, returning the old one.
    pub fn reset(&mut self, writer: W) -> io::Result<W> {
        self.output_all()?;
        // Start a fresh zlib stream: a new header must be written and the
        // checksum computed from scratch.
        self.header_written = false;
        self.checksum = Adler32Checksum::new();
        self.deflate_state.reset(writer)
    }

    /// Check if a zlib header should be written.
    fn check_write_header(&mut self) -> io::Result<()> {
        if !self.header_written {
            // The compression-level field of the zlib header is informational
            // only (it has no effect on decompression), so `Default` is
            // advertised regardless of the actual compression options in use.
            write_zlib_header(self.deflate_state.output_buf(), CompressionLevel::Default)?;
            self.header_written = true;
        }
        Ok(())
    }

    /// Write the trailer, which for zlib is the Adler32 checksum.
    fn write_trailer(&mut self) -> io::Result<()> {
        let hash = self.checksum.current_hash();

        // The trailer is the Adler-32 hash of the input, stored big-endian.
        self.deflate_state
            .inner
            .as_mut()
            .expect(ERR_STR)
            .write_u32::<BigEndian>(hash)
    }

    /// Return the adler32 checksum of the currently consumed data.
    pub fn checksum(&self) -> u32 {
        self.checksum.current_hash()
    }
}
|
||||
|
||||
impl<W: Write> io::Write for ZlibEncoder<W> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.check_write_header()?;
|
||||
let flush_mode = self.deflate_state.flush_mode;
|
||||
let res = compress_data_dynamic_n(buf, &mut self.deflate_state, flush_mode);
|
||||
match res {
|
||||
// If this is returned, the whole buffer was consumed
|
||||
Ok(0) => self.checksum.update_from_slice(buf),
|
||||
// Otherwise, only part of it was consumed, so only that part
|
||||
// added to the checksum.
|
||||
Ok(n) => self.checksum.update_from_slice(&buf[0..n]),
|
||||
_ => (),
|
||||
};
|
||||
res
|
||||
}
|
||||
|
||||
/// Flush the encoder.
|
||||
///
|
||||
/// This will flush the encoder, emulating the Sync flush method from Zlib.
|
||||
/// This essentially finishes the current block, and sends an additional empty stored block to
|
||||
/// the writer.
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
compress_until_done(&[], &mut self.deflate_state, Flush::Sync)
|
||||
}
|
||||
}
|
||||
|
||||
impl<W: Write> Drop for ZlibEncoder<W> {
    /// When the encoder is dropped, output the rest of the data.
    ///
    /// WARNING: This may silently fail if writing fails, so using this to finish encoding
    /// for writers where writing might fail is not recommended, for that call
    /// [`finish()`](#method.finish) instead.
    fn drop(&mut self) {
        // `inner` is `None` once `finish()` has consumed the writer; the
        // panicking check avoids a potential double panic during unwinding.
        if self.deflate_state.inner.is_some() && !thread::panicking() {
            let _ = self.output_all();
        }
    }
}
|
||||
|
||||
#[cfg(feature = "gzip")]
|
||||
pub mod gzip {
|
||||
|
||||
use std::io::{Cursor, Write};
|
||||
use std::{io, thread};
|
||||
|
||||
use super::*;
|
||||
|
||||
use byteorder::{LittleEndian, WriteBytesExt};
|
||||
use gzip_header::{Crc, GzBuilder};
|
||||
|
||||
/// A Gzip encoder/compressor.
///
/// A struct implementing a [`Write`] interface that takes arbitrary data and compresses it to
/// the provided writer using DEFLATE compression with Gzip headers and trailers.
///
/// # Examples
///
/// ```rust
/// # use std::io;
/// #
/// # fn try_main() -> io::Result<Vec<u8>> {
/// #
/// use std::io::Write;
///
/// use deflate::Compression;
/// use deflate::write::GzEncoder;
///
/// let data = b"This is some test data";
/// let mut encoder = GzEncoder::new(Vec::new(), Compression::Default);
/// encoder.write_all(data)?;
/// let compressed_data = encoder.finish()?;
/// # Ok(compressed_data)
/// #
/// # }
/// # fn main() {
/// #     try_main().unwrap();
/// # }
/// ```
/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
pub struct GzEncoder<W: Write> {
    // The wrapped deflate encoder, which also owns the output writer.
    inner: DeflateEncoder<W>,
    // Running CRC32 (plus byte count) of the uncompressed input.
    checksum: Crc,
    // Pending gzip header bytes; cleared once the header has been written.
    header: Vec<u8>,
}
|
||||
|
||||
impl<W: Write> GzEncoder<W> {
    /// Create a new `GzEncoder` writing deflate-compressed data to the underlying writer when
    /// written to, wrapped in a gzip header and trailer. The header details will be blank.
    pub fn new<O: Into<CompressionOptions>>(writer: W, options: O) -> GzEncoder<W> {
        GzEncoder::from_builder(GzBuilder::new(), writer, options)
    }

    /// Create a new GzEncoder from the provided `GzBuilder`. This allows customising
    /// the details of the header, such as the filename and comment fields.
    pub fn from_builder<O: Into<CompressionOptions>>(
        builder: GzBuilder,
        writer: W,
        options: O,
    ) -> GzEncoder<W> {
        GzEncoder {
            inner: DeflateEncoder::new(writer, options),
            checksum: Crc::new(),
            header: builder.into_header(),
        }
    }

    /// Write header to the output buffer if it hasn't been done yet.
    fn check_write_header(&mut self) {
        // An empty `header` doubles as the "already written" marker.
        // NOTE(review): this assumes `GzBuilder::into_header` never produces an
        // empty vec — confirm against gzip_header.
        if !self.header.is_empty() {
            self.inner
                .deflate_state
                .output_buf()
                .extend_from_slice(&self.header);
            self.header.clear();
        }
    }

    /// Output all pending data, including the trailer (checksum + byte count), as if
    /// encoding is done, but without resetting anything.
    fn output_all(&mut self) -> io::Result<()> {
        self.check_write_header();
        self.inner.output_all()?;
        self.write_trailer()
    }

    /// Encode all pending data to the contained writer, consume this `GzEncoder`,
    /// and return the contained writer if writing succeeds.
    pub fn finish(mut self) -> io::Result<W> {
        self.output_all()?;
        // We have to move the inner writer out of the encoder, and replace it with `None`
        // to let the `DeflateEncoder` drop safely.
        Ok(self.inner.deflate_state.inner.take().expect(ERR_STR))
    }

    // Shared reset logic; the caller is responsible for installing a new header.
    fn reset_no_header(&mut self, writer: W) -> io::Result<W> {
        self.output_all()?;
        self.checksum = Crc::new();
        self.inner.deflate_state.reset(writer)
    }

    /// Resets the encoder (except the compression options), replacing the current writer
    /// with a new one, returning the old one. (Using a blank header).
    pub fn reset(&mut self, writer: W) -> io::Result<W> {
        // The fresh header is installed even if flushing the old stream failed,
        // leaving the encoder in a consistent new-stream state either way.
        let w = self.reset_no_header(writer);
        self.header = GzBuilder::new().into_header();
        w
    }

    /// Resets the encoder (except the compression options), replacing the current writer
    /// with a new one, returning the old one, and using the provided `GzBuilder` to
    /// create the header.
    pub fn reset_with_builder(&mut self, writer: W, builder: GzBuilder) -> io::Result<W> {
        let w = self.reset_no_header(writer);
        self.header = builder.into_header();
        w
    }

    /// Write the checksum and number of bytes mod 2^32 to the output writer.
    fn write_trailer(&mut self) -> io::Result<()> {
        let crc = self.checksum.sum();
        let amount = self.checksum.amt_as_u32();

        // We use a buffer here to make sure we don't end up writing only half the header if
        // writing fails.
        let mut buf = [0u8; 8];
        let mut temp = Cursor::new(&mut buf[..]);
        // Gzip trailer layout: CRC32 then input length, both little-endian.
        temp.write_u32::<LittleEndian>(crc).unwrap();
        temp.write_u32::<LittleEndian>(amount).unwrap();
        self.inner
            .deflate_state
            .inner
            .as_mut()
            .expect(ERR_STR)
            .write_all(temp.into_inner())
    }

    /// Get the crc32 checksum of the data consumed so far.
    pub fn checksum(&self) -> u32 {
        self.checksum.sum()
    }
}
|
||||
|
||||
impl<W: Write> io::Write for GzEncoder<W> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
self.check_write_header();
|
||||
let res = self.inner.write(buf);
|
||||
match res {
|
||||
Ok(0) => self.checksum.update(buf),
|
||||
Ok(n) => self.checksum.update(&buf[0..n]),
|
||||
_ => (),
|
||||
};
|
||||
res
|
||||
}
|
||||
|
||||
/// Flush the encoder.
|
||||
///
|
||||
/// This will flush the encoder, emulating the Sync flush method from Zlib.
|
||||
/// This essentially finishes the current block, and sends an additional empty stored
|
||||
/// block to the writer.
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
self.inner.flush()
|
||||
}
|
||||
}
|
||||
|
||||
impl<W: Write> Drop for GzEncoder<W> {
    /// When the encoder is dropped, output the rest of the data.
    ///
    /// WARNING: This may silently fail if writing fails, so using this to finish encoding
    /// for writers where writing might fail is not recommended, for that call
    /// [`finish()`](#method.finish) instead.
    fn drop(&mut self) {
        // `inner` is `None` once `finish()` has consumed the writer; the
        // panicking check avoids a potential double panic during unwinding.
        if self.inner.deflate_state.inner.is_some() && !thread::panicking() {
            let _ = self.output_all();
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use crate::test_utils::{decompress_gzip, get_test_data};

    // Round-trip: header metadata (comment field) and payload must both
    // survive compression and decompression.
    #[test]
    fn gzip_writer() {
        let data = get_test_data();
        let comment = b"Comment";
        let compressed = {
            let mut compressor = GzEncoder::from_builder(
                GzBuilder::new().comment(&comment[..]),
                Vec::with_capacity(data.len() / 3),
                CompressionOptions::default(),
            );
            // Write in two halves to exercise multiple `write` calls.
            compressor.write_all(&data[0..data.len() / 2]).unwrap();
            compressor.write_all(&data[data.len() / 2..]).unwrap();
            compressor.finish().unwrap()
        };

        let (dec, res) = decompress_gzip(&compressed);
        assert_eq!(dec.comment().unwrap(), comment);
        assert!(res == data);
    }
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use crate::compression_options::CompressionOptions;
    use crate::test_utils::{decompress_to_end, decompress_zlib, get_test_data};
    use std::io::Write;

    // Raw-deflate round trip across multiple `write` calls.
    #[test]
    fn deflate_writer() {
        let data = get_test_data();
        let compressed = {
            let mut compressor = DeflateEncoder::new(
                Vec::with_capacity(data.len() / 3),
                CompressionOptions::high(),
            );
            // Write in multiple steps to see if this works as it's supposed to.
            compressor.write_all(&data[0..data.len() / 2]).unwrap();
            compressor.write_all(&data[data.len() / 2..]).unwrap();
            compressor.finish().unwrap()
        };

        let res = decompress_to_end(&compressed);
        assert!(res == data);
    }

    // Zlib round trip across multiple `write` calls.
    #[test]
    fn zlib_writer() {
        let data = get_test_data();
        let compressed = {
            let mut compressor = ZlibEncoder::new(
                Vec::with_capacity(data.len() / 3),
                CompressionOptions::high(),
            );
            compressor.write_all(&data[0..data.len() / 2]).unwrap();
            compressor.write_all(&data[data.len() / 2..]).unwrap();
            compressor.finish().unwrap()
        };

        let res = decompress_zlib(&compressed);
        assert!(res == data);
    }

    #[test]
    /// Check if the result of compressing after resetting is the same as before.
    fn writer_reset() {
        let data = get_test_data();
        let mut compressor = DeflateEncoder::new(
            Vec::with_capacity(data.len() / 3),
            CompressionOptions::default(),
        );
        compressor.write_all(&data).unwrap();
        // `reset` returns the output of the first (finished) stream.
        let res1 = compressor
            .reset(Vec::with_capacity(data.len() / 3))
            .unwrap();
        compressor.write_all(&data).unwrap();
        let res2 = compressor.finish().unwrap();
        // Compressing identical data after a reset must be deterministic.
        assert!(res1 == res2);
    }

    // Same determinism-after-reset check for the zlib wrapper.
    #[test]
    fn writer_reset_zlib() {
        let data = get_test_data();
        let mut compressor = ZlibEncoder::new(
            Vec::with_capacity(data.len() / 3),
            CompressionOptions::default(),
        );
        compressor.write_all(&data).unwrap();
        let res1 = compressor
            .reset(Vec::with_capacity(data.len() / 3))
            .unwrap();
        compressor.write_all(&data).unwrap();
        let res2 = compressor.finish().unwrap();
        assert!(res1 == res2);
    }

    // A sync flush must emit the empty-stored-block marker and still allow the
    // stream to be continued and decompressed correctly.
    #[test]
    fn writer_sync() {
        let data = get_test_data();
        let compressed = {
            let mut compressor = DeflateEncoder::new(
                Vec::with_capacity(data.len() / 3),
                CompressionOptions::default(),
            );
            let split = data.len() / 2;
            compressor.write_all(&data[..split]).unwrap();
            compressor.flush().unwrap();
            {
                let buf = &mut compressor.deflate_state.inner.as_mut().unwrap();
                let buf_len = buf.len();
                // Check for the sync marker. (excluding the header as it might not line
                // up with the byte boundary.)
                assert_eq!(buf[buf_len - 4..], [0, 0, 255, 255]);
            }
            compressor.write_all(&data[split..]).unwrap();
            compressor.finish().unwrap()
        };

        let decompressed = decompress_to_end(&compressed);

        assert!(decompressed == data);
    }

    #[test]
    /// Make sure compression works with the writer when the input is between 1 and 2 window sizes.
    fn issue_18() {
        use crate::compression_options::Compression;
        let data = vec![0; 61000];
        let compressed = {
            let mut compressor = ZlibEncoder::new(Vec::new(), Compression::Default);
            compressor.write_all(&data[..]).unwrap();
            compressor.finish().unwrap()
        };
        let decompressed = decompress_zlib(&compressed);
        assert!(decompressed == data);
    }

    // Repeated/interleaved sync flushes must not corrupt the stream.
    #[test]
    fn writer_sync_multiple() {
        use std::cmp;
        let data = get_test_data();
        let compressed = {
            let mut compressor = DeflateEncoder::new(
                Vec::with_capacity(data.len() / 3),
                CompressionOptions::default(),
            );
            let split = data.len() / 2;
            compressor.write_all(&data[..split]).unwrap();
            // Two flushes in a row must still leave a single valid sync marker.
            compressor.flush().unwrap();
            compressor.flush().unwrap();
            {
                let buf = &mut compressor.deflate_state.inner.as_mut().unwrap();
                let buf_len = buf.len();
                // Check for the sync marker. (excluding the header as it might not line
                // up with the byte boundary.)
                assert_eq!(buf[buf_len - 4..], [0, 0, 255, 255]);
            }
            compressor
                .write_all(&data[split..cmp::min(split + 2, data.len())])
                .unwrap();
            compressor.flush().unwrap();
            compressor
                .write_all(&data[cmp::min(split + 2, data.len())..])
                .unwrap();
            compressor.finish().unwrap()
        };

        let decompressed = decompress_to_end(&compressed);

        assert!(decompressed == data);

        let mut compressor = DeflateEncoder::new(
            Vec::with_capacity(data.len() / 3),
            CompressionOptions::default(),
        );

        // Interleave flushes with tiny writes, including a flush before any
        // data at all, and make sure the payload survives.
        compressor.flush().unwrap();
        compressor.write_all(&[1, 2]).unwrap();
        compressor.flush().unwrap();
        compressor.write_all(&[3]).unwrap();
        compressor.flush().unwrap();
        let compressed = compressor.finish().unwrap();

        let decompressed = decompress_to_end(&compressed);

        assert_eq!(decompressed, [1, 2, 3]);
    }
}
|
|
@ -0,0 +1,87 @@
|
|||
//! This module contains functionality for generating a [zlib](https://tools.ietf.org/html/rfc1950)
|
||||
//! header.
|
||||
//!
|
||||
//! The Zlib header contains some metadata (a window size and a compression level), and optionally
|
||||
//! a block of data serving as an extra dictionary for the compressor/decompressor.
|
||||
//! The dictionary is not implemented in this library.
|
||||
//! The data in the header aside from the dictionary doesn't actually have any effect on the
|
||||
//! decompressed data, it only offers some hints for the decompressor on how the data was
|
||||
//! compressed.
|
||||
|
||||
use std::io::{Result, Write};
|
||||
|
||||
// CM = 8 means to use the DEFLATE compression method.
const DEFAULT_CM: u8 = 8;
// CINFO = 7 indicates a 32k window size.
const DEFAULT_CINFO: u8 = 7 << 4;
// The CMF byte: CINFO in the high nibble, CM in the low nibble.
const DEFAULT_CMF: u8 = DEFAULT_CM | DEFAULT_CINFO;

// No dict by default.
#[cfg(test)]
const DEFAULT_FDICT: u8 = 0;
// FLEVEL = 0 means fastest compression algorithm.
const _DEFAULT_FLEVEL: u8 = 0 << 7;

// The 16-bit value consisting of CMF and FLG must be divisible by this to be valid.
const FCHECK_DIVISOR: u8 = 31;

/// Compression-level hint stored in the FLEVEL bits (bits 6-7) of the FLG byte.
#[allow(dead_code)]
#[repr(u8)]
pub enum CompressionLevel {
    Fastest = 0 << 6,
    Fast = 1 << 6,
    Default = 2 << 6,
    Maximum = 3 << 6,
}

/// Generate FCHECK from CMF and FLG (without FCHECK) so that they are correct according to the
/// specification, i.e (CMF*256 + FLG) % 31 = 0.
/// Returns `flg` with the FCHECK bits added (any existing FCHECK bits are ignored).
fn add_fcheck(cmf: u8, flg: u8) -> u8 {
    // Clear any existing FCHECK bits *before* computing the remainder, so
    // they are genuinely ignored as documented; only FLEVEL and FDICT (the
    // top three bits of FLG) take part in the calculation. (Previously the
    // remainder was computed from the uncleared value, which produced an
    // invalid check byte if the caller passed FCHECK bits in.)
    let flg = flg & 0b1110_0000;

    let rem = ((usize::from(cmf) * 256) + usize::from(flg)) % usize::from(FCHECK_DIVISOR);

    // Casting is safe as rem can't overflow since it is a value mod 31.
    // (FCHECK_DIVISOR - rem) is at most 31, which fits in the 5 FCHECK bits,
    // so the addition cannot overflow.
    flg + (FCHECK_DIVISOR - rem as u8)
}
|
||||
|
||||
/// Write a zlib header with an empty dictionary to the writer using the specified
|
||||
/// compression level preset.
|
||||
pub fn write_zlib_header<W: Write>(writer: &mut W, level: CompressionLevel) -> Result<()> {
|
||||
writer.write_all(&get_zlib_header(level))
|
||||
}
|
||||
|
||||
/// Get the zlib header for the `CompressionLevel` level using the default window size and no
|
||||
/// dictionary.
|
||||
pub fn get_zlib_header(level: CompressionLevel) -> [u8; 2] {
|
||||
let cmf = DEFAULT_CMF;
|
||||
[cmf, add_fcheck(cmf, level as u8)]
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::DEFAULT_CMF;
    use super::*;

    #[test]
    fn test_gen_fcheck() {
        let cmf = DEFAULT_CMF;
        let flg = super::add_fcheck(
            DEFAULT_CMF,
            CompressionLevel::Default as u8 | super::DEFAULT_FDICT,
        );
        // The combined CMF/FLG value must be divisible by 31 per the zlib spec.
        assert_eq!(((usize::from(cmf) * 256) + usize::from(flg)) % 31, 0);
    }

    #[test]
    fn test_header() {
        let header = get_zlib_header(CompressionLevel::Fastest);
        // A generated header must itself pass the FCHECK validity test.
        assert_eq!(
            ((usize::from(header[0]) * 256) + usize::from(header[1])) % 31,
            0
        );
    }
}
|
|
@ -0,0 +1 @@
|
|||
{"files":{"Cargo.toml":"f64e4d72e7ea46794a911a98b24d3107b00339092792b3e6d5bbaaa0e532a2e5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7576269ea71f767b99297934c0b2367532690f8c4badc695edf8e04ab6a1e545","README-crates.io.md":"b775991a01ab4a0a8de6169f597775319d9ce8178f5c74ccdc634f13a286b20c","README.rst":"9c5d8e56338eb20f51b00db4467edadf5ea733da0365131dd39b3ad6a4c1412d","src/lib.rs":"40b9850fd27674a0f8931d5d6b1f226ec2f0119d6ecd2462a24287df40430fa4"},"package":"bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"}
|
|
@ -0,0 +1,42 @@
|
|||
"""
|
||||
cargo-raze crate build file.
|
||||
|
||||
DO NOT EDIT! Replaced on runs of cargo-raze
|
||||
"""
|
||||
package(default_visibility = [
|
||||
# Public for visibility by "@raze__crate__version//" targets.
|
||||
#
|
||||
# Prefer access through "//third_party/cargo", which limits external
|
||||
# visibility to explicit Cargo.toml dependencies.
|
||||
"//visibility:public",
|
||||
])
|
||||
|
||||
licenses([
|
||||
"notice", # "MIT,Apache-2.0"
|
||||
])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_rust//rust:rust.bzl",
|
||||
"rust_library",
|
||||
"rust_binary",
|
||||
"rust_test",
|
||||
)
|
||||
|
||||
|
||||
|
||||
rust_library(
|
||||
name = "either",
|
||||
crate_root = "src/lib.rs",
|
||||
crate_type = "lib",
|
||||
edition = "2015",
|
||||
srcs = glob(["**/*.rs"]),
|
||||
deps = [
|
||||
],
|
||||
rustc_flags = [
|
||||
"--cap-lints=allow",
|
||||
],
|
||||
version = "1.5.3",
|
||||
crate_features = [
|
||||
],
|
||||
)
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "either"
|
||||
version = "1.5.3"
|
||||
authors = ["bluss"]
|
||||
description = "The enum `Either` with variants `Left` and `Right` is a general purpose sum type with two cases.\n"
|
||||
documentation = "https://docs.rs/either/1/"
|
||||
readme = "README-crates.io.md"
|
||||
keywords = ["data-structure", "no_std"]
|
||||
categories = ["data-structures", "no-std"]
|
||||
license = "MIT/Apache-2.0"
|
||||
repository = "https://github.com/bluss/either"
|
||||
[package.metadata.docs.rs]
|
||||
features = ["serde"]
|
||||
|
||||
[package.metadata.release]
|
||||
no-dev-version = true
|
||||
tag-name = "{{version}}"
|
||||
[dependencies.serde]
|
||||
version = "1.0"
|
||||
features = ["derive"]
|
||||
optional = true
|
||||
|
||||
[features]
|
||||
default = ["use_std"]
|
||||
use_std = []
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,25 @@
|
|||
Copyright (c) 2015
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,10 @@
|
|||
The enum `Either` with variants `Left` and `Right` is a general purpose
|
||||
sum type with two cases.
|
||||
|
||||
Either has methods that are similar to Option and Result, and it also implements
|
||||
traits like `Iterator`.
|
||||
|
||||
Includes macros `try_left!()` and `try_right!()` to use for
|
||||
short-circuiting logic, similar to how the `?` operator is used with `Result`.
|
||||
Note that `Either` is general purpose. For describing success or error, use the
|
||||
regular `Result`.
|
|
@ -0,0 +1,119 @@
|
|||
|
||||
Either
|
||||
======
|
||||
|
||||
The enum ``Either`` with variants ``Left`` and ``Right`` and trait
|
||||
implementations including Iterator, Read, Write.
|
||||
|
||||
Either has methods that are similar to Option and Result.
|
||||
|
||||
Includes convenience macros ``try_left!()`` and ``try_right!()`` to use for
|
||||
short-circuiting logic.
|
||||
|
||||
Please read the `API documentation here`__
|
||||
|
||||
__ https://docs.rs/either/
|
||||
|
||||
|build_status|_ |crates|_
|
||||
|
||||
.. |build_status| image:: https://travis-ci.org/bluss/either.svg?branch=master
|
||||
.. _build_status: https://travis-ci.org/bluss/either
|
||||
|
||||
.. |crates| image:: http://meritbadge.herokuapp.com/either
|
||||
.. _crates: https://crates.io/crates/either
|
||||
|
||||
How to use with cargo::
|
||||
|
||||
[dependencies]
|
||||
either = "1.5"
|
||||
|
||||
|
||||
Recent Changes
|
||||
--------------
|
||||
|
||||
- 1.5.3
|
||||
|
||||
- Add new method ``.map()`` for ``Either<T, T>`` by @nvzqz (#40).
|
||||
|
||||
- 1.5.2
|
||||
|
||||
- Add new methods ``.left_or()``, ``.left_or_default()``, ``.left_or_else()``,
|
||||
and equivalents on the right, by @DCjanus (#36)
|
||||
|
||||
- 1.5.1
|
||||
|
||||
- Add ``AsRef`` and ``AsMut`` implementations for common unsized types:
|
||||
``str``, ``[T]``, ``CStr``, ``OsStr``, and ``Path``, by @mexus (#29)
|
||||
|
||||
- 1.5.0
|
||||
|
||||
- Add new methods ``.factor_first()``, ``.factor_second()`` and ``.into_inner()``
|
||||
by @mathstuf (#19)
|
||||
|
||||
- 1.4.0
|
||||
|
||||
- Add inherent method ``.into_iter()`` by @cuviper (#12)
|
||||
|
||||
- 1.3.0
|
||||
|
||||
- Add opt-in serde support by @hcpl
|
||||
|
||||
- 1.2.0
|
||||
|
||||
- Add method ``.either_with()`` by @Twey (#13)
|
||||
|
||||
- 1.1.0
|
||||
|
||||
- Add methods ``left_and_then``, ``right_and_then`` by @rampantmonkey
|
||||
- Include license files in the repository and released crate
|
||||
|
||||
- 1.0.3
|
||||
|
||||
- Add crate categories
|
||||
|
||||
- 1.0.2
|
||||
|
||||
- Forward more ``Iterator`` methods
|
||||
- Implement ``Extend`` for ``Either<L, R>`` if ``L, R`` do.
|
||||
|
||||
- 1.0.1
|
||||
|
||||
- Fix ``Iterator`` impl for ``Either`` to forward ``.fold()``.
|
||||
|
||||
- 1.0.0
|
||||
|
||||
- Add default crate feature ``use_std`` so that you can opt out of linking to
|
||||
std.
|
||||
|
||||
- 0.1.7
|
||||
|
||||
- Add methods ``.map_left()``, ``.map_right()`` and ``.either()``.
|
||||
- Add more documentation
|
||||
|
||||
- 0.1.3
|
||||
|
||||
- Implement Display, Error
|
||||
|
||||
- 0.1.2
|
||||
|
||||
- Add macros ``try_left!`` and ``try_right!``.
|
||||
|
||||
- 0.1.1
|
||||
|
||||
- Implement Deref, DerefMut
|
||||
|
||||
- 0.1.0
|
||||
|
||||
- Initial release
|
||||
- Support Iterator, Read, Write
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
Dual-licensed to be compatible with the Rust project.
|
||||
|
||||
Licensed under the Apache License, Version 2.0
|
||||
http://www.apache.org/licenses/LICENSE-2.0 or the MIT license
|
||||
http://opensource.org/licenses/MIT, at your
|
||||
option. This file may not be copied, modified, or distributed
|
||||
except according to those terms.
|
|
@ -0,0 +1,974 @@
|
|||
//! The enum [`Either`] with variants `Left` and `Right` is a general purpose
|
||||
//! sum type with two cases.
|
||||
//!
|
||||
//! [`Either`]: enum.Either.html
|
||||
//!
|
||||
//! **Crate features:**
|
||||
//!
|
||||
//! * `"use_std"`
|
||||
//! Enabled by default. Disable to make the library `#![no_std]`.
|
||||
//!
|
||||
//! * `"serde"`
|
||||
//! Disabled by default. Enable to `#[derive(Serialize, Deserialize)]` for `Either`
|
||||
//!
|
||||
|
||||
#![doc(html_root_url = "https://docs.rs/either/1/")]
|
||||
#![cfg_attr(all(not(test), not(feature = "use_std")), no_std)]
|
||||
#[cfg(all(not(test), not(feature = "use_std")))]
|
||||
extern crate core as std;
|
||||
|
||||
#[cfg(feature = "serde")]
|
||||
#[macro_use]
|
||||
extern crate serde;
|
||||
|
||||
use std::convert::{AsRef, AsMut};
|
||||
use std::fmt;
|
||||
use std::iter;
|
||||
use std::ops::Deref;
|
||||
use std::ops::DerefMut;
|
||||
#[cfg(any(test, feature = "use_std"))]
|
||||
use std::io::{self, Write, Read, BufRead};
|
||||
#[cfg(any(test, feature = "use_std"))]
|
||||
use std::error::Error;
|
||||
|
||||
pub use Either::{Left, Right};
|
||||
|
||||
/// The enum `Either` with variants `Left` and `Right` is a general purpose
|
||||
/// sum type with two cases.
|
||||
///
|
||||
/// The `Either` type is symmetric and treats its variants the same way, without
|
||||
/// preference.
|
||||
/// (For representing success or error, use the regular `Result` enum instead.)
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
|
||||
pub enum Either<L, R> {
|
||||
/// A value of type `L`.
|
||||
Left(L),
|
||||
/// A value of type `R`.
|
||||
Right(R),
|
||||
}
|
||||
|
||||
macro_rules! either {
|
||||
($value:expr, $pattern:pat => $result:expr) => (
|
||||
match $value {
|
||||
Either::Left($pattern) => $result,
|
||||
Either::Right($pattern) => $result,
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
/// Macro for unwrapping the left side of an `Either`, which fails early
|
||||
/// with the opposite side. Can only be used in functions that return
|
||||
/// `Either` because of the early return of `Right` that it provides.
|
||||
///
|
||||
/// See also `try_right!` for its dual, which applies the same just to the
|
||||
/// right side.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// #[macro_use] extern crate either;
|
||||
/// use either::{Either, Left, Right};
|
||||
///
|
||||
/// fn twice(wrapper: Either<u32, &str>) -> Either<u32, &str> {
|
||||
/// let value = try_left!(wrapper);
|
||||
/// Left(value * 2)
|
||||
/// }
|
||||
///
|
||||
/// fn main() {
|
||||
/// assert_eq!(twice(Left(2)), Left(4));
|
||||
/// assert_eq!(twice(Right("ups")), Right("ups"));
|
||||
/// }
|
||||
/// ```
|
||||
#[macro_export]
|
||||
macro_rules! try_left {
|
||||
($expr:expr) => (
|
||||
match $expr {
|
||||
$crate::Left(val) => val,
|
||||
$crate::Right(err) => return $crate::Right(::std::convert::From::from(err))
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
/// Dual to `try_left!`, see its documentation for more information.
|
||||
#[macro_export]
|
||||
macro_rules! try_right {
|
||||
($expr:expr) => (
|
||||
match $expr {
|
||||
$crate::Left(err) => return $crate::Left(::std::convert::From::from(err)),
|
||||
$crate::Right(val) => val
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
impl<L, R> Either<L, R> {
|
||||
/// Return true if the value is the `Left` variant.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let values = [Left(1), Right("the right value")];
|
||||
/// assert_eq!(values[0].is_left(), true);
|
||||
/// assert_eq!(values[1].is_left(), false);
|
||||
/// ```
|
||||
pub fn is_left(&self) -> bool {
|
||||
match *self {
|
||||
Left(_) => true,
|
||||
Right(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return true if the value is the `Right` variant.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let values = [Left(1), Right("the right value")];
|
||||
/// assert_eq!(values[0].is_right(), false);
|
||||
/// assert_eq!(values[1].is_right(), true);
|
||||
/// ```
|
||||
pub fn is_right(&self) -> bool {
|
||||
!self.is_left()
|
||||
}
|
||||
|
||||
/// Convert the left side of `Either<L, R>` to an `Option<L>`.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let left: Either<_, ()> = Left("some value");
|
||||
/// assert_eq!(left.left(), Some("some value"));
|
||||
///
|
||||
/// let right: Either<(), _> = Right(321);
|
||||
/// assert_eq!(right.left(), None);
|
||||
/// ```
|
||||
pub fn left(self) -> Option<L> {
|
||||
match self {
|
||||
Left(l) => Some(l),
|
||||
Right(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert the right side of `Either<L, R>` to an `Option<R>`.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let left: Either<_, ()> = Left("some value");
|
||||
/// assert_eq!(left.right(), None);
|
||||
///
|
||||
/// let right: Either<(), _> = Right(321);
|
||||
/// assert_eq!(right.right(), Some(321));
|
||||
/// ```
|
||||
pub fn right(self) -> Option<R> {
|
||||
match self {
|
||||
Left(_) => None,
|
||||
Right(r) => Some(r),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert `&Either<L, R>` to `Either<&L, &R>`.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let left: Either<_, ()> = Left("some value");
|
||||
/// assert_eq!(left.as_ref(), Left(&"some value"));
|
||||
///
|
||||
/// let right: Either<(), _> = Right("some value");
|
||||
/// assert_eq!(right.as_ref(), Right(&"some value"));
|
||||
/// ```
|
||||
pub fn as_ref(&self) -> Either<&L, &R> {
|
||||
match *self {
|
||||
Left(ref inner) => Left(inner),
|
||||
Right(ref inner) => Right(inner),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert `&mut Either<L, R>` to `Either<&mut L, &mut R>`.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// fn mutate_left(value: &mut Either<u32, u32>) {
|
||||
/// if let Some(l) = value.as_mut().left() {
|
||||
/// *l = 999;
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// let mut left = Left(123);
|
||||
/// let mut right = Right(123);
|
||||
/// mutate_left(&mut left);
|
||||
/// mutate_left(&mut right);
|
||||
/// assert_eq!(left, Left(999));
|
||||
/// assert_eq!(right, Right(123));
|
||||
/// ```
|
||||
pub fn as_mut(&mut self) -> Either<&mut L, &mut R> {
|
||||
match *self {
|
||||
Left(ref mut inner) => Left(inner),
|
||||
Right(ref mut inner) => Right(inner),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert `Either<L, R>` to `Either<R, L>`.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let left: Either<_, ()> = Left(123);
|
||||
/// assert_eq!(left.flip(), Right(123));
|
||||
///
|
||||
/// let right: Either<(), _> = Right("some value");
|
||||
/// assert_eq!(right.flip(), Left("some value"));
|
||||
/// ```
|
||||
pub fn flip(self) -> Either<R, L> {
|
||||
match self {
|
||||
Left(l) => Right(l),
|
||||
Right(r) => Left(r),
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply the function `f` on the value in the `Left` variant if it is present rewrapping the
|
||||
/// result in `Left`.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let left: Either<_, u32> = Left(123);
|
||||
/// assert_eq!(left.map_left(|x| x * 2), Left(246));
|
||||
///
|
||||
/// let right: Either<u32, _> = Right(123);
|
||||
/// assert_eq!(right.map_left(|x| x * 2), Right(123));
|
||||
/// ```
|
||||
pub fn map_left<F, M>(self, f: F) -> Either<M, R>
|
||||
where F: FnOnce(L) -> M
|
||||
{
|
||||
match self {
|
||||
Left(l) => Left(f(l)),
|
||||
Right(r) => Right(r),
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply the function `f` on the value in the `Right` variant if it is present rewrapping the
|
||||
/// result in `Right`.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let left: Either<_, u32> = Left(123);
|
||||
/// assert_eq!(left.map_right(|x| x * 2), Left(123));
|
||||
///
|
||||
/// let right: Either<u32, _> = Right(123);
|
||||
/// assert_eq!(right.map_right(|x| x * 2), Right(246));
|
||||
/// ```
|
||||
pub fn map_right<F, S>(self, f: F) -> Either<L, S>
|
||||
where F: FnOnce(R) -> S
|
||||
{
|
||||
match self {
|
||||
Left(l) => Left(l),
|
||||
Right(r) => Right(f(r)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply one of two functions depending on contents, unifying their result. If the value is
|
||||
/// `Left(L)` then the first function `f` is applied; if it is `Right(R)` then the second
|
||||
/// function `g` is applied.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// fn square(n: u32) -> i32 { (n * n) as i32 }
|
||||
/// fn negate(n: i32) -> i32 { -n }
|
||||
///
|
||||
/// let left: Either<u32, i32> = Left(4);
|
||||
/// assert_eq!(left.either(square, negate), 16);
|
||||
///
|
||||
/// let right: Either<u32, i32> = Right(-4);
|
||||
/// assert_eq!(right.either(square, negate), 4);
|
||||
/// ```
|
||||
pub fn either<F, G, T>(self, f: F, g: G) -> T
|
||||
where F: FnOnce(L) -> T,
|
||||
G: FnOnce(R) -> T
|
||||
{
|
||||
match self {
|
||||
Left(l) => f(l),
|
||||
Right(r) => g(r),
|
||||
}
|
||||
}
|
||||
|
||||
/// Like `either`, but provide some context to whichever of the
|
||||
/// functions ends up being called.
|
||||
///
|
||||
/// ```
|
||||
/// // In this example, the context is a mutable reference
|
||||
/// use either::*;
|
||||
///
|
||||
/// let mut result = Vec::new();
|
||||
///
|
||||
/// let values = vec![Left(2), Right(2.7)];
|
||||
///
|
||||
/// for value in values {
|
||||
/// value.either_with(&mut result,
|
||||
/// |ctx, integer| ctx.push(integer),
|
||||
/// |ctx, real| ctx.push(f64::round(real) as i32));
|
||||
/// }
|
||||
///
|
||||
/// assert_eq!(result, vec![2, 3]);
|
||||
/// ```
|
||||
pub fn either_with<Ctx, F, G, T>(self, ctx: Ctx, f: F, g: G) -> T
|
||||
where F: FnOnce(Ctx, L) -> T,
|
||||
G: FnOnce(Ctx, R) -> T
|
||||
{
|
||||
match self {
|
||||
Left(l) => f(ctx, l),
|
||||
Right(r) => g(ctx, r),
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply the function `f` on the value in the `Left` variant if it is present.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let left: Either<_, u32> = Left(123);
|
||||
/// assert_eq!(left.left_and_then::<_,()>(|x| Right(x * 2)), Right(246));
|
||||
///
|
||||
/// let right: Either<u32, _> = Right(123);
|
||||
/// assert_eq!(right.left_and_then(|x| Right::<(), _>(x * 2)), Right(123));
|
||||
/// ```
|
||||
pub fn left_and_then<F, S>(self, f: F) -> Either<S, R>
|
||||
where F: FnOnce(L) -> Either<S, R>
|
||||
{
|
||||
match self {
|
||||
Left(l) => f(l),
|
||||
Right(r) => Right(r),
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply the function `f` on the value in the `Right` variant if it is present.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let left: Either<_, u32> = Left(123);
|
||||
/// assert_eq!(left.right_and_then(|x| Right(x * 2)), Left(123));
|
||||
///
|
||||
/// let right: Either<u32, _> = Right(123);
|
||||
/// assert_eq!(right.right_and_then(|x| Right(x * 2)), Right(246));
|
||||
/// ```
|
||||
pub fn right_and_then<F, S>(self, f: F) -> Either<L, S>
|
||||
where F: FnOnce(R) -> Either<L, S>
|
||||
{
|
||||
match self {
|
||||
Left(l) => Left(l),
|
||||
Right(r) => f(r),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert the inner value to an iterator.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let left: Either<_, Vec<u32>> = Left(vec![1, 2, 3, 4, 5]);
|
||||
/// let mut right: Either<Vec<u32>, _> = Right(vec![]);
|
||||
/// right.extend(left.into_iter());
|
||||
/// assert_eq!(right, Right(vec![1, 2, 3, 4, 5]));
|
||||
/// ```
|
||||
pub fn into_iter(self) -> Either<L::IntoIter, R::IntoIter>
|
||||
where L: IntoIterator,
|
||||
R: IntoIterator<Item = L::Item>
|
||||
{
|
||||
match self {
|
||||
Left(l) => Left(l.into_iter()),
|
||||
Right(r) => Right(r.into_iter()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return left value or given value
|
||||
///
|
||||
/// Arguments passed to `left_or` are eagerly evaluated; if you are passing
|
||||
/// the result of a function call, it is recommended to use [`left_or_else`],
|
||||
/// which is lazily evaluated.
|
||||
///
|
||||
/// [`left_or_else`]: #method.left_or_else
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use either::*;
|
||||
/// let left: Either<&str, &str> = Left("left");
|
||||
/// assert_eq!(left.left_or("foo"), "left");
|
||||
///
|
||||
/// let right: Either<&str, &str> = Right("right");
|
||||
/// assert_eq!(right.left_or("left"), "left");
|
||||
/// ```
|
||||
pub fn left_or(self, other: L) -> L {
|
||||
match self {
|
||||
Either::Left(l) => l,
|
||||
Either::Right(_) => other,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return left or a default
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use either::*;
|
||||
/// let left: Either<String, u32> = Left("left".to_string());
|
||||
/// assert_eq!(left.left_or_default(), "left");
|
||||
///
|
||||
/// let right: Either<String, u32> = Right(42);
|
||||
/// assert_eq!(right.left_or_default(), String::default());
|
||||
/// ```
|
||||
pub fn left_or_default(self) -> L
|
||||
where
|
||||
L: Default,
|
||||
{
|
||||
match self {
|
||||
Either::Left(l) => l,
|
||||
Either::Right(_) => L::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns left value or computes it from a closure
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use either::*;
|
||||
/// let left: Either<String, u32> = Left("3".to_string());
|
||||
/// assert_eq!(left.left_or_else(|_| unreachable!()), "3");
|
||||
///
|
||||
/// let right: Either<String, u32> = Right(3);
|
||||
/// assert_eq!(right.left_or_else(|x| x.to_string()), "3");
|
||||
/// ```
|
||||
pub fn left_or_else<F>(self, f: F) -> L
|
||||
where
|
||||
F: FnOnce(R) -> L,
|
||||
{
|
||||
match self {
|
||||
Either::Left(l) => l,
|
||||
Either::Right(r) => f(r),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return right value or given value
|
||||
///
|
||||
/// Arguments passed to `right_or` are eagerly evaluated; if you are passing
|
||||
/// the result of a function call, it is recommended to use [`right_or_else`],
|
||||
/// which is lazily evaluated.
|
||||
///
|
||||
/// [`right_or_else`]: #method.right_or_else
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use either::*;
|
||||
/// let right: Either<&str, &str> = Right("right");
|
||||
/// assert_eq!(right.right_or("foo"), "right");
|
||||
///
|
||||
/// let left: Either<&str, &str> = Left("left");
|
||||
/// assert_eq!(left.right_or("right"), "right");
|
||||
/// ```
|
||||
pub fn right_or(self, other: R) -> R {
|
||||
match self {
|
||||
Either::Left(_) => other,
|
||||
Either::Right(r) => r,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return right or a default
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use either::*;
|
||||
/// let left: Either<String, u32> = Left("left".to_string());
|
||||
/// assert_eq!(left.right_or_default(), u32::default());
|
||||
///
|
||||
/// let right: Either<String, u32> = Right(42);
|
||||
/// assert_eq!(right.right_or_default(), 42);
|
||||
/// ```
|
||||
pub fn right_or_default(self) -> R
|
||||
where
|
||||
R: Default,
|
||||
{
|
||||
match self {
|
||||
Either::Left(_) => R::default(),
|
||||
Either::Right(r) => r,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns right value or computes it from a closure
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// # use either::*;
|
||||
/// let left: Either<String, u32> = Left("3".to_string());
|
||||
/// assert_eq!(left.right_or_else(|x| x.parse().unwrap()), 3);
|
||||
///
|
||||
/// let right: Either<String, u32> = Right(3);
|
||||
/// assert_eq!(right.right_or_else(|_| unreachable!()), 3);
|
||||
/// ```
|
||||
pub fn right_or_else<F>(self, f: F) -> R
|
||||
where
|
||||
F: FnOnce(L) -> R,
|
||||
{
|
||||
match self {
|
||||
Either::Left(l) => f(l),
|
||||
Either::Right(r) => r,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, L, R> Either<(T, L), (T, R)> {
|
||||
/// Factor out a homogeneous type from an either of pairs.
|
||||
///
|
||||
/// Here, the homogeneous type is the first element of the pairs.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
/// let left: Either<_, (u32, String)> = Left((123, vec![0]));
|
||||
/// assert_eq!(left.factor_first().0, 123);
|
||||
///
|
||||
/// let right: Either<(u32, Vec<u8>), _> = Right((123, String::new()));
|
||||
/// assert_eq!(right.factor_first().0, 123);
|
||||
/// ```
|
||||
pub fn factor_first(self) -> (T, Either<L, R>) {
|
||||
match self {
|
||||
Left((t, l)) => (t, Left(l)),
|
||||
Right((t, r)) => (t, Right(r)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, L, R> Either<(L, T), (R, T)> {
|
||||
/// Factor out a homogeneous type from an either of pairs.
|
||||
///
|
||||
/// Here, the homogeneous type is the second element of the pairs.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
/// let left: Either<_, (String, u32)> = Left((vec![0], 123));
|
||||
/// assert_eq!(left.factor_second().1, 123);
|
||||
///
|
||||
/// let right: Either<(Vec<u8>, u32), _> = Right((String::new(), 123));
|
||||
/// assert_eq!(right.factor_second().1, 123);
|
||||
/// ```
|
||||
pub fn factor_second(self) -> (Either<L, R>, T) {
|
||||
match self {
|
||||
Left((l, t)) => (Left(l), t),
|
||||
Right((r, t)) => (Right(r), t),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Either<T, T> {
|
||||
/// Extract the value of an either over two equivalent types.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let left: Either<_, u32> = Left(123);
|
||||
/// assert_eq!(left.into_inner(), 123);
|
||||
///
|
||||
/// let right: Either<u32, _> = Right(123);
|
||||
/// assert_eq!(right.into_inner(), 123);
|
||||
/// ```
|
||||
pub fn into_inner(self) -> T {
|
||||
either!(self, inner => inner)
|
||||
}
|
||||
|
||||
/// Map `f` over the contained value and return the result in the
|
||||
/// corresponding variant.
|
||||
///
|
||||
/// ```
|
||||
/// use either::*;
|
||||
///
|
||||
/// let value: Either<_, i32> = Right(42);
|
||||
///
|
||||
/// let other = value.map(|x| x * 2);
|
||||
/// assert_eq!(other, Right(84));
|
||||
/// ```
|
||||
pub fn map<F, M>(self, f: F) -> Either<M, M>
|
||||
where F: FnOnce(T) -> M
|
||||
{
|
||||
match self {
|
||||
Left(l) => Left(f(l)),
|
||||
Right(r) => Right(f(r)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert from `Result` to `Either` with `Ok => Right` and `Err => Left`.
|
||||
impl<L, R> From<Result<R, L>> for Either<L, R> {
|
||||
fn from(r: Result<R, L>) -> Self {
|
||||
match r {
|
||||
Err(e) => Left(e),
|
||||
Ok(o) => Right(o),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert from `Either` to `Result` with `Right => Ok` and `Left => Err`.
|
||||
impl<L, R> Into<Result<R, L>> for Either<L, R> {
|
||||
fn into(self) -> Result<R, L> {
|
||||
match self {
|
||||
Left(l) => Err(l),
|
||||
Right(r) => Ok(r),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<L, R, A> Extend<A> for Either<L, R>
|
||||
where L: Extend<A>, R: Extend<A>
|
||||
{
|
||||
fn extend<T>(&mut self, iter: T)
|
||||
where T: IntoIterator<Item=A>
|
||||
{
|
||||
either!(*self, ref mut inner => inner.extend(iter))
|
||||
}
|
||||
}
|
||||
|
||||
/// `Either<L, R>` is an iterator if both `L` and `R` are iterators.
|
||||
impl<L, R> Iterator for Either<L, R>
|
||||
where L: Iterator, R: Iterator<Item=L::Item>
|
||||
{
|
||||
type Item = L::Item;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
either!(*self, ref mut inner => inner.next())
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
either!(*self, ref inner => inner.size_hint())
|
||||
}
|
||||
|
||||
fn fold<Acc, G>(self, init: Acc, f: G) -> Acc
|
||||
where G: FnMut(Acc, Self::Item) -> Acc,
|
||||
{
|
||||
either!(self, inner => inner.fold(init, f))
|
||||
}
|
||||
|
||||
fn count(self) -> usize {
|
||||
either!(self, inner => inner.count())
|
||||
}
|
||||
|
||||
fn last(self) -> Option<Self::Item> {
|
||||
either!(self, inner => inner.last())
|
||||
}
|
||||
|
||||
fn nth(&mut self, n: usize) -> Option<Self::Item> {
|
||||
either!(*self, ref mut inner => inner.nth(n))
|
||||
}
|
||||
|
||||
fn collect<B>(self) -> B
|
||||
where B: iter::FromIterator<Self::Item>
|
||||
{
|
||||
either!(self, inner => inner.collect())
|
||||
}
|
||||
|
||||
fn all<F>(&mut self, f: F) -> bool
|
||||
where F: FnMut(Self::Item) -> bool
|
||||
{
|
||||
either!(*self, ref mut inner => inner.all(f))
|
||||
}
|
||||
}
|
||||
|
||||
impl<L, R> DoubleEndedIterator for Either<L, R>
|
||||
where L: DoubleEndedIterator, R: DoubleEndedIterator<Item=L::Item>
|
||||
{
|
||||
fn next_back(&mut self) -> Option<Self::Item> {
|
||||
either!(*self, ref mut inner => inner.next_back())
|
||||
}
|
||||
}
|
||||
|
||||
impl<L, R> ExactSizeIterator for Either<L, R>
|
||||
where L: ExactSizeIterator, R: ExactSizeIterator<Item=L::Item>
|
||||
{
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "use_std"))]
|
||||
/// `Either<L, R>` implements `Read` if both `L` and `R` do.
|
||||
///
|
||||
/// Requires crate feature `"use_std"`
|
||||
impl<L, R> Read for Either<L, R>
|
||||
where L: Read, R: Read
|
||||
{
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
either!(*self, ref mut inner => inner.read(buf))
|
||||
}
|
||||
|
||||
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
|
||||
either!(*self, ref mut inner => inner.read_to_end(buf))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "use_std"))]
|
||||
/// Requires crate feature `"use_std"`
|
||||
impl<L, R> BufRead for Either<L, R>
|
||||
where L: BufRead, R: BufRead
|
||||
{
|
||||
fn fill_buf(&mut self) -> io::Result<&[u8]> {
|
||||
either!(*self, ref mut inner => inner.fill_buf())
|
||||
}
|
||||
|
||||
fn consume(&mut self, amt: usize) {
|
||||
either!(*self, ref mut inner => inner.consume(amt))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "use_std"))]
|
||||
/// `Either<L, R>` implements `Write` if both `L` and `R` do.
|
||||
///
|
||||
/// Requires crate feature `"use_std"`
|
||||
impl<L, R> Write for Either<L, R>
|
||||
where L: Write, R: Write
|
||||
{
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
either!(*self, ref mut inner => inner.write(buf))
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
either!(*self, ref mut inner => inner.flush())
|
||||
}
|
||||
}
|
||||
|
||||
impl<L, R, Target> AsRef<Target> for Either<L, R>
|
||||
where L: AsRef<Target>, R: AsRef<Target>
|
||||
{
|
||||
fn as_ref(&self) -> &Target {
|
||||
either!(*self, ref inner => inner.as_ref())
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! impl_specific_ref_and_mut {
|
||||
($t:ty, $($attr:meta),* ) => {
|
||||
$(#[$attr])*
|
||||
impl<L, R> AsRef<$t> for Either<L, R>
|
||||
where L: AsRef<$t>, R: AsRef<$t>
|
||||
{
|
||||
fn as_ref(&self) -> &$t {
|
||||
either!(*self, ref inner => inner.as_ref())
|
||||
}
|
||||
}
|
||||
|
||||
$(#[$attr])*
|
||||
impl<L, R> AsMut<$t> for Either<L, R>
|
||||
where L: AsMut<$t>, R: AsMut<$t>
|
||||
{
|
||||
fn as_mut(&mut self) -> &mut $t {
|
||||
either!(*self, ref mut inner => inner.as_mut())
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_specific_ref_and_mut!(str,);
|
||||
impl_specific_ref_and_mut!(
|
||||
::std::path::Path,
|
||||
cfg(feature = "use_std"),
|
||||
doc = "Requires crate feature `use_std`."
|
||||
);
|
||||
impl_specific_ref_and_mut!(
|
||||
::std::ffi::OsStr,
|
||||
cfg(feature = "use_std"),
|
||||
doc = "Requires crate feature `use_std`."
|
||||
);
|
||||
impl_specific_ref_and_mut!(
|
||||
::std::ffi::CStr,
|
||||
cfg(feature = "use_std"),
|
||||
doc = "Requires crate feature `use_std`."
|
||||
);
|
||||
|
||||
impl<L, R, Target> AsRef<[Target]> for Either<L, R>
|
||||
where L: AsRef<[Target]>, R: AsRef<[Target]>
|
||||
{
|
||||
fn as_ref(&self) -> &[Target] {
|
||||
either!(*self, ref inner => inner.as_ref())
|
||||
}
|
||||
}
|
||||
|
||||
impl<L, R, Target> AsMut<Target> for Either<L, R>
|
||||
where L: AsMut<Target>, R: AsMut<Target>
|
||||
{
|
||||
fn as_mut(&mut self) -> &mut Target {
|
||||
either!(*self, ref mut inner => inner.as_mut())
|
||||
}
|
||||
}
|
||||
|
||||
impl<L, R, Target> AsMut<[Target]> for Either<L, R>
|
||||
where L: AsMut<[Target]>, R: AsMut<[Target]>
|
||||
{
|
||||
fn as_mut(&mut self) -> &mut [Target] {
|
||||
either!(*self, ref mut inner => inner.as_mut())
|
||||
}
|
||||
}
|
||||
|
||||
impl<L, R> Deref for Either<L, R>
|
||||
where L: Deref, R: Deref<Target=L::Target>
|
||||
{
|
||||
type Target = L::Target;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
either!(*self, ref inner => &*inner)
|
||||
}
|
||||
}
|
||||
|
||||
impl<L, R> DerefMut for Either<L, R>
|
||||
where L: DerefMut, R: DerefMut<Target=L::Target>
|
||||
{
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
either!(*self, ref mut inner => &mut *inner)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(test, feature = "use_std"))]
|
||||
/// `Either` implements `Error` if *both* `L` and `R` implement it.
|
||||
impl<L, R> Error for Either<L, R>
|
||||
where L: Error, R: Error
|
||||
{
|
||||
fn description(&self) -> &str {
|
||||
either!(*self, ref inner => inner.description())
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
fn cause(&self) -> Option<&Error> {
|
||||
either!(*self, ref inner => inner.cause())
|
||||
}
|
||||
}
|
||||
|
||||
impl<L, R> fmt::Display for Either<L, R>
|
||||
where L: fmt::Display, R: fmt::Display
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
either!(*self, ref inner => inner.fmt(f))
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic() {
|
||||
let mut e = Left(2);
|
||||
let r = Right(2);
|
||||
assert_eq!(e, Left(2));
|
||||
e = r;
|
||||
assert_eq!(e, Right(2));
|
||||
assert_eq!(e.left(), None);
|
||||
assert_eq!(e.right(), Some(2));
|
||||
assert_eq!(e.as_ref().right(), Some(&2));
|
||||
assert_eq!(e.as_mut().right(), Some(&mut 2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn macros() {
|
||||
fn a() -> Either<u32, u32> {
|
||||
let x: u32 = try_left!(Right(1337u32));
|
||||
Left(x * 2)
|
||||
}
|
||||
assert_eq!(a(), Right(1337));
|
||||
|
||||
fn b() -> Either<String, &'static str> {
|
||||
Right(try_right!(Left("foo bar")))
|
||||
}
|
||||
assert_eq!(b(), Left(String::from("foo bar")));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn deref() {
|
||||
fn is_str(_: &str) {}
|
||||
let value: Either<String, &str> = Left(String::from("test"));
|
||||
is_str(&*value);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn iter() {
|
||||
let x = 3;
|
||||
let mut iter = match x {
|
||||
3 => Left(0..10),
|
||||
_ => Right(17..),
|
||||
};
|
||||
|
||||
assert_eq!(iter.next(), Some(0));
|
||||
assert_eq!(iter.count(), 9);
|
||||
}
|
||||
|
||||
/// `Either` forwards `Read`/`Write` to whichever side is populated.
#[test]
fn read_write() {
    use std::io;

    // Statically false: we always take the in-memory mock branch, but the
    // `if` forces a stdio handle and a slice to unify into one Either type.
    let use_stdio = false;
    let mockdata = [0xff; 256];

    let mut reader = if use_stdio {
        Left(io::stdin())
    } else {
        Right(&mockdata[..])
    };

    // Read through the Either and check the bytes came from the mock.
    let mut buf = [0u8; 16];
    assert_eq!(reader.read(&mut buf).unwrap(), buf.len());
    assert_eq!(&buf, &mockdata[..buf.len()]);

    let mut mockbuf = [0u8; 256];
    let mut writer = if use_stdio {
        Left(io::stdout())
    } else {
        Right(&mut mockbuf[..])
    };

    // Write through the Either; a &mut [u8] writer accepts the full slice.
    let buf = [1u8; 16];
    assert_eq!(writer.write(&buf).unwrap(), buf.len());
}
|
||||
|
||||
/// An `Either` of two error types can itself be used as an `Error`.
#[test]
fn error() {
    let invalid_utf8 = b"\xff";
    let res = || -> Result<_, Either<_, _>> {
        // Each failure source is wrapped in a distinct side of the Either.
        ::std::str::from_utf8(invalid_utf8).map_err(Left)?;
        "x".parse::<i32>().map_err(Right)?;
        Ok(())
    }();
    assert!(res.is_err());
    res.unwrap_err().description(); // make sure this can be called
}
|
||||
|
||||
/// A helper macro to check if AsRef and AsMut are implemented for a given type.
///
/// Purely a compile-time check: the generated functions are never called;
/// they exist only to force the trait bounds to be proven.
macro_rules! check_t {
    ($t:ty) => {{
        fn check_ref<T: AsRef<$t>>() {}
        // `Either` must propagate `AsRef<$t>` when both sides implement it.
        fn propagate_ref<T1: AsRef<$t>, T2: AsRef<$t>>() {
            check_ref::<Either<T1, T2>>()
        }
        fn check_mut<T: AsMut<$t>>() {}
        // ...and likewise for `AsMut<$t>`.
        fn propagate_mut<T1: AsMut<$t>, T2: AsMut<$t>>() {
            check_mut::<Either<T1, T2>>()
        }
    }};
}
|
||||
|
||||
// This "unused" method is here to ensure that compilation doesn't fail on given types.
// Compile-time only: verifies AsRef/AsMut propagation for unsized targets.
fn _unsized_ref_propagation() {
    check_t!(str);

    fn check_array_ref<T: AsRef<[Item]>, Item>() {}
    fn check_array_mut<T: AsMut<[Item]>, Item>() {}

    // Slices need a generic element type, so they can't go through `check_t!`.
    fn propagate_array_ref<T1: AsRef<[Item]>, T2: AsRef<[Item]>, Item>() {
        check_array_ref::<Either<T1, T2>, _>()
    }

    fn propagate_array_mut<T1: AsMut<[Item]>, T2: AsMut<[Item]>, Item>() {
        check_array_mut::<Either<T1, T2>, _>()
    }
}
|
||||
|
||||
// This "unused" method is here to ensure that compilation doesn't fail on given types.
// Same compile-time check for std-only unsized targets (paths, OS/C strings);
// gated because these types require the standard library.
#[cfg(feature = "use_std")]
fn _unsized_std_propagation() {
    check_t!(::std::path::Path);
    check_t!(::std::ffi::OsStr);
    check_t!(::std::ffi::CStr);
}
|
|
@ -0,0 +1 @@
|
|||
{"files":{"Cargo.toml":"c0c9e1f6d9fe40e773a72f5a223be5037ca1bb28162528c763d189fa9894e126","LICENSE-APACHE":"769f80b5bcb42ed0af4e4d2fd74e1ac9bf843cb80c5a29219d1ef3544428a6bb","LICENSE-MIT":"77257f3d2181236b1aee78920238062ae64efe13c5d858b2db126e79c9e1b14f","README.md":"a9896805c546a08c41c0fbfc81becc06618d9f8e64b360a20ebcf382aec4113d","src/c_api.rs":"376938337b3b72f07fce672f41125182cbef9e801582e188f40faac6d7620d41","src/c_api_utils.rs":"cdb751c22f42a6b245988f4970a7f5d2751c5457f6ed3905e968bb2eda4d631e","src/common.rs":"4ab2da1c9ca28485cb7b25820f5e0c145e1a41129485c60435787b501618121a","src/encoder.rs":"babef5a8ce2e90d184ce8f1271fbad43c69d7f32b5f75d60d4be71e10cd052e9","src/lib.rs":"75b2de55eecf6c20ab544c6e6be0a581564675483db48dbe782d051a491a9b35","src/reader/decoder.rs":"3f8d8d89ea378522be44f816daaa4de12bcfc75b18ef687a9ef9e8eee8aea544","src/reader/mod.rs":"2913aaed706025f2779ed93b17d3d570cc1aeb190acfa2c35ff130249be30d40","src/traits.rs":"9b308f3be116d7bbe9c1ca8e2e8b729cd1468f34e21e55d5c638a9c79870711f"},"package":"471d90201b3b223f3451cd4ad53e34295f16a1df17b1edf3736d47761c3981af"}
|
|
@ -0,0 +1,46 @@
|
|||
"""
|
||||
cargo-raze crate build file.
|
||||
|
||||
DO NOT EDIT! Replaced on runs of cargo-raze
|
||||
"""
|
||||
package(default_visibility = [
|
||||
# Public for visibility by "@raze__crate__version//" targets.
|
||||
#
|
||||
# Prefer access through "//third_party/cargo", which limits external
|
||||
# visibility to explicit Cargo.toml dependencies.
|
||||
"//visibility:public",
|
||||
])
|
||||
|
||||
licenses([
|
||||
"notice", # "MIT,Apache-2.0"
|
||||
])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_rust//rust:rust.bzl",
|
||||
"rust_library",
|
||||
"rust_binary",
|
||||
"rust_test",
|
||||
)
|
||||
|
||||
|
||||
|
||||
rust_library(
|
||||
name = "gif",
|
||||
crate_root = "src/lib.rs",
|
||||
crate_type = "lib",
|
||||
edition = "2015",
|
||||
srcs = glob(["**/*.rs"]),
|
||||
deps = [
|
||||
"//third_party/cargo/vendor/color_quant-1.0.1:color_quant",
|
||||
"//third_party/cargo/vendor/lzw-0.10.0:lzw",
|
||||
],
|
||||
rustc_flags = [
|
||||
"--cap-lints=allow",
|
||||
],
|
||||
version = "0.10.3",
|
||||
crate_features = [
|
||||
"default",
|
||||
"raii_no_panic",
|
||||
],
|
||||
)
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "gif"
|
||||
version = "0.10.3"
|
||||
authors = ["nwin <nwin@users.noreply.github.com>"]
|
||||
exclude = ["tests/*", "gif-afl/*"]
|
||||
description = "GIF de- and encoder"
|
||||
homepage = "https://github.com/image-rs/image-gif"
|
||||
documentation = "https://docs.rs/gif"
|
||||
readme = "README.md"
|
||||
license = "MIT/Apache-2.0"
|
||||
repository = "https://github.com/image-rs/image-gif"
|
||||
[dependencies.color_quant]
|
||||
version = "1.0"
|
||||
|
||||
[dependencies.libc]
|
||||
version = "0.2.1"
|
||||
optional = true
|
||||
|
||||
[dependencies.lzw]
|
||||
version = "0.10"
|
||||
[dev-dependencies.glob]
|
||||
version = "0.3"
|
||||
|
||||
[features]
|
||||
c_api = ["libc"]
|
||||
default = ["raii_no_panic"]
|
||||
raii_no_panic = []
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 nwin
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,85 @@
|
|||
# GIF en- and decoding library [![Build Status](https://travis-ci.org/image-rs/image-gif.svg?branch=master)](https://travis-ci.org/image-rs/image-gif)
|
||||
|
||||
GIF en- and decoder written in Rust ([API Documentation](https://docs.rs/gif/)).
|
||||
|
||||
# GIF encoding and decoding library
|
||||
|
||||
This library provides all functions necessary to de- and encode GIF files.
|
||||
|
||||
## High level interface
|
||||
|
||||
The high level interface consists of the two types
|
||||
[`Encoder`](https://docs.rs/gif/0.10.1/gif/struct.Encoder.html) and [`Decoder`](https://docs.rs/gif/0.10.1/gif/struct.Decoder.html).
|
||||
They act as builders for the actual en- and decoders and can be used to set various
|
||||
options beforehand.
|
||||
|
||||
### Decoding GIF files
|
||||
|
||||
```rust
|
||||
// Open the file
|
||||
use std::fs::File;
|
||||
use gif::SetParameter;
|
||||
let mut decoder = gif::Decoder::new(File::open("tests/samples/sample_1.gif").unwrap());
|
||||
// Configure the decoder such that it will expand the image to RGBA.
|
||||
decoder.set(gif::ColorOutput::RGBA);
|
||||
// Read the file header
|
||||
let mut decoder = decoder.read_info().unwrap();
|
||||
while let Some(frame) = decoder.read_next_frame().unwrap() {
|
||||
// Process every frame
|
||||
}
|
||||
```
|
||||
|
||||
### Encoding GIF files
|
||||
|
||||
The encoder can be used to save simple computer-generated images:
|
||||
|
||||
```rust
|
||||
use gif::{Frame, Encoder, Repeat, SetParameter};
|
||||
use std::fs::File;
|
||||
use std::borrow::Cow;
|
||||
|
||||
let color_map = &[0xFF, 0xFF, 0xFF, 0, 0, 0];
|
||||
let (width, height) = (6, 6);
|
||||
let beacon_states = [[
|
||||
0, 0, 0, 0, 0, 0,
|
||||
0, 1, 1, 0, 0, 0,
|
||||
0, 1, 1, 0, 0, 0,
|
||||
0, 0, 0, 1, 1, 0,
|
||||
0, 0, 0, 1, 1, 0,
|
||||
0, 0, 0, 0, 0, 0,
|
||||
], [
|
||||
0, 0, 0, 0, 0, 0,
|
||||
0, 1, 1, 0, 0, 0,
|
||||
0, 1, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 1, 0,
|
||||
0, 0, 0, 1, 1, 0,
|
||||
0, 0, 0, 0, 0, 0,
|
||||
]];
|
||||
let mut image = File::create("target/beacon.gif").unwrap();
|
||||
let mut encoder = Encoder::new(&mut image, width, height, color_map).unwrap();
|
||||
encoder.set(Repeat::Infinite).unwrap();
|
||||
for state in &beacon_states {
|
||||
let mut frame = Frame::default();
|
||||
frame.width = width;
|
||||
frame.height = height;
|
||||
frame.buffer = Cow::Borrowed(&*state);
|
||||
encoder.write_frame(&frame).unwrap();
|
||||
}
|
||||
```
|
||||
|
||||
[`Frame::from_*`](https://docs.rs/gif/0.10.1/gif/struct.Frame.html) can be used to convert a true color image to a paletted
|
||||
image with a maximum of 256 colors:
|
||||
|
||||
```rust
|
||||
use std::fs::File;
|
||||
|
||||
// Get pixel data from some source
|
||||
let mut pixels: Vec<u8> = vec![0; 30_000];
|
||||
// Create frame from data
|
||||
let frame = gif::Frame::from_rgb(100, 100, &mut *pixels);
|
||||
// Create encoder
|
||||
let mut image = File::create("target/indexed_color.gif").unwrap();
|
||||
let mut encoder = gif::Encoder::new(&mut image, frame.width, frame.height, &[]).unwrap();
|
||||
// Write frame to file
|
||||
encoder.write_frame(&frame).unwrap();
|
||||
```
|
|
@ -0,0 +1,379 @@
|
|||
//! C API, drop-in replacement for libgif

#![allow(non_snake_case)]
#![allow(non_camel_case_types)]
#![allow(dead_code)]
#![allow(missing_docs)] //FIXME

use std::cmp;
use std::mem;
use std::ptr;
use std::boxed;
use std::fs::File;
use std::ffi::CStr;
use std::str;
use std::slice;

use libc::{free, c_int, c_uint, c_char, c_uchar, c_void};

use reader::{Decoder, Reader, Decoded};
use c_api_utils::{CInterface, CFile, FnInputFile};

/// NOTE As of rust issue #954 `bool` is compatible with c_bool.
pub type c_bool = bool;

// Type aliases mirroring libgif's typedefs so C callers see familiar names.
pub type GifPixelType = c_uchar;
pub type GifRowType = *mut c_uchar;
pub type GifByteType = c_uchar;
pub type GifPrefixType = c_uint;
pub type GifWord = c_int;
|
||||
|
||||
#[repr(C)]
/// One RGB palette entry; layout-compatible with libgif's `GifColorType`.
pub struct GifColorType {
    pub Red: GifByteType,
    pub Green: GifByteType,
    pub Blue: GifByteType
}

#[repr(C)]
/// A color palette (global or local); libgif `ColorMapObject` layout.
pub struct ColorMapObject {
    pub ColorCount: c_int,
    pub BitsPerPixel: c_int,
    pub SortFlag: c_bool,
    /// on malloc(3) heap
    pub Colors: *mut GifColorType // TODO USE MALLOC for this
}

#[repr(C)]
/// A raw extension block as exposed to C callers.
pub struct ExtensionBlock {
    pub ByteCount: c_int,
    /// on malloc(3) heap
    pub Bytes: *mut GifByteType, // TODO USE MALLOC for this
    /// The block function code
    pub Function: c_int
    //#define CONTINUE_EXT_FUNC_CODE    0x00    /* continuation subblock */
    //#define COMMENT_EXT_FUNC_CODE     0xfe    /* comment */
    //#define GRAPHICS_EXT_FUNC_CODE    0xf9    /* graphics control (GIF89) */
    //#define PLAINTEXT_EXT_FUNC_CODE   0x01    /* plaintext */
    //#define APPLICATION_EXT_FUNC_CODE 0xff    /* application block */
}

#[repr(C)]
/// One fully decoded frame plus the extensions that preceded it.
pub struct SavedImage {
    pub ImageDesc: GifImageDesc,
    /// on malloc(3) heap
    pub RasterBits: *mut GifByteType,
    /// Count of extensions before image
    pub ExtensionBlockCount: c_int,
    /// Extensions before image
    pub ExtensionBlocks: *mut ExtensionBlock
}

#[repr(C)]
/// Placement and palette of a single image within the logical screen.
pub struct GifImageDesc {
    /// Current image dimensions. (left)
    pub Left: GifWord,
    /// Current image dimensions. (top)
    pub Top: GifWord,
    /// Current image dimensions. (width)
    pub Width: GifWord,
    /// Current image dimensions. (height)
    pub Height: GifWord,
    /// Sequential/Interlaced lines.
    pub Interlace: c_bool,
    /// The local color map
    pub ColorMap: *mut ColorMapObject
}
|
||||
|
||||
#[repr(C)]
/// The central handle of the C API; layout-compatible with libgif's
/// `GifFileType`. `Private` carries the Rust decoder (see `try_get_decoder!`).
pub struct GifFileType {
    /// Size of virtual canvas (width)
    pub SWidth: GifWord,
    /// Size of virtual canvas (height)
    pub SHeight: GifWord,
    /// How many colors can we generate?
    pub SColorResolution: GifWord,
    /// Background color for virtual canvas
    pub SBackGroundColor: GifWord,
    /// Used to compute pixel aspect ratio
    pub AspectByte: GifByteType,
    /// Global colormap, NULL if nonexistent.
    pub SColorMap: *mut ColorMapObject,
    /// Number of current image (both APIs)
    pub ImageCount: c_int,
    /// Current image (low-level API)
    pub Image: GifImageDesc,
    /// Image sequence (high-level API)
    pub SavedImages: *mut SavedImage,
    /// Count extensions past last image
    pub ExtensionBlockCount: c_int,
    /// Extensions past last image
    pub ExtensionBlocks: *mut ExtensionBlock,
    /// Last error condition reported
    pub Error: c_int,
    /// hook to attach user data (TVT)
    pub UserData: *mut c_void,
    /// Don't mess with this!
    pub Private: *mut c_void,
}

#[repr(C)]
/// Record kinds a GIF data stream is made of, as reported by the
/// record-level (low-level) API.
pub enum GifRecordType {
    UNDEFINED_RECORD_TYPE,
    SCREEN_DESC_RECORD_TYPE,
    IMAGE_DESC_RECORD_TYPE, /* Begin with ',' */
    EXTENSION_RECORD_TYPE,  /* Begin with '!' */
    TERMINATE_RECORD_TYPE   /* Begin with ';' */
}
|
||||
|
||||
/// Input callback for DGifOpen. Reads up to `c_int` bytes into the buffer
/// and returns the number of bytes read.
pub type InputFunc = extern "C" fn(*mut GifFileType, *mut GifByteType, c_int) -> c_int;

// DGif error codes, numerically matching libgif's D_GIF_* values so C
// callers can keep their existing error handling.
const D_GIF_SUCCEEDED          : c_int = 0;
const D_GIF_ERR_OPEN_FAILED    : c_int = 101; /* And DGif possible errors. */
const D_GIF_ERR_READ_FAILED    : c_int = 102;
const D_GIF_ERR_NOT_GIF_FILE   : c_int = 103;
const D_GIF_ERR_NO_SCRN_DSCR   : c_int = 104;
const D_GIF_ERR_NO_IMAG_DSCR   : c_int = 105;
const D_GIF_ERR_NO_COLOR_MAP   : c_int = 106;
const D_GIF_ERR_WRONG_RECORD   : c_int = 107;
const D_GIF_ERR_DATA_TOO_BIG   : c_int = 108;
const D_GIF_ERR_NOT_ENOUGH_MEM : c_int = 109;
const D_GIF_ERR_CLOSE_FAILED   : c_int = 110;
const D_GIF_ERR_NOT_READABLE   : c_int = 111;
const D_GIF_ERR_IMAGE_DEFECT   : c_int = 112;
const D_GIF_ERR_EOF_TOO_SOON   : c_int = 113;

// libgif-style generic return codes (note: GIF_ERROR is 0, GIF_OK is 1).
const GIF_ERROR: c_int = 0;
const GIF_OK   : c_int = 1;
|
||||
|
||||
/// Unwrap a `Result` or bail out of the enclosing C-ABI function.
///
/// Four-argument form: on `Err`, stores the libgif error `$code` into
/// `*$err` (only when that pointer is non-null) and returns `$retval`.
/// One-argument form: on `Err`, returns `GIF_ERROR`.
macro_rules! try_capi {
    ($val:expr, $err:expr, $code:expr, $retval:expr) => (
        match $val {
            Ok(val) => val,
            Err(_) => {
                if $err != ptr::null_mut() {
                    *$err = $code
                }
                return $retval
            }
        }
    );
    ($val:expr) => (
        match $val {
            Ok(val) => val,
            Err(_) => return GIF_ERROR
        }
    );
}
|
||||
|
||||
/// Recover the decoder stashed in `GifFileType.Private`, or return
/// `GIF_ERROR` when `$this` is null.
///
/// `Private` holds the double-boxed decoder created by the `DGifOpen*`
/// constructors; the transmute reborrows it as `&mut &mut CInterface`
/// without taking ownership (ownership is reclaimed in `DGifCloseFile`).
macro_rules! try_get_decoder {
    ($this:expr) => (
        if $this != ptr::null_mut() {
            let decoder: &mut &mut CInterface = mem::transmute((*$this).Private);
            decoder
        } else {
            return GIF_ERROR
        }
    );
}
|
||||
|
||||
/// Opens a GIF by file path and reads the screen descriptor.
///
/// On failure, stores `D_GIF_ERR_OPEN_FAILED` / `D_GIF_ERR_READ_FAILED`
/// into `*err` (when non-null) and returns null.
#[no_mangle] pub unsafe extern "C"
fn DGifOpenFileName(gif_file_name: *const c_char, err: *mut c_int) -> *mut GifFileType {
    // Non-UTF-8 paths are rejected and reported as "open failed".
    let file = try_capi!(
        File::open(try_capi!(
            str::from_utf8(CStr::from_ptr(gif_file_name).to_bytes()),
            err, D_GIF_ERR_OPEN_FAILED, ptr::null_mut()
        )),
        err, D_GIF_ERR_OPEN_FAILED, ptr::null_mut()
    );
    let mut decoder = try_capi!(
        Decoder::new(file).read_info(),
        err, D_GIF_ERR_READ_FAILED, ptr::null_mut()
    ).into_c_interface();
    // Allocate a zeroed GifFileType and let the decoder fill in the
    // screen-descriptor fields.
    let this: *mut GifFileType = Box::into_raw(Box::new(mem::zeroed()));
    decoder.read_screen_desc(&mut *this);
    // Double-box the decoder: the inner raw pointer is a fat trait-object
    // pointer; the outer box yields a thin pointer that fits in
    // `Private: *mut c_void` (unpacked again by `try_get_decoder!`).
    let decoder = Box::into_raw(Box::new(Box::into_raw(decoder)));
    (*this).Private = mem::transmute(decoder);
    this
}
|
||||
|
||||
/// Opens a GIF from an already-open file descriptor `fp`.
///
/// Mirrors `DGifOpenFileName`, but wraps the raw fd in a `CFile` reader
/// instead of opening a path.
#[no_mangle] pub unsafe extern "C"
fn DGifOpenFileHandle(fp: c_int, err: *mut c_int) -> *mut GifFileType {
    let mut decoder = try_capi!(
        Decoder::new(CFile::new(fp)).read_info(),
        err, D_GIF_ERR_READ_FAILED, ptr::null_mut()
    ).into_c_interface();
    // Zeroed struct, filled from the parsed screen descriptor.
    let this: *mut GifFileType = Box::into_raw(Box::new(mem::zeroed()));
    decoder.read_screen_desc(&mut *this);
    // Same double-boxing scheme as DGifOpenFileName (thin pointer for Private).
    let decoder = Box::into_raw(Box::new(Box::into_raw(decoder)));
    (*this).Private = mem::transmute(decoder);
    this
}
|
||||
|
||||
/*
|
||||
#[no_mangle] pub unsafe extern "C"
|
||||
fn DGifSlurp(this: *mut GifFileType) -> c_int {
|
||||
match try_get_decoder!(this).read_to_end(mem::transmute(this)) {
|
||||
Ok(()) => GIF_OK,
|
||||
Err(_) => GIF_ERROR
|
||||
}
|
||||
}
|
||||
*/
|
||||
/// Opens a GIF whose bytes are supplied by a caller-provided `InputFunc`.
///
/// `user_data` is stored into the struct *before* the first read, so the
/// callback can already access it while the header is being parsed.
///
/// NOTE(review): unlike the file-based constructors, this does not call
/// `read_screen_desc` to copy the screen descriptor into the struct —
/// confirm whether that is intentional.
#[no_mangle] pub unsafe extern "C"
fn DGifOpen(user_data: *mut c_void, read_fn: InputFunc, err: *mut c_int) -> *mut GifFileType {
    let this: *mut GifFileType = Box::into_raw(Box::new(mem::zeroed()));
    (*this).UserData = user_data;
    let decoder = try_capi!(
        Decoder::new(FnInputFile::new(read_fn, this)).read_info(),
        err, D_GIF_ERR_READ_FAILED, {
            // TODO: check if it is ok and expected to free GifFileType
            // This is unclear since the API exposes the whole struct to the read
            // function and not only the user data
            let _: Box<GifFileType> = Box::from_raw(this);
            ptr::null_mut()
        }
    ).into_c_interface();
    // Same double-boxing scheme as DGifOpenFileName (thin pointer for Private).
    let decoder = Box::into_raw(Box::new(Box::into_raw(decoder)));
    (*this).Private = mem::transmute(decoder);
    this
}
|
||||
|
||||
/// Closes the file and also frees all data structures.
///
/// Rust-owned allocations (the struct itself and the decoder behind
/// `Private`) are reclaimed via `Box::from_raw`; buffers that were
/// malloc(3)'d for C callers go back through `free`.
#[no_mangle] pub unsafe extern "C"
fn DGifCloseFile(this: *mut GifFileType, _: *mut c_int)
-> c_int {
    if this != ptr::null_mut() {
        // Reclaim ownership; dropping these boxes frees struct + decoder.
        let this: Box<GifFileType> = Box::from_raw(this);
        let _: Box<Box<CInterface>> = mem::transmute(this.Private);
        // NOTE(review): assumes `SavedImages`/`ImageCount` are consistent;
        // `from_raw_parts_mut` on a null `SavedImages` with non-zero count
        // would be UB — confirm every code path keeps them in sync.
        for image in slice::from_raw_parts_mut(this.SavedImages, this.ImageCount as usize) {
            free(mem::transmute(image.RasterBits));
            if image.ImageDesc.ColorMap != ptr::null_mut() {
                free(mem::transmute((*image.ImageDesc.ColorMap).Colors))
            }
            free(mem::transmute(image.ImageDesc.ColorMap));
            if image.ExtensionBlockCount != 0 {
                GifFreeExtensions(&mut image.ExtensionBlockCount, &mut image.ExtensionBlocks)
            }
        }
        free(mem::transmute(this.SavedImages));
    }
    GIF_OK
}
|
||||
|
||||
// legacy but needed API
/// No-op kept for API compatibility: the screen descriptor is already
/// read by the `DGifOpen*` constructors, so there is nothing left to do.
#[no_mangle] pub unsafe extern "C"
fn DGifGetScreenDesc(_: *mut GifFileType) -> c_int {
    GIF_OK
}
|
||||
/*
|
||||
#[no_mangle] pub unsafe extern "C"
|
||||
fn DGifGetRecordType(this: *mut GifFileType, record_type: *mut GifRecordType) -> c_int {
|
||||
use common::Block::*;
|
||||
use self::GifRecordType::*;
|
||||
*record_type = match try_capi!(try_get_decoder!(this).next_record_type()) {
|
||||
Image => IMAGE_DESC_RECORD_TYPE,
|
||||
Extension => EXTENSION_RECORD_TYPE,
|
||||
Trailer => TERMINATE_RECORD_TYPE
|
||||
};
|
||||
GIF_OK
|
||||
}
|
||||
*/
|
||||
/// Succeeds (`GIF_OK`) iff the decoder can produce a current image
/// buffer; any decode failure maps to `GIF_ERROR`.
#[no_mangle] pub unsafe extern "C"
fn DGifGetImageDesc(this: *mut GifFileType) -> c_int {
    match try_get_decoder!(this).current_image_buffer() {
        Ok(_) => GIF_OK,
        Err(_) => GIF_ERROR
    }
}
|
||||
|
||||
/// Copies up to `len` bytes of the current image's decoded pixel data into
/// `line`, advancing the decoder's internal read offset.
#[no_mangle] pub unsafe extern "C"
fn DGifGetLine(this: *mut GifFileType, line: *mut GifPixelType, len: c_int) -> c_int {
    // `offset` is a mutable read cursor into the decoded image buffer.
    let (buffer, offset) = try_capi!(try_get_decoder!(this).current_image_buffer());
    let buffer = &buffer[*offset..];
    // Clamp to the remaining data so the copy below cannot overrun `buffer`.
    // NOTE(review): when fewer than `len` bytes remain, the tail of `line` is
    // left untouched and the caller is not told — confirm callers expect this.
    let len = cmp::min(buffer.len(), len as usize);
    *offset = *offset + len;
    let line = slice::from_raw_parts_mut(line, len);
    line.copy_from_slice(&buffer[..len]);
    GIF_OK
}
|
||||
//int DGifGetPixel(GifFileType *GifFile, GifPixelType GifPixel);
|
||||
//int DGifGetComment(GifFileType *GifFile, char *GifComment);
|
||||
|
||||
/// Returns the type of the extension and the first extension sub-block `(size, data...)`
///
/// Both out-pointers are optional and may be null. If the next record is an
/// image or the trailer (i.e. not an extension), the outputs are cleared
/// (null block pointer, extension type 0) and `GIF_OK` is still returned.
#[no_mangle] pub unsafe extern "C"
fn DGifGetExtension(this: *mut GifFileType, ext_type: *mut c_int, ext_block: *mut *const GifByteType) -> c_int {
    use common::Block::*;
    let decoder = try_get_decoder!(this);
    match try_capi!(decoder.next_record_type()) {
        Image | Trailer => {
            // Not an extension record: report "nothing" through the
            // out-pointers instead of failing.
            if ext_block != ptr::null_mut() {
                *ext_block = ptr::null_mut();
            }
            if ext_type != ptr::null_mut() {
                *ext_type = 0;
            }
        }
        Extension => {
            // Decode exactly one (sub-)block and hand its bytes out. The
            // pointer stays valid only until the decoder is advanced again.
            match try_capi!(decoder.decode_next()) {
                Some(Decoded::SubBlockFinished(type_, data))
                | Some(Decoded::BlockFinished(type_, data)) => {
                    if ext_block != ptr::null_mut() {
                        *ext_block = data.as_ptr();
                    }
                    if ext_type != ptr::null_mut() {
                        *ext_type = type_ as c_int;
                    }
                }
                // Any other decode result here means the stream is not where
                // we expected it to be.
                _ => return GIF_ERROR
            }
        }
    }
    GIF_OK
}
|
||||
|
||||
/// Returns the next extension sub-block `(size, data...)`
///
/// Returns a null block pointer (and `GIF_OK`) once the current extension is
/// exhausted. The out-pointer may be null.
#[no_mangle] pub unsafe extern "C"
fn DGifGetExtensionNext(this: *mut GifFileType, ext_block: *mut *const GifByteType) -> c_int {
    // TODO extract next sub block
    let mut decoder = try_get_decoder!(this);
    // NOTE(review): the third field of `last_ext()` appears to flag that the
    // current extension has been fully consumed — confirm against the
    // `CInterface::last_ext` implementation.
    if decoder.last_ext().2 {
        if ext_block != ptr::null_mut() {
            *ext_block = ptr::null_mut();
        }
        GIF_OK
    } else {
        match try_capi!(decoder.decode_next()) {
            Some(Decoded::SubBlockFinished(_, data))
            | Some(Decoded::BlockFinished(_, data)) => {
                if ext_block != ptr::null_mut() {
                    *ext_block = data.as_ptr();
                }
                GIF_OK
            }
            // Unexpected decode result: the stream is out of sync.
            _ => GIF_ERROR
        }
    }
}
|
||||
/*
|
||||
/// This function reallocs `ext_blocks` and copies `data`
|
||||
#[no_mangle] pub unsafe extern "C"
|
||||
fn GifAddExtensionBlock(block_count: *mut c_int, ext_blocks: *mut *const ExtensionBlock,
|
||||
ext_type: c_int, len: c_uint, data: *const c_uchar) -> c_int {
|
||||
GIF_OK
|
||||
}
|
||||
*/
|
||||
#[no_mangle] pub unsafe extern "C"
|
||||
fn GifFreeExtensions(block_count: *mut c_int, ext_blocks: *mut *mut ExtensionBlock) {
|
||||
if ext_blocks == ptr::null_mut() || block_count == ptr::null_mut() {
|
||||
return
|
||||
}
|
||||
for i in 0..(*block_count) as isize {
|
||||
let block = (*ext_blocks).offset(i);
|
||||
free(mem::transmute((*block).Bytes));
|
||||
}
|
||||
free(mem::transmute(ext_blocks));
|
||||
*ext_blocks = ptr::null_mut();
|
||||
*block_count = 0;
|
||||
}
|
|
@ -0,0 +1,124 @@
|
|||
use std::io::{self, Read};
|
||||
use std::mem;
|
||||
use std::ops;
|
||||
use std::slice;
|
||||
|
||||
use libc::{malloc, size_t, c_int, read, close};
|
||||
|
||||
use common::Block;
|
||||
use reader::{Decoded, DecodingError, PLTE_CHANNELS};
|
||||
use c_api::{GifFileType, SavedImage, ColorMapObject, GifColorType, c_bool,
|
||||
InputFunc
|
||||
};
|
||||
|
||||
/// Internal decoder interface the C-API shims (`DGif*`) are written against.
pub trait CInterface {
    // Fills the given `GifFileType` from the parsed logical screen descriptor.
    // (Anonymous parameter: pre-2018 trait-method syntax.)
    fn read_screen_desc(&mut self, &mut GifFileType);
    // Returns the decoded buffer of the current image together with a mutable
    // read offset (cursor) into it.
    fn current_image_buffer(&mut self) -> Result<(&[u8], &mut usize), DecodingError>;
    //fn seek_to(&mut self, position: Progress) -> Result<(), DecodingError>;
    // `(extension type, bytes of the most recent sub-block, finished)` —
    // NOTE(review): the exact meaning of the `bool` should be confirmed in
    // the implementing type; `DGifGetExtensionNext` treats it as "no more
    // sub-blocks".
    fn last_ext(&self) -> (u8, &[u8], bool);
    // Peeks the kind of the next record in the stream without consuming data.
    fn next_record_type(&mut self) -> Result<Block, DecodingError>;
    // Decodes up to the next meaningful unit; `Ok(None)` signals end of stream.
    fn decode_next(&mut self) -> Result<Option<Decoded>, DecodingError>;
    //unsafe fn read_to_end(&mut self, &mut GifFileType) -> Result<(), DecodingError>;
}
|
||||
|
||||
pub unsafe fn saved_images_new(count: usize) -> *mut SavedImage {
|
||||
mem::transmute::<_, *mut SavedImage>(malloc(
|
||||
(mem::size_of::<SavedImage>() * count) as size_t
|
||||
))
|
||||
}
|
||||
|
||||
pub unsafe fn copy_data(buf: &[u8]) -> *mut u8 {
|
||||
let data = mem::transmute::<_, *mut u8>(malloc(
|
||||
(mem::size_of::<SavedImage>() * buf.len()) as size_t
|
||||
));
|
||||
slice::from_raw_parts_mut(data, buf.len()).copy_from_slice(buf);
|
||||
//for (i, &b) in buf.iter().enumerate() {
|
||||
// *data.offset(i as isize) = b
|
||||
//}
|
||||
data
|
||||
}
|
||||
|
||||
/// Converts an optional flat palette (`[r, g, b, ...]`) into a freshly
/// malloc'ed giflib-style `ColorMapObject`.
///
/// `None` yields a map with `ColorCount == 0`. The caller owns both the
/// returned struct and its `Colors` array and must `free` them
/// (as `DGifCloseFile` does).
pub unsafe fn copy_colormap(map: &Option<Vec<u8>>) -> *mut ColorMapObject {
    let map: &[u8] = match *map {
        Some(ref map) => &*map,
        None => &[]
    };
    // NOTE(review): `malloc` results are used unchecked; a null return here
    // would be dereferenced immediately below.
    let new_map = mem::transmute::<_, *mut ColorMapObject>(malloc(mem::size_of::<ColorMapObject>() as size_t));
    // PLTE_CHANNELS bytes per entry (r, g, b).
    (*new_map).ColorCount = (map.len()/PLTE_CHANNELS) as c_int;
    (*new_map).BitsPerPixel = 8;
    (*new_map).SortFlag = false;
    let colors = mem::transmute::<_, *mut GifColorType>(malloc(
        (mem::size_of::<GifColorType>() * (*new_map).ColorCount as usize) as size_t
    ));
    // Re-pack the flat byte slice into an array of GifColorType structs.
    for (i, c) in map.chunks(PLTE_CHANNELS).enumerate() {
        *colors.offset(i as isize) = GifColorType {
            Red: c[0],
            Green: c[1],
            Blue: c[2],
        }
    }
    (*new_map).Colors = colors;
    new_map
}
|
||||
|
||||
/// A simple wrapper around a C file handle
///
/// Owns the descriptor: it is closed via `close(2)` when the value is
/// dropped (see the `Drop` impl below).
pub struct CFile {
    // Raw POSIX file descriptor.
    fp: c_int
}
|
||||
|
||||
impl CFile {
|
||||
pub fn new(fp: c_int) -> CFile {
|
||||
CFile {
|
||||
fp: fp
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Read for CFile {
    /// Reads from the wrapped descriptor via the POSIX `read(2)` syscall.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let count = unsafe { read(
            self.fp,
            // *mut u8 -> *mut c_void for the C signature.
            mem::transmute(buf.as_mut_ptr()),
            buf.len() as size_t
        ) };
        match count {
            // read(2) signals failure with -1 and sets errno, which
            // `last_os_error` picks up.
            -1 => Err(io::Error::last_os_error()),
            n => Ok(n as usize)
        }
    }
}
|
||||
|
||||
// Closes the wrapped descriptor with `close(2)` on drop. The return value is
// discarded, so close errors are silently ignored.
impl ops::Drop for CFile {
    fn drop(&mut self) {
        unsafe {close(self.fp)};
    }
}
|
||||
|
||||
/// A wrapper around `InputFunc`
///
/// Adapts a giflib-style read callback to `std::io::Read`.
pub struct FnInputFile {
    // The C callback that produces the bytes.
    func: InputFunc,
    // Handed back to the callback on every invocation as its context.
    file: *mut GifFileType
}
|
||||
|
||||
impl FnInputFile {
|
||||
pub fn new(func: InputFunc, file: *mut GifFileType) -> FnInputFile {
|
||||
FnInputFile {
|
||||
func: func,
|
||||
file: file
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Read for FnInputFile {
    /// Delegates the read to the stored C callback.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let count = unsafe { (self.func)(
            self.file,
            // *mut u8 -> the callback's expected pointer type.
            mem::transmute(buf.as_mut_ptr()),
            buf.len() as c_int
        ) };
        match count {
            // NOTE(review): `count` is -1 here, which is not an OS errno, so
            // this surfaces as "unknown error -1". A user callback also does
            // not set errno, unlike `CFile::read` which uses
            // `last_os_error()` — confirm whether a generic io::Error was
            // intended instead.
            -1 => Err(io::Error::from_raw_os_error(count)),
            n => Ok(n as usize)
        }
    }
}
|
|
@ -0,0 +1,240 @@
|
|||
//! Common types used by both the decoder and the encoder
|
||||
extern crate color_quant;
|
||||
|
||||
use std::borrow::Cow;
|
||||
|
||||
/// Disposal method
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(u8)]
pub enum DisposalMethod {
    /// StreamingDecoder is not required to take any action.
    Any = 0,
    /// Do not dispose.
    Keep = 1,
    /// Restore to background color.
    Background = 2,
    /// Restore to previous.
    Previous = 3,
}

impl DisposalMethod {
    /// Maps the raw disposal field of a graphic control extension to a
    /// `DisposalMethod`, or `None` for a reserved/unknown value.
    pub fn from_u8(n: u8) -> Option<DisposalMethod> {
        let method = match n {
            0 => DisposalMethod::Any,
            1 => DisposalMethod::Keep,
            2 => DisposalMethod::Background,
            3 => DisposalMethod::Previous,
            _ => return None,
        };
        Some(method)
    }
}
|
||||
|
||||
/// Known GIF block types
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(u8)]
pub enum Block {
    /// Image block.
    Image = 0x2C,
    /// Extension block.
    Extension = 0x21,
    /// Image trailer.
    Trailer = 0x3B
}

impl Block {
    /// Maps a raw introducer byte read from the stream to its block type,
    /// or `None` for an unknown introducer.
    pub fn from_u8(n: u8) -> Option<Block> {
        let block = match n {
            0x2C => Block::Image,
            0x21 => Block::Extension,
            0x3B => Block::Trailer,
            _ => return None,
        };
        Some(block)
    }
}
|
||||
|
||||
|
||||
/// Known GIF extensions
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(u8)]
pub enum Extension {
    /// Text extension.
    Text = 0x01,
    /// Control extension.
    Control = 0xF9,
    /// Comment extension.
    Comment = 0xFE,
    /// Application extension.
    Application = 0xFF
}

impl Extension {
    /// Maps a raw extension label byte to its `Extension` variant, or
    /// `None` for an unknown label.
    pub fn from_u8(n: u8) -> Option<Extension> {
        let ext = match n {
            0x01 => Extension::Text,
            0xF9 => Extension::Control,
            0xFE => Extension::Comment,
            0xFF => Extension::Application,
            _ => return None,
        };
        Some(ext)
    }
}
|
||||
|
||||
/// A GIF frame
///
/// `'a` is the lifetime of the (possibly borrowed) pixel `buffer`; fully
/// owned frames use `Frame<'static>`.
#[derive(Debug, Clone)]
pub struct Frame<'a> {
    /// Frame delay in units of 10 ms.
    pub delay: u16,
    /// Disposal method.
    pub dispose: DisposalMethod,
    /// Transparent index (if available).
    pub transparent: Option<u8>,
    /// True if the frame needs user input to be displayed.
    pub needs_user_input: bool,
    /// Offset from the top border of the canvas.
    pub top: u16,
    /// Offset from the left border of the canvas.
    pub left: u16,
    /// Width of the frame.
    pub width: u16,
    /// Height of the frame.
    pub height: u16,
    /// True if the image is interlaced.
    pub interlaced: bool,
    /// Frame local color palette if available (flat `[r, g, b, ...]`).
    pub palette: Option<Vec<u8>>,
    /// Buffer containing the image data.
    /// Only indices unless configured differently.
    pub buffer: Cow<'a, [u8]>
}
|
||||
|
||||
impl<'a> Default for Frame<'a> {
|
||||
fn default() -> Frame<'a> {
|
||||
Frame {
|
||||
delay: 0,
|
||||
dispose: DisposalMethod::Keep,
|
||||
transparent: None,
|
||||
needs_user_input: false,
|
||||
top: 0,
|
||||
left: 0,
|
||||
width: 0,
|
||||
height: 0,
|
||||
interlaced: false,
|
||||
palette: None,
|
||||
buffer: Cow::Borrowed(&[])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Frame<'static> {
    /// Creates a frame from pixels in RGBA format.
    /// *Note: This method is not optimized for speed.*
    ///
    /// Equivalent to `from_rgba_speed(width, height, pixels, 1)` (highest
    /// quantization quality). Mutates `pixels` (see `from_rgba_speed`).
    ///
    /// # Panics:
    /// * If the length of pixels does not equal `width * height * 4`.
    pub fn from_rgba(width: u16, height: u16, pixels: &mut [u8]) -> Frame<'static> {
        Frame::from_rgba_speed(width, height, pixels, 1)
    }

    /// Creates a frame from pixels in RGBA format.
    /// `speed` is a value in the range [1, 30].
    /// The higher the value the faster it runs at the cost of image quality.
    /// A `speed` of 10 is a good compromise between speed and quality.
    ///
    /// Quantizes the pixels to a <= 256 color palette with NeuQuant and
    /// stores palette indices in the frame buffer. Note that `pixels` is
    /// mutated: the alpha byte of every non-transparent pixel is forced to
    /// 0xFF before quantization. If several pixels are fully transparent
    /// (alpha == 0), the color of the last one encountered is used to pick
    /// the transparent palette index.
    ///
    /// # Panics:
    /// * If the length of pixels does not equal `width * height * 4`.
    /// * If `speed < 1` or `speed > 30`
    pub fn from_rgba_speed(width: u16, height: u16, pixels: &mut [u8], speed: i32) -> Frame<'static> {
        assert_eq!(width as usize * height as usize * 4, pixels.len(), "Too much or too little pixel data for the given width and height to create a GIF Frame");
        assert!(speed >= 1 && speed <= 30, "speed needs to be in the range [1, 30]");
        let mut frame = Frame::default();
        let mut transparent = None;
        for pix in pixels.chunks_mut(4) {
            if pix[3] != 0 {
                // Opaque: normalize alpha so the quantizer sees one value.
                pix[3] = 0xFF;
            } else {
                // Fully transparent: remember this RGBA (alpha 0) to locate
                // the transparent index after quantization.
                transparent = Some([pix[0], pix[1], pix[2], pix[3]])
            }
        }
        frame.width = width;
        frame.height = height;
        let nq = color_quant::NeuQuant::new(speed, 256, pixels);
        // Map every RGBA pixel to its nearest palette index.
        frame.buffer = Cow::Owned(pixels.chunks(4).map(|pix| nq.index_of(pix) as u8).collect());
        frame.palette = Some(nq.color_map_rgb());
        frame.transparent = if let Some(t) = transparent {
            Some(nq.index_of(&t) as u8)
        } else {
            None
        };

        frame
    }

    /// Creates a frame from a palette and indexed pixels.
    ///
    /// # Panics:
    /// * If the length of pixels does not equal `width * height`.
    /// * If the length of palette > `256 * 3`.
    pub fn from_palette_pixels(width: u16, height: u16, pixels: &[u8], palette: &[u8], transparent: Option<u8>) -> Frame<'static> {
        assert_eq!(width as usize * height as usize, pixels.len(), "Too many or too little pixels for the given width and height to create a GIF Frame");
        assert!(palette.len() <= 256*3, "Too many palette values to create a GIF Frame");
        let mut frame = Frame::default();

        frame.width = width;
        frame.height = height;

        frame.buffer = Cow::Owned(pixels.to_vec());
        frame.palette = Some(palette.to_vec());

        frame.transparent = transparent;

        frame
    }

    /// Creates a frame from indexed pixels in the global palette.
    ///
    /// The frame carries no local palette (`palette == None`); the indices
    /// are resolved against the encoder's global palette.
    ///
    /// # Panics:
    /// * If the length of pixels does not equal `width * height`.
    pub fn from_indexed_pixels(width: u16, height: u16, pixels: &[u8], transparent: Option<u8>) -> Frame<'static> {
        assert_eq!(width as usize * height as usize, pixels.len(), "Too many or too little pixels for the given width and height to create a GIF Frame");
        let mut frame = Frame::default();

        frame.width = width;
        frame.height = height;

        frame.buffer = Cow::Owned(pixels.to_vec());
        frame.palette = None;

        frame.transparent = transparent;

        frame
    }

    /// Creates a frame from pixels in RGB format.
    /// *Note: This method is not optimized for speed.*
    ///
    /// # Panics:
    /// * If the length of pixels does not equal `width * height * 3`.
    pub fn from_rgb(width: u16, height: u16, pixels: &[u8]) -> Frame<'static> {
        Frame::from_rgb_speed(width, height, pixels, 1)
    }

    /// Creates a frame from pixels in RGB format.
    /// `speed` is a value in the range [1, 30].
    /// The higher the value the faster it runs at the cost of image quality.
    /// A `speed` of 10 is a good compromise between speed and quality.
    ///
    /// Expands RGB to RGBA with full alpha and delegates to
    /// `from_rgba_speed` (which also checks the `speed` range).
    ///
    /// # Panics:
    /// * If the length of pixels does not equal `width * height * 3`.
    /// * If `speed < 1` or `speed > 30`
    pub fn from_rgb_speed(width: u16, height: u16, pixels: &[u8], speed: i32) -> Frame<'static> {
        assert_eq!(width as usize * height as usize * 3, pixels.len(), "Too much or too little pixel data for the given width and height to create a GIF Frame");
        // RGBA needs one extra byte per pixel: 3*w*h (input) + w*h (alpha).
        let mut vec: Vec<u8> = Vec::with_capacity(pixels.len() + width as usize * height as usize);
        for v in pixels.chunks(3) {
            vec.extend([v[0], v[1], v[2], 0xFF].iter().cloned())
        }
        Frame::from_rgba_speed(width, height, &mut vec, speed)
    }
}
|
|
@ -0,0 +1,329 @@
|
|||
|
||||
//! # Minimal gif encoder
|
||||
|
||||
|
||||
|
||||
use std::cmp::min;
|
||||
use std::io;
|
||||
use std::io::prelude::*;
|
||||
|
||||
use lzw;
|
||||
|
||||
use traits::{Parameter, WriteBytesExt};
|
||||
use common::{Block, Frame, Extension, DisposalMethod};
|
||||
|
||||
/// Number of repetitions
pub enum Repeat {
    /// Finite number of repetitions
    ///
    /// `Finite(0)` causes no repetition extension to be written at all
    /// (see `Encoder::write_extension`).
    Finite(u16),
    /// Infinite number of repetitions
    Infinite
}
|
||||
|
||||
// Setting `Repeat` on an encoder writes the NETSCAPE2.0 application
// extension that controls animation looping.
impl<W: Write> Parameter<Encoder<W>> for Repeat {
    type Result = Result<(), io::Error>;
    fn set_param(self, this: &mut Encoder<W>) -> Self::Result {
        this.write_extension(ExtensionData::Repetitions(self))
    }
}
|
||||
|
||||
/// Extension data.
pub enum ExtensionData {
    /// Control extension. Use `ExtensionData::new_control_ext` to construct.
    Control {
        /// Flags.
        ///
        /// Packed field: bit 0 = transparency flag, bit 1 = user input,
        /// bits 2-4 = disposal method (see `new_control_ext`).
        flags: u8,
        /// Frame delay.
        delay: u16,
        /// Transparent index.
        trns: u8
    },
    /// Sets the number of repetitions
    Repetitions(Repeat)
}
|
||||
|
||||
impl ExtensionData {
|
||||
/// Constructor for control extension data.
|
||||
///
|
||||
/// `delay` is given in units of 10 ms.
|
||||
pub fn new_control_ext(delay: u16, dispose: DisposalMethod,
|
||||
needs_user_input: bool, trns: Option<u8>) -> ExtensionData {
|
||||
let mut flags = 0;
|
||||
let trns = match trns {
|
||||
Some(trns) => {
|
||||
flags |= 1;
|
||||
trns as u8
|
||||
},
|
||||
None => 0
|
||||
};
|
||||
flags |= (needs_user_input as u8) << 1;
|
||||
flags |= (dispose as u8) << 2;
|
||||
ExtensionData::Control {
|
||||
flags: flags,
|
||||
delay: delay,
|
||||
trns: trns
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Writer adapter that splits its input into GIF data sub-blocks: each
// sub-block on the wire is a length byte (max 0xFF) followed by that many
// bytes. Dropping the writer flushes the final, possibly shorter sub-block.
struct BlockWriter<'a, W: Write + 'a> {
    // Underlying sink.
    w: &'a mut W,
    // Number of buffered bytes not yet emitted as a sub-block (< 0xFF).
    bytes: usize,
    // Staging buffer for one maximal (255-byte) sub-block.
    buf: [u8; 0xFF]
}
|
||||
|
||||
|
||||
impl<'a, W: Write + 'a> BlockWriter<'a, W> {
|
||||
fn new(w: &'a mut W) -> BlockWriter<'a, W> {
|
||||
BlockWriter {
|
||||
w: w,
|
||||
bytes: 0,
|
||||
buf: [0; 0xFF]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, W: Write + 'a> Write for BlockWriter<'a, W> {

    /// Buffers `buf` and emits a full 255-byte sub-block (length prefix +
    /// payload) whenever the staging buffer fills up. May accept fewer bytes
    /// than given; callers should use `write_all`.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let to_copy = min(buf.len(), 0xFF - self.bytes);
        { // isolation to please borrow checker
            let destination = &mut self.buf[self.bytes..];
            destination[..to_copy].copy_from_slice(&buf[..to_copy]);
        }
        self.bytes += to_copy;
        if self.bytes == 0xFF {
            self.bytes = 0;
            // Full sub-block: write the maximal length byte, then the data.
            self.w.write_le(0xFFu8)?;
            self.w.write_all(&self.buf)?;
        }
        Ok(to_copy)
    }
    /// Deliberately unsupported: a partial sub-block may only be emitted
    /// once, when the writer is dropped.
    fn flush(&mut self) -> io::Result<()> {
        return Err(io::Error::new(
            io::ErrorKind::Other,
            "Cannot flush a BlockWriter, use `drop` instead."
        ))
    }
}
|
||||
|
||||
// On drop, emits the final (shorter than 255 bytes) sub-block, if any.
// With the `raii_no_panic` feature write errors are silently ignored;
// otherwise they panic, since Drop cannot report them.
impl<'a, W: Write + 'a> Drop for BlockWriter<'a, W> {

    #[cfg(feature = "raii_no_panic")]
    fn drop(&mut self) {
        if self.bytes > 0 {
            let _ = self.w.write_le(self.bytes as u8);
            let _ = self.w.write_all(&self.buf[..self.bytes]);
        }
    }

    #[cfg(not(feature = "raii_no_panic"))]
    fn drop(&mut self) {
        if self.bytes > 0 {
            self.w.write_le(self.bytes as u8).unwrap();
            self.w.write_all(&self.buf[..self.bytes]).unwrap();
        }
    }
}
|
||||
|
||||
/// GIF encoder.
pub struct Encoder<W: Write> {
    // Output sink.
    w: W,
    // Set once `write_global_palette` has run; frames without a local
    // palette are only accepted in that case.
    global_palette: bool,
    // Logical screen dimensions written in the screen descriptor.
    width: u16,
    height: u16
}
|
||||
|
||||
impl<W: Write> Encoder<W> {
    /// Creates a new encoder.
    ///
    /// `global_palette` gives the global color palette in the format `[r, g, b, ...]`,
    /// if no global palette shall be used an empty slice may be supplied.
    ///
    /// Immediately writes the GIF header, logical screen descriptor and the
    /// global color table to `w`.
    pub fn new(w: W, width: u16, height: u16, global_palette: &[u8]) -> io::Result<Self> {
        Encoder {
            w: w,
            // Set to true by write_global_palette below.
            global_palette: false,
            width: width,
            height: height
        }.write_global_palette(global_palette)
    }

    /// Writes the global color palette.
    ///
    /// Fails with `InvalidInput` when the palette holds more than 256 colors.
    pub fn write_global_palette(mut self, palette: &[u8]) -> io::Result<Self> {
        self.global_palette = true;
        let mut flags = 0;
        // Global-color-table flag of the logical screen descriptor.
        flags |= 0b1000_0000;
        // 3 bytes (r, g, b) per color.
        let num_colors = palette.len() / 3;
        if num_colors > 256 {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, "Too many colors"));
        }
        flags |= flag_size(num_colors);
        flags |= flag_size(num_colors) << 4; // wtf flag
        self.write_screen_desc(flags)?;
        self.write_color_table(palette)?;
        Ok(self)
    }

    /// Writes a frame to the image.
    ///
    /// Note: This function also writes a control extension if necessary.
    ///
    /// Emits (in order): graphic control extension, image descriptor,
    /// optional local color table, and the LZW-compressed image data.
    /// A frame without a local palette requires that a global palette was
    /// written; otherwise `InvalidInput` is returned.
    pub fn write_frame(&mut self, frame: &Frame) -> io::Result<()> {
        // TODO commented off to pass test in lib.rs
        //if frame.delay > 0 || frame.transparent.is_some() {
            self.write_extension(ExtensionData::new_control_ext(
                frame.delay,
                frame.dispose,
                frame.needs_user_input,
                frame.transparent
            ))?;
        //}
        // Image descriptor: introducer, position, size, packed flags.
        self.w.write_le(Block::Image as u8)?;
        self.w.write_le(frame.left)?;
        self.w.write_le(frame.top)?;
        self.w.write_le(frame.width)?;
        self.w.write_le(frame.height)?;
        let mut flags = 0;
        if frame.interlaced {
            flags |= 0b0100_0000;
        }
        match frame.palette {
            Some(ref palette) => {
                // Local-color-table flag + its size bits.
                flags |= 0b1000_0000;
                let num_colors = palette.len() / 3;
                if num_colors > 256 {
                    return Err(io::Error::new(io::ErrorKind::InvalidInput, "Too many colors"));
                }
                flags |= flag_size(num_colors);
                self.w.write_le(flags)?;
                self.write_color_table(palette)
            },
            None => if !self.global_palette {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "The GIF format requires a color palette but none was given."
                ))
            } else {
                self.w.write_le(flags)
            }
        }?;
        self.write_image_block(&frame.buffer)
    }

    // LZW-compresses `data` and writes it as minimum-code-size byte plus a
    // sequence of sub-blocks, terminated by a zero-length block.
    fn write_image_block(&mut self, data: &[u8]) -> io::Result<()> {
        {
            // Derive the LZW minimum code size from the largest palette index.
            let min_code_size: u8 = match flag_size(*data.iter().max().unwrap_or(&0) as usize + 1) + 1 {
                1 => 2, // As per gif spec: The minimal code size has to be >= 2
                n => n
            };
            self.w.write_le(min_code_size)?;
            // BlockWriter chunks the compressed stream into <=255-byte
            // sub-blocks; its Drop flushes the final partial block.
            let mut bw = BlockWriter::new(&mut self.w);
            let mut enc = lzw::Encoder::new(lzw::LsbWriter::new(&mut bw), min_code_size)?;
            enc.encode_bytes(data)?;
        }
        // Block terminator.
        self.w.write_le(0u8)
    }

    // Writes `table` as a color table, zero-padding it to the power-of-two
    // size implied by its flag bits.
    fn write_color_table(&mut self, table: &[u8]) -> io::Result<()> {
        let num_colors = table.len() / 3;
        if num_colors > 256 {
            return Err(io::Error::new(io::ErrorKind::InvalidInput, "Too many colors"));
        }
        let size = flag_size(num_colors);
        self.w.write_all(&table[..num_colors * 3])?;
        // Waste some space as of gif spec
        for _ in 0..((2 << size) - num_colors) {
            self.w.write_all(&[0, 0, 0])?
        }
        Ok(())
    }

    /// Writes an extension to the image.
    ///
    /// It is normally not necessary to call this method manually.
    pub fn write_extension(&mut self, extension: ExtensionData) -> io::Result<()> {
        use self::ExtensionData::*;
        // 0 finite repetitions can only be achieved
        // if the corresponding extension is not written
        if let Repetitions(Repeat::Finite(0)) = extension {
            return Ok(())
        }
        self.w.write_le(Block::Extension as u8)?;
        match extension {
            Control { flags, delay, trns } => {
                self.w.write_le(Extension::Control as u8)?;
                // Fixed block size of a graphic control extension.
                self.w.write_le(4u8)?;
                self.w.write_le(flags)?;
                self.w.write_le(delay)?;
                self.w.write_le(trns)?;
            }
            Repetitions(repeat) => {
                // NETSCAPE2.0 looping application extension.
                self.w.write_le(Extension::Application as u8)?;
                self.w.write_le(11u8)?;
                // NOTE(review): `write` may write fewer than 11 bytes without
                // erroring; `write_all` looks intended here — confirm.
                self.w.write(b"NETSCAPE2.0")?;
                self.w.write_le(3u8)?;
                self.w.write_le(1u8)?;
                match repeat {
                    Repeat::Finite(no) => self.w.write_le(no)?,
                    Repeat::Infinite => self.w.write_le(0u16)?,
                }
            }
        }
        // Block terminator.
        self.w.write_le(0u8)
    }

    /// Writes a raw extension to the image.
    ///
    /// This method can be used to write an unsupported extension to the file. `func` is the extension
    /// identifier (e.g. `Extension::Application as u8`). `data` are the extension payload blocks. If any
    /// contained slice has a length > 255 it is automatically divided into sub-blocks.
    pub fn write_raw_extension(&mut self, func: u8, data: &[&[u8]]) -> io::Result<()> {
        self.w.write_le(Block::Extension as u8)?;
        self.w.write_le(func as u8)?;
        for block in data {
            for chunk in block.chunks(0xFF) {
                self.w.write_le(chunk.len() as u8)?;
                self.w.write_all(chunk)?;
            }
        }
        // Block terminator.
        self.w.write_le(0u8)
    }

    /// Writes the logical screen descriptor
    fn write_screen_desc(&mut self, flags: u8) -> io::Result<()> {
        self.w.write_all(b"GIF89a")?;
        self.w.write_le(self.width)?;
        self.w.write_le(self.height)?;
        self.w.write_le(flags)?; // packed field
        self.w.write_le(0u8)?; // bg index
        self.w.write_le(0u8) // aspect ratio
    }
}
|
||||
|
||||
// Writes the GIF trailer byte when the encoder goes out of scope.
// With the `raii_no_panic` feature a failed write is ignored; otherwise it
// panics, since Drop cannot return the error.
impl<W: Write> Drop for Encoder<W> {

    #[cfg(feature = "raii_no_panic")]
    fn drop(&mut self) {
        let _ = self.w.write_le(Block::Trailer as u8);
    }

    #[cfg(not(feature = "raii_no_panic"))]
    fn drop(&mut self) {
        self.w.write_le(Block::Trailer as u8).unwrap()
    }
}
|
||||
|
||||
// Color table size converted to flag bits
|
||||
fn flag_size(size: usize) -> u8 {
|
||||
match size {
|
||||
0 ...2 => 0,
|
||||
3 ...4 => 1,
|
||||
5 ...8 => 2,
|
||||
7 ...16 => 3,
|
||||
17 ...32 => 4,
|
||||
33 ...64 => 5,
|
||||
65 ...128 => 6,
|
||||
129...256 => 7,
|
||||
_ => 7
|
||||
}
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue