- * When compressing or decompressing many (very) small files in a row, the - * time spent in construction of new compressor or decompressor objects - * can be longer than the time spent in actual compression or decompression. - * A large part of this initialization overhead comes from allocation and - * garbage collection of large arrays. - *
- * The {@code ArrayCache} API provides a way to cache large array allocations - * for reuse. It can give a major performance improvement when compressing or - * decompressing many tiny files. If you are only (de)compressing one or two - * files or the files a very big, array caching won't improve anything, - * although it won't make anything slower either. - *
- * Important: The users of ArrayCache don't return the allocated arrays - * back to the cache in all situations. - * This a reason why it's called a cache instead of a pool. - * If it is important to be able to return every array back to a cache, - * {@link ResettableArrayCache} can be useful. - *
- * In compressors (OutputStreams) the arrays are returned to the cache - * when a call to {@code finish()} or {@code close()} returns - * successfully (no exceptions are thrown). - *
- * In decompressors (InputStreams) the arrays are returned to the cache when - * the decompression is successfully finished ({@code read} returns {@code -1}) - * or {@code close()} or {@code close(boolean)} is called. This is true even - * if closing throws an exception. - *
- * Raw decompressors don't support {@code close(boolean)}. With raw - * decompressors, if one wants to put the arrays back to the cache without - * closing the underlying {@code InputStream}, one can wrap the - * {@code InputStream} into {@link CloseIgnoringInputStream} when creating - * the decompressor instance. Then one can use {@code close()}. - *
- * Different cache implementations can be extended from this base class. - * All cache implementations must be thread safe. - *
- * This class also works as a dummy cache that simply calls {@code new} - * to allocate new arrays and doesn't try to cache anything. A statically - * allocated dummy cache is available via {@link #getDummyCache()}. - *
- * If no {@code ArrayCache} is specified when constructing a compressor or - * decompressor, the default {@code ArrayCache} implementation is used. - * See {@link #getDefaultCache()} and {@link #setDefaultCache(ArrayCache)}. - *
- * This is a class instead of an interface because it's possible that in the - * future we may want to cache other array types too. New methods can be - * added to this class without breaking existing cache implementations. - * - * @since 1.7 - * - * @see BasicArrayCache - */ -public class ArrayCache { - /** - * Global dummy cache instance that is returned by {@code getDummyCache()}. - */ - private static final ArrayCache dummyCache = new ArrayCache(); - - /** - * Global default {@code ArrayCache} that is used when no other cache has - * been specified. - */ - private static volatile ArrayCache defaultCache = dummyCache; - - /** - * Returns a statically-allocated {@code ArrayCache} instance. - * It can be shared by all code that needs a dummy cache. - */ - public static ArrayCache getDummyCache() { - return dummyCache; - } - - /** - * Gets the default {@code ArrayCache} instance. - * This is a global cache that is used when the application - * specifies nothing else. The default is a dummy cache - * (see {@link #getDummyCache()}). - */ - public static ArrayCache getDefaultCache() { - // It's volatile so no need for synchronization. - return defaultCache; - } - - /** - * Sets the default {@code ArrayCache} instance. - * Use with care. Other libraries using this package probably shouldn't - * call this function as libraries cannot know if there are other users - * of the xz package in the same application. - */ - public static void setDefaultCache(ArrayCache arrayCache) { - if (arrayCache == null) - throw new NullPointerException(); - - // It's volatile so no need for synchronization. - defaultCache = arrayCache; - } - - /** - * Creates a new {@code ArrayCache} that does no caching - * (a dummy cache). If you need a dummy cache, you may want to call - * {@link #getDummyCache()} instead. - */ - public ArrayCache() {} - - /** - * Allocates a new byte array. - *
- * This implementation simply returns {@code new byte[size]}. - * - * @param size the minimum size of the array to allocate; - * an implementation may return an array that - * is larger than the given {@code size} - * - * @param fillWithZeros if true, the caller expects that the first - * {@code size} elements in the array are zero; - * if false, the array contents can be anything, - * which speeds things up when reusing a cached - * array - */ - public byte[] getByteArray(int size, boolean fillWithZeros) { - return new byte[size]; - } - - /** - * Puts the given byte array to the cache. The caller must no longer - * use the array. - *
- * This implementation does nothing. - */ - public void putArray(byte[] array) {} - - /** - * Allocates a new int array. - *
- * This implementation simply returns {@code new int[size]}. - * - * @param size the minimum size of the array to allocate; - * an implementation may return an array that - * is larger than the given {@code size} - * - * @param fillWithZeros if true, the caller expects that the first - * {@code size} elements in the array are zero; - * if false, the array contents can be anything, - * which speeds things up when reusing a cached - * array - */ - public int[] getIntArray(int size, boolean fillWithZeros) { - return new int[size]; - } - - /** - * Puts the given int array to the cache. The caller must no longer - * use the array. - *
- * This implementation does nothing.
- */
- public void putArray(int[] array) {}
-}
diff --git a/app/src/main/java/org/tukaani/xz/BCJCoder.java b/app/src/main/java/org/tukaani/xz/BCJCoder.java
deleted file mode 100644
index 81862f7..0000000
--- a/app/src/main/java/org/tukaani/xz/BCJCoder.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * BCJCoder
- *
- * Author: Lasse Collin
- * This caches exact array sizes, that is, {@code getByteArray} will return
- * an array whose size is exactly the requested size. A limited number
- * of different array sizes are cached at the same time; least recently used
- * sizes will be dropped from the cache if needed (can happen if several
- * different (de)compression options are used with the same cache).
- *
- * The current implementation uses
- * {@link java.util.LinkedHashMap LinkedHashMap} to map different array sizes
- * to separate array-based data structures which hold
- * {@link java.lang.ref.SoftReference SoftReferences} to the cached arrays.
- * In the common case this should give good performance and fairly low
- * memory usage overhead.
- *
- * A statically allocated global {@code BasicArrayCache} instance is
- * available via {@link #getInstance()} which is a good choice in most
- * situations where caching is wanted.
- *
- * @since 1.7
- */
-public class BasicArrayCache extends ArrayCache {
- /**
- * Arrays smaller than this many elements will not be cached.
- */
- private static final int CACHEABLE_SIZE_MIN = 32 << 10;
-
- /**
- * Number of stacks i.e. how many different array sizes to cache.
- */
- private static final int STACKS_MAX = 32;
-
- /**
- * Number of arrays of the same type and size to keep in the cache.
- * (ELEMENTS_PER_STACK - 1) is used as a bit mask so ELEMENTS_PER_STACK
- * must be a power of two!
- */
- private static final int ELEMENTS_PER_STACK = 512;
-
- /**
- * A thread-safe stack-like data structure whose {@code push} method
- * overwrites the oldest element in the stack if the stack is full.
- */
- private static class CyclicStack
- * Note that {@code pop()} always modifies {@code pos}, even if
- * the stack is empty. This means that when the first element is
- * added by {@code push(T)}, it can get added in any position in
- * {@code refs} and the stack will start growing from there.
- */
- private int pos = 0;
-
- /**
- * Gets the most recently added element from the stack.
- * If the stack is empty, {@code null} is returned.
- */
- public synchronized T pop() {
- T e = elements[pos];
- elements[pos] = null;
- pos = (pos - 1) & (ELEMENTS_PER_STACK - 1);
- return e;
- }
-
- /**
- * Adds a new element to the stack. If the stack is full, the oldest
- * element is overwritten.
- */
- public synchronized void push(T e) {
- pos = (pos + 1) & (ELEMENTS_PER_STACK - 1);
- elements[pos] = e;
- }
- }
-
- /**
- * Maps Integer (array size) to stacks of references to arrays. At most
- * STACKS_MAX number of stacks are kept in the map (LRU cache).
- */
- private static class CacheMap
- * Small arrays aren't cached and will be ignored by this method.
- */
- public void putArray(byte[] array) {
- putArray(byteArrayCache, array, array.length);
- }
-
- /**
- * This is like getByteArray but for int arrays.
- */
- public int[] getIntArray(int size, boolean fillWithZeros) {
- int[] array = getArray(intArrayCache, size);
-
- if (array == null)
- array = new int[size];
- else if (fillWithZeros)
- Arrays.fill(array, 0);
-
- return array;
- }
-
- /**
- * Puts the given int array to the cache. The caller must no longer
- * use the array.
- *
- * Small arrays aren't cached and will be ignored by this method.
- */
- public void putArray(int[] array) {
- putArray(intArrayCache, array, array.length);
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/BlockInputStream.java b/app/src/main/java/org/tukaani/xz/BlockInputStream.java
deleted file mode 100644
index a9fff5f..0000000
--- a/app/src/main/java/org/tukaani/xz/BlockInputStream.java
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * BlockInputStream
- *
- * Author: Lasse Collin
- * With {@link XZInputStream}, {@link SingleXZInputStream}, and
- * {@link SeekableXZInputStream} you can use their {@code close(boolean)}
- * method to avoid closing the underlying {@code InputStream}; with
- * those classes {@code CloseIgnoringInputStream} isn't needed.
- *
- * @since 1.7
- */
-public class CloseIgnoringInputStream extends FilterInputStream {
- /**
- * Creates a new {@code CloseIgnoringInputStream}.
- */
- public CloseIgnoringInputStream(InputStream in) {
- super(in);
- }
-
- /**
- * This does nothing (doesn't call {@code in.close()}).
- */
- public void close() {}
-}
diff --git a/app/src/main/java/org/tukaani/xz/CorruptedInputException.java b/app/src/main/java/org/tukaani/xz/CorruptedInputException.java
deleted file mode 100644
index d7d9520..0000000
--- a/app/src/main/java/org/tukaani/xz/CorruptedInputException.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * CorruptedInputException
- *
- * Author: Lasse Collin
- * The
- * The delta filter doesn't change the size of the data and thus it
- * cannot have an end-of-payload marker. It will simply decode until
- * its input stream indicates end of input.
- */
-public class DeltaInputStream extends InputStream {
- /**
- * Smallest supported delta calculation distance.
- */
- public static final int DISTANCE_MIN = 1;
-
- /**
- * Largest supported delta calculation distance.
- */
- public static final int DISTANCE_MAX = 256;
-
- private InputStream in;
- private final DeltaDecoder delta;
-
- private IOException exception = null;
-
- private final byte[] tempBuf = new byte[1];
-
- /**
- * Creates a new Delta decoder with the given delta calculation distance.
- *
- * @param in input stream from which Delta filtered data
- * is read
- *
- * @param distance delta calculation distance, must be in the
- * range [
- * This calls
- * Currently only simple byte-wise delta is supported. The only option
- * is the delta distance, which you should set to match your data.
- * It's not possible to provide a generic default value for it.
- *
- * For example, with distance = 2 and eight-byte input
- * A1 B1 A2 B3 A3 B5 A4 B7, the output will be A1 B1 01 02 01 02 01 02.
- *
- * The Delta filter can be good with uncompressed bitmap images. It can
- * also help with PCM audio, although special-purpose compressors like
- * FLAC will give much smaller result at much better compression speed.
- */
-public class DeltaOptions extends FilterOptions {
- /**
- * Smallest supported delta calculation distance.
- */
- public static final int DISTANCE_MIN = 1;
-
- /**
- * Largest supported delta calculation distance.
- */
- public static final int DISTANCE_MAX = 256;
-
- private int distance = DISTANCE_MIN;
-
- /**
- * Creates new Delta options and sets the delta distance to 1 byte.
- */
- public DeltaOptions() {}
-
- /**
- * Creates new Delta options and sets the distance to the given value.
- */
- public DeltaOptions(int distance) throws UnsupportedOptionsException {
- setDistance(distance);
- }
-
- /**
- * Sets the delta distance in bytes. The new distance must be in
- * the range [DISTANCE_MIN, DISTANCE_MAX].
- */
- public void setDistance(int distance) throws UnsupportedOptionsException {
- if (distance < DISTANCE_MIN || distance > DISTANCE_MAX)
- throw new UnsupportedOptionsException(
- "Delta distance must be in the range [" + DISTANCE_MIN
- + ", " + DISTANCE_MAX + "]: " + distance);
-
- this.distance = distance;
- }
-
- /**
- * Gets the delta distance.
- */
- public int getDistance() {
- return distance;
- }
-
- public int getEncoderMemoryUsage() {
- return DeltaOutputStream.getMemoryUsage();
- }
-
- public FinishableOutputStream getOutputStream(FinishableOutputStream out,
- ArrayCache arrayCache) {
- return new DeltaOutputStream(out, this);
- }
-
- public int getDecoderMemoryUsage() {
- return 1;
- }
-
- public InputStream getInputStream(InputStream in, ArrayCache arrayCache) {
- return new DeltaInputStream(in, distance);
- }
-
- FilterEncoder getFilterEncoder() {
- return new DeltaEncoder(this);
- }
-
- public Object clone() {
- try {
- return super.clone();
- } catch (CloneNotSupportedException e) {
- assert false;
- throw new RuntimeException();
- }
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/DeltaOutputStream.java b/app/src/main/java/org/tukaani/xz/DeltaOutputStream.java
deleted file mode 100644
index bd880db..0000000
--- a/app/src/main/java/org/tukaani/xz/DeltaOutputStream.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * DeltaOutputStream
- *
- * Author: Lasse Collin
- * This is uses ArrayCache.getDefaultCache() as the ArrayCache.
- */
- public FinishableOutputStream getOutputStream(FinishableOutputStream out) {
- return getOutputStream(out, ArrayCache.getDefaultCache());
- }
-
- /**
- * Gets a raw (no XZ headers) encoder output stream using these options
- * and the given ArrayCache.
- * Raw streams are an advanced feature. In most cases you want to store
- * the compressed data in the .xz container format instead of using
- * a raw stream. To use this filter in a .xz file, pass this object
- * to XZOutputStream.
- */
- public abstract FinishableOutputStream getOutputStream(
- FinishableOutputStream out, ArrayCache arrayCache);
-
- /**
- * Gets how much memory the decoder will need to decompress the data
- * that was encoded with these options.
- */
- public abstract int getDecoderMemoryUsage();
-
- /**
- * Gets a raw (no XZ headers) decoder input stream using these options.
- *
- * This is uses ArrayCache.getDefaultCache() as the ArrayCache.
- */
- public InputStream getInputStream(InputStream in) throws IOException {
- return getInputStream(in, ArrayCache.getDefaultCache());
- }
-
- /**
- * Gets a raw (no XZ headers) decoder input stream using these options
- * and the given ArrayCache.
- */
- public abstract InputStream getInputStream(
- InputStream in, ArrayCache arrayCache) throws IOException;
-
- abstract FilterEncoder getFilterEncoder();
-
- FilterOptions() {}
-}
diff --git a/app/src/main/java/org/tukaani/xz/FinishableOutputStream.java b/app/src/main/java/org/tukaani/xz/FinishableOutputStream.java
deleted file mode 100644
index 64d4ca5..0000000
--- a/app/src/main/java/org/tukaani/xz/FinishableOutputStream.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * FinishableOutputStream
- *
- * Author: Lasse Collin
- * The
- * Very tiny dictionaries would be a performance problem, so
- * the minimum is 4 KiB.
- */
- public static final int DICT_SIZE_MIN = 4096;
-
- /**
- * Largest dictionary size supported by this implementation.
- *
- * The LZMA2 algorithm allows dictionaries up to one byte less than 4 GiB.
- * This implementation supports only 16 bytes less than 2 GiB for raw
- * LZMA2 streams, and for .xz files the maximum is 1.5 GiB. This
- * limitation is due to Java using signed 32-bit integers for array
- * indexing. The limitation shouldn't matter much in practice since so
- * huge dictionaries are not normally used.
- */
- public static final int DICT_SIZE_MAX = Integer.MAX_VALUE & ~15;
-
- private static final int COMPRESSED_SIZE_MAX = 1 << 16;
-
- private final ArrayCache arrayCache;
- private DataInputStream in;
-
- private LZDecoder lz;
- private RangeDecoderFromBuffer rc;
- private LZMADecoder lzma;
-
- private int uncompressedSize = 0;
- private boolean isLZMAChunk = false;
-
- private boolean needDictReset = true;
- private boolean needProps = true;
- private boolean endReached = false;
-
- private IOException exception = null;
-
- private final byte[] tempBuf = new byte[1];
-
- /**
- * Gets approximate decompressor memory requirements as kibibytes for
- * the given dictionary size.
- *
- * @param dictSize LZMA2 dictionary size as bytes, must be
- * in the range [
- * The caller needs to know the dictionary size used when compressing;
- * the dictionary size isn't stored as part of a raw LZMA2 stream.
- *
- * Specifying a too small dictionary size will prevent decompressing
- * the stream. Specifying a too big dictionary is waste of memory but
- * decompression will work.
- *
- * There is no need to specify a dictionary bigger than
- * the uncompressed size of the data even if a bigger dictionary
- * was used when compressing. If you know the uncompressed size
- * of the data, this might allow saving some memory.
- *
- * @param in input stream from which LZMA2-compressed
- * data is read
- *
- * @param dictSize LZMA2 dictionary size as bytes, must be
- * in the range [
- * This is like
- * This is like
- * Reading lots of data with
- * If
- * In LZMA2InputStream, the return value will be non-zero when the
- * decompressor is in the middle of an LZMA2 chunk. The return value
- * will then be the number of uncompressed bytes remaining from that
- * chunk. The return value can also be non-zero in the middle of
- * an uncompressed chunk, but then the return value depends also on
- * the
- * While this allows setting the LZMA2 compression options in detail,
- * often you only need
- * The decompressor supports bigger dictionaries, up to almost 2 GiB.
- * With HC4 the encoder would support dictionaries bigger than 768 MiB.
- * The 768 MiB limit comes from the current implementation of BT4 where
- * we would otherwise hit the limits of signed ints in array indexing.
- *
- * If you really need bigger dictionary for decompression,
- * use {@link LZMA2InputStream} directly.
- */
- public static final int DICT_SIZE_MAX = 768 << 20;
-
- /**
- * The default dictionary size is 8 MiB.
- */
- public static final int DICT_SIZE_DEFAULT = 8 << 20;
-
- /**
- * Maximum value for lc + lp is 4.
- */
- public static final int LC_LP_MAX = 4;
-
- /**
- * The default number of literal context bits is 3.
- */
- public static final int LC_DEFAULT = 3;
-
- /**
- * The default number of literal position bits is 0.
- */
- public static final int LP_DEFAULT = 0;
-
- /**
- * Maximum value for pb is 4.
- */
- public static final int PB_MAX = 4;
-
- /**
- * The default number of position bits is 2.
- */
- public static final int PB_DEFAULT = 2;
-
- /**
- * Compression mode: uncompressed.
- * The data is wrapped into a LZMA2 stream without compression.
- */
- public static final int MODE_UNCOMPRESSED = 0;
-
- /**
- * Compression mode: fast.
- * This is usually combined with a hash chain match finder.
- */
- public static final int MODE_FAST = LZMAEncoder.MODE_FAST;
-
- /**
- * Compression mode: normal.
- * This is usually combined with a binary tree match finder.
- */
- public static final int MODE_NORMAL = LZMAEncoder.MODE_NORMAL;
-
- /**
- * Minimum value for
- * The presets 0-3 are fast presets with medium compression.
- * The presets 4-6 are fairly slow presets with high compression.
- * The default preset (
- * The presets 7-9 are like the preset 6 but use bigger dictionaries
- * and have higher compressor and decompressor memory requirements.
- * Unless the uncompressed size of the file exceeds 8 MiB,
- * 16 MiB, or 32 MiB, it is waste of memory to use the
- * presets 7, 8, or 9, respectively.
- *
- * @throws UnsupportedOptionsException
- *
- * The dictionary (or history buffer) holds the most recently seen
- * uncompressed data. Bigger dictionary usually means better compression.
- * However, using a dictioanary bigger than the size of the uncompressed
- * data is waste of memory.
- *
- * Any value in the range [DICT_SIZE_MIN, DICT_SIZE_MAX] is valid,
- * but sizes of 2^n and 2^n + 2^(n-1) bytes are somewhat
- * recommended.
- *
- * @throws UnsupportedOptionsException
- *
- * The .xz format doesn't support a preset dictionary for now.
- * Do not set a preset dictionary unless you use raw LZMA2.
- *
- * Preset dictionary can be useful when compressing many similar,
- * relatively small chunks of data independently from each other.
- * A preset dictionary should contain typical strings that occur in
- * the files being compressed. The most probable strings should be
- * near the end of the preset dictionary. The preset dictionary used
- * for compression is also needed for decompression.
- */
- public void setPresetDict(byte[] presetDict) {
- this.presetDict = presetDict;
- }
-
- /**
- * Gets the preset dictionary.
- */
- public byte[] getPresetDict() {
- return presetDict;
- }
-
- /**
- * Sets the number of literal context bits and literal position bits.
- *
- * The sum of
- * All bytes that cannot be encoded as matches are encoded as literals.
- * That is, literals are simply 8-bit bytes that are encoded one at
- * a time.
- *
- * The literal coding makes an assumption that the highest
- * The default value (3) is usually good. If you want maximum compression,
- * try
- * This affets what kind of alignment in the uncompressed data is
- * assumed when encoding literals. See {@link #setPb(int) setPb} for
- * more information about alignment.
- *
- * @throws UnsupportedOptionsException
- *
- * This affects what kind of alignment in the uncompressed data is
- * assumed in general. The default (2) means four-byte alignment
- * (2^
- * When the alignment is known, setting the number of position bits
- * accordingly may reduce the file size a little. For example with text
- * files having one-byte alignment (US-ASCII, ISO-8859-*, UTF-8), using
- *
- * Even though the assumed alignment can be adjusted with
- *
- * This specifies the method to analyze the data produced by
- * a match finder. The default is
- * Usually
- * The special mode
- * Match finder has a major effect on compression speed, memory usage,
- * and compression ratio. Usually Hash Chain match finders are faster
- * than Binary Tree match finders. The default depends on the preset:
- * 0-3 use
- * The default is a special value of
- * Reasonable depth limit for Hash Chain match finders is 4-100 and
- * 16-1000 for Binary Tree match finders. Using very high values can
- * make the compressor extremely slow with some files. Avoid settings
- * higher than 1000 unless you are prepared to interrupt the compression
- * in case it is taking far too long.
- *
- * @throws UnsupportedOptionsException
- *
- * The returned value may bigger than the value returned by a direct call
- * to {@link LZMA2InputStream#getMemoryUsage(int)} if the dictionary size
- * is not 2^n or 2^n + 2^(n-1) bytes. This is because the .xz
- * headers store the dictionary size in such a format and other values
- * are rounded up to the next such value. Such rounding is harmess except
- * it might waste some memory if an unsual dictionary size is used.
- *
- * If you use raw LZMA2 streams and unusual dictioanary size, call
- * {@link LZMA2InputStream#getMemoryUsage} directly to get raw decoder
- * memory requirements.
- */
- public int getDecoderMemoryUsage() {
- // Round the dictionary size up to the next 2^n or 2^n + 2^(n-1).
- int d = dictSize - 1;
- d |= d >>> 2;
- d |= d >>> 3;
- d |= d >>> 4;
- d |= d >>> 8;
- d |= d >>> 16;
- return LZMA2InputStream.getMemoryUsage(d + 1);
- }
-
- public InputStream getInputStream(InputStream in, ArrayCache arrayCache)
- throws IOException {
- return new LZMA2InputStream(in, dictSize, presetDict, arrayCache);
- }
-
- FilterEncoder getFilterEncoder() {
- return new LZMA2Encoder(this);
- }
-
- public Object clone() {
- try {
- return super.clone();
- } catch (CloneNotSupportedException e) {
- assert false;
- throw new RuntimeException();
- }
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/LZMA2OutputStream.java b/app/src/main/java/org/tukaani/xz/LZMA2OutputStream.java
deleted file mode 100644
index 1bb2d85..0000000
--- a/app/src/main/java/org/tukaani/xz/LZMA2OutputStream.java
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * LZMA2OutputStream
- *
- * Authors: Lasse Collin
- * IMPORTANT: In contrast to other classes in this package, this class
- * reads data from its input stream one byte at a time. If the input stream
- * is for example {@link java.io.FileInputStream}, wrapping it into
- * {@link java.io.BufferedInputStream} tends to improve performance a lot.
- * This is not automatically done by this class because there may be use
- * cases where it is desired that this class won't read any bytes past
- * the end of the LZMA stream.
- *
- * Even when using
- * LZMA allows dictionaries up to one byte less than 4 GiB. This
- * implementation supports only 16 bytes less than 2 GiB. This
- * limitation is due to Java using signed 32-bit integers for array
- * indexing. The limitation shouldn't matter much in practice since so
- * huge dictionaries are not normally used.
- */
- public static final int DICT_SIZE_MAX = Integer.MAX_VALUE & ~15;
-
- private InputStream in;
- private ArrayCache arrayCache;
- private LZDecoder lz;
- private RangeDecoderFromStream rc;
- private LZMADecoder lzma;
-
- private boolean endReached = false;
- private boolean relaxedEndCondition = false;
-
- private final byte[] tempBuf = new byte[1];
-
- /**
- * Number of uncompressed bytes left to be decompressed, or -1 if
- * the end marker is used.
- */
- private long remainingSize;
-
- private IOException exception = null;
-
- /**
- * Gets approximate decompressor memory requirements as kibibytes for
- * the given dictionary size and LZMA properties byte (lc, lp, and pb).
- *
- * @param dictSize LZMA dictionary size as bytes, should be
- * in the range [
- * This is identical to
- * This is identical to
- * The caller needs to know if the "end of payload marker (EOPM)" alias
- * "end of stream marker (EOS marker)" alias "end marker" present.
- * If the end marker isn't used, the caller must know the exact
- * uncompressed size of the stream.
- *
- * The caller also needs to provide the LZMA properties byte that encodes
- * the number of literal context bits (lc), literal position bits (lp),
- * and position bits (pb).
- *
- * The dictionary size used when compressing is also needed. Specifying
- * a too small dictionary size will prevent decompressing the stream.
- * Specifying a too big dictionary is waste of memory but decompression
- * will work.
- *
- * There is no need to specify a dictionary bigger than
- * the uncompressed size of the data even if a bigger dictionary
- * was used when compressing. If you know the uncompressed size
- * of the data, this might allow saving some memory.
- *
- * @param in input stream from which compressed
- * data is read
- *
- * @param uncompSize uncompressed size of the LZMA stream or -1
- * if the end marker is used in the LZMA stream
- *
- * @param propsByte LZMA properties byte that has the encoded
- * values for literal context bits (lc), literal
- * position bits (lp), and position bits (pb)
- *
- * @param dictSize dictionary size as bytes, must be in the range
- * [
- * This is identical to
- * This is identical to
- * Note that this doesn't actually check if the EOS marker is present.
- * This introduces a few minor downsides:
- *
- * This should be called after the constructor before reading any data
- * from the stream. This is a separate function because adding even more
- * constructors to this class didn't look like a good alternative.
- *
- * @since 1.9
- */
- public void enableRelaxedEndCondition() {
- relaxedEndCondition = true;
- }
-
- /**
- * Decompresses the next byte from this input stream.
- *
- * Reading lots of data with
- * If
- * If the uncompressed size of the input data is known, it will be stored
- * in the .lzma header and no end of stream marker will be used. Otherwise
- * the header will indicate unknown uncompressed size and the end of stream
- * marker will be used.
- *
- * Note that a preset dictionary cannot be used in .lzma files but
- * it can be used for raw LZMA streams.
- *
- * @param out output stream to which the compressed data
- * will be written
- *
- * @param options LZMA compression options; the same class
- * is used here as is for LZMA2
- *
- * @param inputSize uncompressed size of the data to be compressed;
- * use
- * This is identical to
- *
- * Raw LZMA streams can be encoded with or without end of stream marker.
- * When decompressing the stream, one must know if the end marker was used
- * and tell it to the decompressor. If the end marker wasn't used, the
- * decompressor will also need to know the uncompressed size.
- *
- * @param out output stream to which the compressed data
- * will be written
- *
- * @param options LZMA compression options; the same class
- * is used here as is for LZMA2
- *
- * @param useEndMarker
- * if end of stream marker should be written
- *
- * @throws IOException may be thrown from
- * This is identical to
- *
- * The amount of memory required and the memory usage limit are
- * included in the error detail message in human readable format.
- */
-public class MemoryLimitException extends XZIOException {
- private static final long serialVersionUID = 3L;
-
- private final int memoryNeeded;
- private final int memoryLimit;
-
- /**
- * Creates a new MemoryLimitException.
- *
- * The amount of memory needed and the memory usage limit are
- * included in the error detail message.
- *
- * @param memoryNeeded amount of memory needed as kibibytes (KiB)
- * @param memoryLimit specified memory usage limit as kibibytes (KiB)
- */
- public MemoryLimitException(int memoryNeeded, int memoryLimit) {
- super("" + memoryNeeded + " KiB of memory would be needed; limit was "
- + memoryLimit + " KiB");
-
- this.memoryNeeded = memoryNeeded;
- this.memoryLimit = memoryLimit;
- }
-
- /**
- * Gets how much memory is required to decompress the data.
- *
- * @return amount of memory needed as kibibytes (KiB)
- */
- public int getMemoryNeeded() {
- return memoryNeeded;
- }
-
- /**
- * Gets what the memory usage limit was at the time the exception
- * was created.
- *
- * @return memory usage limit as kibibytes (KiB)
- */
- public int getMemoryLimit() {
- return memoryLimit;
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/PowerPCOptions.java b/app/src/main/java/org/tukaani/xz/PowerPCOptions.java
deleted file mode 100644
index 9b6fce1..0000000
--- a/app/src/main/java/org/tukaani/xz/PowerPCOptions.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * PowerPCOptions
- *
- * Author: Lasse Collin
- * This will not seek past the end of the file. If the current position
- * is already at or past the end of the file, this doesn't seek at all
- * and returns
- * If
- * Seeking past the end of the file should be supported by the subclasses
- * unless there is a good reason to do otherwise. If one has seeked
- * past the end of the stream,
- * Each .xz file consist of one or more Streams. Each Stream consist of zero
- * or more Blocks. Each Stream contains an Index of Streams' Blocks.
- * The Indexes from all Streams are loaded in RAM by a constructor of this
- * class. A typical .xz file has only one Stream, and parsing its Index will
- * need only three or four seeks.
- *
- * To make random access possible, the data in a .xz file must be splitted
- * into multiple Blocks of reasonable size. Decompression can only start at
- * a Block boundary. When seeking to an uncompressed position that is not at
- * a Block boundary, decompression starts at the beginning of the Block and
- * throws away data until the target position is reached. Thus, smaller Blocks
- * mean faster seeks to arbitrary uncompressed positions. On the other hand,
- * smaller Blocks mean worse compression. So one has to make a compromise
- * between random access speed and compression ratio.
- *
- * Implementation note: This class uses linear search to locate the correct
- * Stream from the data structures in RAM. It was the simplest to implement
- * and should be fine as long as there aren't too many Streams. The correct
- * Block inside a Stream is located using binary search and thus is fast
- * even with a huge number of Blocks.
- *
- *
- * The amount of memory needed for the Indexes is taken into account when
- * checking the memory usage limit. Each Stream is calculated to need at
- * least 1 KiB of memory and each Block 16 bytes of memory, rounded up
- * to the next kibibyte. So unless the file has a huge number of Streams or
- * Blocks, these don't take significant amount of memory.
- *
- *
- * When using {@link XZOutputStream}, a new Block can be started by calling
- * its {@link XZOutputStream#endBlock() endBlock} method. If you know
- * that the decompressor will only need to seek to certain uncompressed
- * positions, it can be a good idea to start a new Block at (some of) these
- * positions (and only at these positions to get better compression ratio).
- *
- * liblzma in XZ Utils supports starting a new Block with
- *
- * This is identical to
- *
- * This is identical to
- *
- * Note that integrity check verification should almost never be disabled.
- * Possible reasons to disable integrity check verification:
- *
- *
- * This is identical to
- *
- * The returned value has a bit set for every check type that is present.
- * For example, if CRC64 and SHA-256 were used, the return value is
- *
- * If
- * This is equivalent to
- * If you don't want to close the underlying
- * Note that if you successfully reach the end of the stream
- * (
- * Seeking past the end of the stream is possible. In that case
- *
- * Unless you know what you are doing, don't use this class to decompress
- * standalone .xz files. For that purpose, use
- * If you are decompressing complete XZ streams and your application knows
- * exactly how much uncompressed data there should be, it is good to try
- * reading one more byte by calling
- * This constructor reads and parses the XZ Stream Header (12 bytes)
- * from
- * This is identical to
- * This is identical to
- * This is identical to
- * This is identical to
- * Note that integrity check verification should almost never be disabled.
- * Possible reasons to disable integrity check verification:
- *
- *
- * This is identical to
- *
- * Reading lots of data with
- * If
- * This is equivalent to
- * If you don't want to close the underlying
- * Note that if you successfully reach the end of the stream
- * (
- * Omitting the integrity check is strongly discouraged except when
- * the integrity of the data will be verified by other means anyway,
- * and calculating the check twice would be useless.
- */
- public static final int CHECK_NONE = 0;
-
- /**
- * Integrity check ID for CRC32.
- */
- public static final int CHECK_CRC32 = 1;
-
- /**
- * Integrity check ID for CRC64.
- */
- public static final int CHECK_CRC64 = 4;
-
- /**
- * Integrity check ID for SHA-256.
- */
- public static final int CHECK_SHA256 = 10;
-
- private XZ() {}
-}
diff --git a/app/src/main/java/org/tukaani/xz/XZFormatException.java b/app/src/main/java/org/tukaani/xz/XZFormatException.java
deleted file mode 100644
index 6f63020..0000000
--- a/app/src/main/java/org/tukaani/xz/XZFormatException.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * XZFormatException
- *
- * Author: Lasse Collin
- * Use this to decompress regular standalone .xz files. This reads from
- * its input stream until the end of the input or until an error occurs.
- * This supports decompressing concatenated .xz files.
- *
- *
- * Getting an input stream to decompress a .xz file:
- *
- * It's important to keep in mind that decompressor memory usage depends
- * on the settings used to compress the file. The worst-case memory usage
- * of XZInputStream is currently 1.5 GiB. Still, very few files will
- * require more than about 65 MiB because that's how much decompressing
- * a file created with the highest preset level will need, and only a few
- * people use settings other than the predefined presets.
- *
- * It is possible to specify a memory usage limit for
- *
- * If you are decompressing complete files and your application knows
- * exactly how much uncompressed data there should be, it is good to try
- * reading one more byte by calling
- * This constructor reads and parses the XZ Stream Header (12 bytes)
- * from
- * This is identical to
- * This is identical to
- * This is identical to
- * This is identical to
- * Note that integrity check verification should almost never be disabled.
- * Possible reasons to disable integrity check verification:
- *
- *
- * This is identical to
- * Reading lots of data with
- * If
- * This is equivalent to
- * If you don't want to close the underlying
- * Note that if you successfully reach the end of the stream
- * (
- * Getting an output stream to compress with LZMA2 using the default
- * settings and the default integrity check type (CRC64):
- *
- * Using the preset level
- * Using the x86 BCJ filter together with LZMA2 to compress x86 executables
- * and printing the memory usage information before creating the
- * XZOutputStream:
- *
- * Currently this cannot be used to update e.g. LZMA2 options in the
- * middle of a XZ Block. Use
- * If there is no unfinished Block open, this function will do nothing.
- * (No empty XZ Block will be created.)
- *
- * This function can be useful, for example, to create
- * random-accessible .xz files.
- *
- * Starting a new XZ Block means that the encoder state is reset.
- * Doing this very often will increase the size of the compressed
- * file a lot (more than plain
- * Calling this function very often may increase the compressed
- * file size a lot. The filter chain options may affect the size
- * increase too. For example, with LZMA2 the HC4 match finder has
- * smaller penalty with flushing than BT4.
- *
- * Some filters don't support flushing. If the filter chain has
- * such a filter,
- * Repeated calls to
- * After finishing, the stream may be closed normally with
- *
- * @param dictSize dictionary size
- *
- * @param extraSizeBefore
- * number of bytes to keep available in the
- * history in addition to dictSize
- *
- * @param extraSizeAfter
- * number of bytes that must be available
- * after current position + matchLenMax
- *
- * @param niceLen if a match of at least
- * Note that the result is undefined if
- * The current byte is at
- * This function is equivalent to
- * A compressed LZMA2 chunk can hold 2 MiB of uncompressed data.
- * A single LZMA symbol may indicate up to MATCH_LEN_MAX bytes
- * of data, so the LZMA2 chunk is considered full when there is
- * less space than MATCH_LEN_MAX bytes.
- */
- private static final int LZMA2_UNCOMPRESSED_LIMIT
- = (2 << 20) - MATCH_LEN_MAX;
-
- /**
- * LZMA2 chunk is considered full when its compressed size exceeds
- *
- * The maximum compressed size of a LZMA2 chunk is 64 KiB.
- * A single LZMA symbol might use 20 bytes of space even though
- * it usually takes just one byte or so. Two more bytes are needed
- * for LZMA2 uncompressed chunks (see LZMA2OutputStream.writeChunk).
- * Leave a little safety margin and use 26 bytes.
- */
- private static final int LZMA2_COMPRESSED_LIMIT = (64 << 10) - 26;
-
- private static final int DIST_PRICE_UPDATE_INTERVAL = FULL_DISTANCES;
- private static final int ALIGN_PRICE_UPDATE_INTERVAL = ALIGN_SIZE;
-
- private final RangeEncoder rc;
- final LZEncoder lz;
- final LiteralEncoder literalEncoder;
- final LengthEncoder matchLenEncoder;
- final LengthEncoder repLenEncoder;
- final int niceLen;
-
- private int distPriceCount = 0;
- private int alignPriceCount = 0;
-
- private final int distSlotPricesSize;
- private final int[][] distSlotPrices;
- private final int[][] fullDistPrices
- = new int[DIST_STATES][FULL_DISTANCES];
- private final int[] alignPrices = new int[ALIGN_SIZE];
-
- int back = 0;
- int readAhead = -1;
- private int uncompressedSize = 0;
-
- public static int getMemoryUsage(int mode, int dictSize,
- int extraSizeBefore, int mf) {
- int m = 80;
-
- switch (mode) {
- case MODE_FAST:
- m += LZMAEncoderFast.getMemoryUsage(
- dictSize, extraSizeBefore, mf);
- break;
-
- case MODE_NORMAL:
- m += LZMAEncoderNormal.getMemoryUsage(
- dictSize, extraSizeBefore, mf);
- break;
-
- default:
- throw new IllegalArgumentException();
- }
-
- return m;
- }
-
- public static LZMAEncoder getInstance(
- RangeEncoder rc, int lc, int lp, int pb, int mode,
- int dictSize, int extraSizeBefore,
- int niceLen, int mf, int depthLimit,
- ArrayCache arrayCache) {
- switch (mode) {
- case MODE_FAST:
- return new LZMAEncoderFast(rc, lc, lp, pb,
- dictSize, extraSizeBefore,
- niceLen, mf, depthLimit,
- arrayCache);
-
- case MODE_NORMAL:
- return new LZMAEncoderNormal(rc, lc, lp, pb,
- dictSize, extraSizeBefore,
- niceLen, mf, depthLimit,
- arrayCache);
- }
-
- throw new IllegalArgumentException();
- }
-
- public void putArraysToCache(ArrayCache arrayCache) {
- lz.putArraysToCache(arrayCache);
- }
-
- /**
- * Gets an integer [0, 63] matching the highest two bits of an integer.
- * This is like bit scan reverse (BSR) on x86 except that this also
- * cares about the second highest bit.
- */
- public static int getDistSlot(int dist) {
- if (dist <= DIST_MODEL_START && dist >= 0)
- return dist;
-
- int n = dist;
- int i = 31;
-
- if ((n & 0xFFFF0000) == 0) {
- n <<= 16;
- i = 15;
- }
-
- if ((n & 0xFF000000) == 0) {
- n <<= 8;
- i -= 8;
- }
-
- if ((n & 0xF0000000) == 0) {
- n <<= 4;
- i -= 4;
- }
-
- if ((n & 0xC0000000) == 0) {
- n <<= 2;
- i -= 2;
- }
-
- if ((n & 0x80000000) == 0)
- --i;
-
- return (i << 1) + ((dist >>> (i - 1)) & 1);
- }
-
- /**
- * Gets the next LZMA symbol.
- *
- * There are three types of symbols: literal (a single byte),
- * repeated match, and normal match. The symbol is indicated
- * by the return value and by the variable
- * Literal:
- * Repeated match:
- * Normal match:
- * This aims to be a complete implementation of XZ data compression
- * in pure Java. Features:
- *
- * Threading is planned but it is unknown when it will be implemented.
- *
- * For the latest source code, see the
- * home page of XZ for Java.
- *
- *
- * Start by reading the documentation of {@link org.tukaani.xz.XZOutputStream}
- * and {@link org.tukaani.xz.XZInputStream}.
- * If you use XZ inside another file format or protocol,
- * see also {@link org.tukaani.xz.SingleXZInputStream}.
- *
- *
- * XZ for Java has been put into the public domain, thus you can do
- * whatever you want with it. All the files in the package have been
- * written by Lasse Collin, Igor Pavlov, and/or Brett Okken.
- *
- * This software is provided "as is", without any warranty.
- */
-package org.tukaani.xz;
diff --git a/app/src/main/java/org/tukaani/xz/rangecoder/RangeCoder.java b/app/src/main/java/org/tukaani/xz/rangecoder/RangeCoder.java
deleted file mode 100644
index df9b0c4..0000000
--- a/app/src/main/java/org/tukaani/xz/rangecoder/RangeCoder.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * RangeCoder
- *
- * Authors: Lasse Collin 0
.
- */
- public void setStartOffset(int startOffset)
- throws UnsupportedOptionsException {
- if ((startOffset & (alignment - 1)) != 0)
- throw new UnsupportedOptionsException(
- "Start offset must be a multiple of " + alignment);
-
- this.startOffset = startOffset;
- }
-
- /**
- * Gets the start offset.
- */
- public int getStartOffset() {
- return startOffset;
- }
-
- public int getEncoderMemoryUsage() {
- return SimpleOutputStream.getMemoryUsage();
- }
-
- public int getDecoderMemoryUsage() {
- return SimpleInputStream.getMemoryUsage();
- }
-
- public Object clone() {
- try {
- return super.clone();
- } catch (CloneNotSupportedException e) {
- assert false;
- throw new RuntimeException();
- }
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/BasicArrayCache.java b/app/src/main/java/org/tukaani/xz/BasicArrayCache.java
deleted file mode 100644
index 90ebe1f..0000000
--- a/app/src/main/java/org/tukaani/xz/BasicArrayCache.java
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * BasicArrayCache
- *
- * Author: Lasse Collin
- *
- * InputStream rawdec = new LZMA2InputStream(
- * new CloseIgnoringInputStream(myInputStream),
- * myDictSize, null, myArrayCache);
- * doSomething(rawdec);
- * rawdec.close(); // This doesn't close myInputStream.
- *
close()
method does nothing, that is, the underlying
- * InputStream
isn't closed.
- */
-class CountingInputStream extends CloseIgnoringInputStream {
- private long size = 0;
-
- public CountingInputStream(InputStream in) {
- super(in);
- }
-
- public int read() throws IOException {
- int ret = in.read();
- if (ret != -1 && size >= 0)
- ++size;
-
- return ret;
- }
-
- public int read(byte[] b, int off, int len) throws IOException {
- int ret = in.read(b, off, len);
- if (ret > 0 && size >= 0)
- size += ret;
-
- return ret;
- }
-
- public long getSize() {
- return size;
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/CountingOutputStream.java b/app/src/main/java/org/tukaani/xz/CountingOutputStream.java
deleted file mode 100644
index 9b3eef3..0000000
--- a/app/src/main/java/org/tukaani/xz/CountingOutputStream.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * CountingOutputStream
- *
- * Author: Lasse Collin finish
method does nothing.
- * This is FinishableOutputStream
instead
- * of OutputStream
solely because it allows
- * using this as the output stream for a chain of raw filters.
- */
-class CountingOutputStream extends FinishableOutputStream {
- private final OutputStream out;
- private long size = 0;
-
- public CountingOutputStream(OutputStream out) {
- this.out = out;
- }
-
- public void write(int b) throws IOException {
- out.write(b);
- if (size >= 0)
- ++size;
- }
-
- public void write(byte[] b, int off, int len) throws IOException {
- out.write(b, off, len);
- if (size >= 0)
- size += len;
- }
-
- public void flush() throws IOException {
- out.flush();
- }
-
- public void close() throws IOException {
- out.close();
- }
-
- public long getSize() {
- return size;
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/DeltaCoder.java b/app/src/main/java/org/tukaani/xz/DeltaCoder.java
deleted file mode 100644
index 808834c..0000000
--- a/app/src/main/java/org/tukaani/xz/DeltaCoder.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * DeltaCoder
- *
- * Author: Lasse Collin DISTANCE_MIN
,
- * DISTANCE_MAX
]
- */
- public DeltaInputStream(InputStream in, int distance) {
- // Check for null because otherwise null isn't detect
- // in this constructor.
- if (in == null)
- throw new NullPointerException();
-
- this.in = in;
- this.delta = new DeltaDecoder(distance);
- }
-
- /**
- * Decode the next byte from this input stream.
- *
- * @return the next decoded byte, or -1
to indicate
- * the end of input on the input stream in
- *
- * @throws IOException may be thrown by in
- */
- public int read() throws IOException {
- return read(tempBuf, 0, 1) == -1 ? -1 : (tempBuf[0] & 0xFF);
- }
-
- /**
- * Decode into an array of bytes.
- * in.read(buf, off, len)
and defilters the
- * returned data.
- *
- * @param buf target buffer for decoded data
- * @param off start offset in buf
- * @param len maximum number of bytes to read
- *
- * @return number of bytes read, or -1
to indicate
- * the end of the input stream in
- *
- * @throws XZIOException if the stream has been closed
- *
- * @throws IOException may be thrown by underlaying input
- * stream in
- */
- public int read(byte[] buf, int off, int len) throws IOException {
- if (len == 0)
- return 0;
-
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (exception != null)
- throw exception;
-
- int size;
- try {
- size = in.read(buf, off, len);
- } catch (IOException e) {
- exception = e;
- throw e;
- }
-
- if (size == -1)
- return -1;
-
- delta.decode(buf, off, size);
- return size;
- }
-
- /**
- * Calls in.available()
.
- *
- * @return the value returned by in.available()
- */
- public int available() throws IOException {
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (exception != null)
- throw exception;
-
- return in.available();
- }
-
- /**
- * Closes the stream and calls in.close()
.
- * If the stream was already closed, this does nothing.
- *
- * @throws IOException if thrown by in.close()
- */
- public void close() throws IOException {
- if (in != null) {
- try {
- in.close();
- } finally {
- in = null;
- }
- }
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/DeltaOptions.java b/app/src/main/java/org/tukaani/xz/DeltaOptions.java
deleted file mode 100644
index fac74d9..0000000
--- a/app/src/main/java/org/tukaani/xz/DeltaOptions.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * DeltaOptions
- *
- * Author: Lasse Collin getEncoderMemoryUsage()
for every filter
- * in the array and returns the sum of the returned values.
- */
- public static int getEncoderMemoryUsage(FilterOptions[] options) {
- int m = 0;
-
- for (int i = 0; i < options.length; ++i)
- m += options[i].getEncoderMemoryUsage();
-
- return m;
- }
-
- /**
- * Gets how much memory the decoder will need with
- * the given filter chain. This function simply calls
- * getDecoderMemoryUsage()
for every filter
- * in the array and returns the sum of the returned values.
- */
- public static int getDecoderMemoryUsage(FilterOptions[] options) {
- int m = 0;
-
- for (int i = 0; i < options.length; ++i)
- m += options[i].getDecoderMemoryUsage();
-
- return m;
- }
-
- /**
- * Gets how much memory the encoder will need with these options.
- */
- public abstract int getEncoderMemoryUsage();
-
- /**
- * Gets a raw (no XZ headers) encoder output stream using these options.
- * Raw streams are an advanced feature. In most cases you want to store
- * the compressed data in the .xz container format instead of using
- * a raw stream. To use this filter in a .xz file, pass this object
- * to XZOutputStream.
- * finish
method of FinishableOutputStream
- * does nothing. Subclasses should override it if they need finishing
- * support, which is the case, for example, with compressors.
- *
- * @throws IOException
- */
- public void finish() throws IOException {}
-}
diff --git a/app/src/main/java/org/tukaani/xz/FinishableWrapperOutputStream.java b/app/src/main/java/org/tukaani/xz/FinishableWrapperOutputStream.java
deleted file mode 100644
index 2e0ac99..0000000
--- a/app/src/main/java/org/tukaani/xz/FinishableWrapperOutputStream.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * FinishableWrapperOutputStream
- *
- * Author: Lasse Collin finish()
method will do nothing.
- */
- public FinishableWrapperOutputStream(OutputStream out) {
- this.out = out;
- }
-
- /**
- * Calls {@link java.io.OutputStream#write(int) out.write(b)}.
- */
- public void write(int b) throws IOException {
- out.write(b);
- }
-
- /**
- * Calls {@link java.io.OutputStream#write(byte[]) out.write(buf)}.
- */
- public void write(byte[] buf) throws IOException {
- out.write(buf);
- }
-
- /**
- * Calls {@link java.io.OutputStream#write(byte[],int,int)
- out.write(buf, off, len)}.
- */
- public void write(byte[] buf, int off, int len) throws IOException {
- out.write(buf, off, len);
- }
-
- /**
- * Calls {@link java.io.OutputStream#flush() out.flush()}.
- */
- public void flush() throws IOException {
- out.flush();
- }
-
- /**
- * Calls {@link java.io.OutputStream#close() out.close()}.
- */
- public void close() throws IOException {
- out.close();
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/IA64Options.java b/app/src/main/java/org/tukaani/xz/IA64Options.java
deleted file mode 100644
index 491edcf..0000000
--- a/app/src/main/java/org/tukaani/xz/IA64Options.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * IA64Options
- *
- * Author: Lasse Collin DICT_SIZE_MIN
,
- * DICT_SIZE_MAX
]
- *
- * @return approximate memory requirements as kibibytes (KiB)
- */
- public static int getMemoryUsage(int dictSize) {
- // The base state is around 30-40 KiB (probabilities etc.),
- // range decoder needs COMPRESSED_SIZE_MAX bytes for buffering,
- // and LZ decoder needs a dictionary buffer.
- return 40 + COMPRESSED_SIZE_MAX / 1024 + getDictSize(dictSize) / 1024;
- }
-
- private static int getDictSize(int dictSize) {
- if (dictSize < DICT_SIZE_MIN || dictSize > DICT_SIZE_MAX)
- throw new IllegalArgumentException(
- "Unsupported dictionary size " + dictSize);
-
- // Round dictionary size upward to a multiple of 16. This way LZMA
- // can use LZDecoder.getPos() for calculating LZMA's posMask.
- // Note that this check is needed only for raw LZMA2 streams; it is
- // redundant with .xz.
- return (dictSize + 15) & ~15;
- }
-
- /**
- * Creates a new input stream that decompresses raw LZMA2 data
- * from in
.
- * DICT_SIZE_MIN
,
- * DICT_SIZE_MAX
]
- */
- public LZMA2InputStream(InputStream in, int dictSize) {
- this(in, dictSize, null);
- }
-
- /**
- * Creates a new LZMA2 decompressor using a preset dictionary.
- * LZMA2InputStream(InputStream, int)
except
- * that the dictionary may be initialized using a preset dictionary.
- * If a preset dictionary was used when compressing the data, the
- * same preset dictionary must be provided when decompressing.
- *
- * @param in input stream from which LZMA2-compressed
- * data is read
- *
- * @param dictSize LZMA2 dictionary size as bytes, must be
- * in the range [DICT_SIZE_MIN
,
- * DICT_SIZE_MAX
]
- *
- * @param presetDict preset dictionary or null
- * to use no preset dictionary
- */
- public LZMA2InputStream(InputStream in, int dictSize, byte[] presetDict) {
- this(in, dictSize, presetDict, ArrayCache.getDefaultCache());
- }
-
- /**
- * Creates a new LZMA2 decompressor using a preset dictionary
- * and array cache.
- * LZMA2InputStream(InputStream, int, byte[])
- * except that this also takes the arrayCache
argument.
- *
- * @param in input stream from which LZMA2-compressed
- * data is read
- *
- * @param dictSize LZMA2 dictionary size as bytes, must be
- * in the range [DICT_SIZE_MIN
,
- * DICT_SIZE_MAX
]
- *
- * @param presetDict preset dictionary or null
- * to use no preset dictionary
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @since 1.7
- */
- LZMA2InputStream(InputStream in, int dictSize, byte[] presetDict,
- ArrayCache arrayCache) {
- // Check for null because otherwise null isn't detect
- // in this constructor.
- if (in == null)
- throw new NullPointerException();
-
- this.arrayCache = arrayCache;
- this.in = new DataInputStream(in);
- this.rc = new RangeDecoderFromBuffer(COMPRESSED_SIZE_MAX, arrayCache);
- this.lz = new LZDecoder(getDictSize(dictSize), presetDict, arrayCache);
-
- if (presetDict != null && presetDict.length > 0)
- needDictReset = false;
- }
-
- /**
- * Decompresses the next byte from this input stream.
- * read()
from this input stream
- * may be inefficient. Wrap it in java.io.BufferedInputStream
- * if you need to read lots of data one byte at a time.
- *
- * @return the next decompressed byte, or -1
- * to indicate the end of the compressed stream
- *
- * @throws CorruptedInputException
- *
- * @throws XZIOException if the stream has been closed
- *
- * @throws EOFException
- * compressed input is truncated or corrupt
- *
- * @throws IOException may be thrown by in
- */
- public int read() throws IOException {
- return read(tempBuf, 0, 1) == -1 ? -1 : (tempBuf[0] & 0xFF);
- }
-
- /**
- * Decompresses into an array of bytes.
- * len
is zero, no bytes are read and 0
- * is returned. Otherwise this will block until len
- * bytes have been decompressed, the end of the LZMA2 stream is reached,
- * or an exception is thrown.
- *
- * @param buf target buffer for uncompressed data
- * @param off start offset in buf
- * @param len maximum number of uncompressed bytes to read
- *
- * @return number of bytes read, or -1
to indicate
- * the end of the compressed stream
- *
- * @throws CorruptedInputException
- *
- * @throws XZIOException if the stream has been closed
- *
- * @throws EOFException
- * compressed input is truncated or corrupt
- *
- * @throws IOException may be thrown by in
- */
- public int read(byte[] buf, int off, int len) throws IOException {
- if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
- throw new IndexOutOfBoundsException();
-
- if (len == 0)
- return 0;
-
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (exception != null)
- throw exception;
-
- if (endReached)
- return -1;
-
- try {
- int size = 0;
-
- while (len > 0) {
- if (uncompressedSize == 0) {
- decodeChunkHeader();
- if (endReached)
- return size == 0 ? -1 : size;
- }
-
- int copySizeMax = Math.min(uncompressedSize, len);
-
- if (!isLZMAChunk) {
- lz.copyUncompressed(in, copySizeMax);
- } else {
- lz.setLimit(copySizeMax);
- lzma.decode();
- }
-
- int copiedSize = lz.flush(buf, off);
- off += copiedSize;
- len -= copiedSize;
- size += copiedSize;
- uncompressedSize -= copiedSize;
-
- if (uncompressedSize == 0)
- if (!rc.isFinished() || lz.hasPending())
- throw new CorruptedInputException();
- }
-
- return size;
-
- } catch (IOException e) {
- exception = e;
- throw e;
- }
- }
-
- private void decodeChunkHeader() throws IOException {
- int control = in.readUnsignedByte();
-
- if (control == 0x00) {
- endReached = true;
- putArraysToCache();
- return;
- }
-
- if (control >= 0xE0 || control == 0x01) {
- needProps = true;
- needDictReset = false;
- lz.reset();
- } else if (needDictReset) {
- throw new CorruptedInputException();
- }
-
- if (control >= 0x80) {
- isLZMAChunk = true;
-
- uncompressedSize = (control & 0x1F) << 16;
- uncompressedSize += in.readUnsignedShort() + 1;
-
- int compressedSize = in.readUnsignedShort() + 1;
-
- if (control >= 0xC0) {
- needProps = false;
- decodeProps();
-
- } else if (needProps) {
- throw new CorruptedInputException();
-
- } else if (control >= 0xA0) {
- lzma.reset();
- }
-
- rc.prepareInputBuffer(in, compressedSize);
-
- } else if (control > 0x02) {
- throw new CorruptedInputException();
-
- } else {
- isLZMAChunk = false;
- uncompressedSize = in.readUnsignedShort() + 1;
- }
- }
-
- private void decodeProps() throws IOException {
- int props = in.readUnsignedByte();
-
- if (props > (4 * 5 + 4) * 9 + 8)
- throw new CorruptedInputException();
-
- int pb = props / (9 * 5);
- props -= pb * 9 * 5;
- int lp = props / 9;
- int lc = props - lp * 9;
-
- if (lc + lp > 4)
- throw new CorruptedInputException();
-
- lzma = new LZMADecoder(lz, rc, lc, lp, pb);
- }
-
- /**
- * Returns the number of uncompressed bytes that can be read
- * without blocking. The value is returned with an assumption
- * that the compressed input data will be valid. If the compressed
- * data is corrupt, CorruptedInputException
may get
- * thrown before the number of bytes claimed to be available have
- * been read from this input stream.
- * available()
method of the underlying InputStream.
- *
- * @return the number of uncompressed bytes that can be read
- * without blocking
- */
- public int available() throws IOException {
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (exception != null)
- throw exception;
-
- return isLZMAChunk ? uncompressedSize
- : Math.min(uncompressedSize, in.available());
- }
-
- private void putArraysToCache() {
- if (lz != null) {
- lz.putArraysToCache(arrayCache);
- lz = null;
-
- rc.putArraysToCache(arrayCache);
- rc = null;
- }
- }
-
- /**
- * Closes the stream and calls in.close()
.
- * If the stream was already closed, this does nothing.
- *
- * @throws IOException if thrown by in.close()
- */
- public void close() throws IOException {
- if (in != null) {
- putArraysToCache();
-
- try {
- in.close();
- } finally {
- in = null;
- }
- }
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/LZMA2Options.java b/app/src/main/java/org/tukaani/xz/LZMA2Options.java
deleted file mode 100644
index 21e186e..0000000
--- a/app/src/main/java/org/tukaani/xz/LZMA2Options.java
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * LZMA2Options
- *
- * Author: Lasse Collin <lasse.collin@tukaani.org>
- */
-
-/**
- * LZMA2 compression options.
- * <p>
- * While the options can be set in detail, often one only needs
- * <code>LZMA2Options()</code> or <code>LZMA2Options(int)</code>.
- */
-public class LZMA2Options extends FilterOptions {
- /**
- * Minimum valid compression preset level is 0.
- */
- public static final int PRESET_MIN = 0;
-
- /**
- * Maximum valid compression preset level is 9.
- */
- public static final int PRESET_MAX = 9;
-
- /**
- * Default compression preset level is 6.
- */
- public static final int PRESET_DEFAULT = 6;
-
- /**
- * Minimum dictionary size is 4 KiB.
- */
- public static final int DICT_SIZE_MIN = 4096;
-
- /**
- * Maximum dictionary size for compression is 768 MiB.
- */
-
- /**
- * Minimum value for <code>niceLen</code> is 8.
- */
- public static final int NICE_LEN_MIN = 8;
-
- /**
- * Maximum value for <code>niceLen</code> is 273.
- */
- public static final int NICE_LEN_MAX = 273;
-
- /**
- * Match finder: Hash Chain 2-3-4
- */
- public static final int MF_HC4 = LZEncoder.MF_HC4;
-
- /**
- * Match finder: Binary tree 2-3-4
- */
- public static final int MF_BT4 = LZEncoder.MF_BT4;
-
- private static final int[] presetToDictSize = {
- 1 << 18, 1 << 20, 1 << 21, 1 << 22, 1 << 22,
- 1 << 23, 1 << 23, 1 << 24, 1 << 25, 1 << 26 };
-
- private static final int[] presetToDepthLimit = { 4, 8, 24, 48 };
-
- private int dictSize;
- private byte[] presetDict = null;
- private int lc;
- private int lp;
- private int pb;
- private int mode;
- private int niceLen;
- private int mf;
- private int depthLimit;
-
- /**
- * Creates new LZMA2 options and sets them to the default values.
- * This is equivalent to <code>LZMA2Options(PRESET_DEFAULT)</code>.
- */
- public LZMA2Options() {
- try {
- setPreset(PRESET_DEFAULT);
- } catch (UnsupportedOptionsException e) {
- assert false;
- throw new RuntimeException();
- }
- }
-
- /**
- * Creates new LZMA2 options and sets them to the given preset.
- *
- * @throws UnsupportedOptionsException
- * <code>preset</code> is not supported
- */
- public LZMA2Options(int preset) throws UnsupportedOptionsException {
- setPreset(preset);
- }
-
- /**
- * Creates new LZMA2 options and sets them to the given custom values.
- *
- * @throws UnsupportedOptionsException
- * unsupported options were specified
- */
- public LZMA2Options(int dictSize, int lc, int lp, int pb, int mode,
- int niceLen, int mf, int depthLimit)
- throws UnsupportedOptionsException {
- setDictSize(dictSize);
- setLcLp(lc, lp);
- setPb(pb);
- setMode(mode);
- setNiceLen(niceLen);
- setMatchFinder(mf);
- setDepthLimit(depthLimit);
- }
-
- /**
- * Sets the compression options to the given preset.
- * The presets 0-9 are supported. The default preset
- * (<code>PRESET_DEFAULT</code>) is 6.
- *
- * @throws UnsupportedOptionsException
- * <code>preset</code> is not supported
- */
- public void setPreset(int preset) throws UnsupportedOptionsException {
- if (preset < 0 || preset > 9)
- throw new UnsupportedOptionsException(
- "Unsupported preset: " + preset);
-
- lc = LC_DEFAULT;
- lp = LP_DEFAULT;
- pb = PB_DEFAULT;
- dictSize = presetToDictSize[preset];
-
- if (preset <= 3) {
- mode = MODE_FAST;
- mf = MF_HC4;
- niceLen = preset <= 1 ? 128 : NICE_LEN_MAX;
- depthLimit = presetToDepthLimit[preset];
- } else {
- mode = MODE_NORMAL;
- mf = MF_BT4;
- niceLen = (preset == 4) ? 16 : (preset == 5) ? 32 : 64;
- depthLimit = 0;
- }
- }
-
- /**
- * Sets the dictionary size in bytes.
- *
- * @throws UnsupportedOptionsException
- * <code>dictSize</code> is not supported
- */
- public void setDictSize(int dictSize) throws UnsupportedOptionsException {
- if (dictSize < DICT_SIZE_MIN)
- throw new UnsupportedOptionsException(
- "LZMA2 dictionary size must be at least 4 KiB: "
- + dictSize + " B");
-
- if (dictSize > DICT_SIZE_MAX)
- throw new UnsupportedOptionsException(
- "LZMA2 dictionary size must not exceed "
- + (DICT_SIZE_MAX >> 20) + " MiB: " + dictSize + " B");
-
- this.dictSize = dictSize;
- }
-
- /**
- * Gets the dictionary size in bytes.
- */
- public int getDictSize() {
- return dictSize;
- }
-
- /**
- * Sets a preset dictionary. Use null to disable the use of
- * a preset dictionary. By default there is no preset dictionary.
- * lc
and lp
is limited to 4.
- * Trying to exceed it will throw an exception. This function lets
- * you change both at the same time.
- *
- * @throws UnsupportedOptionsException
- * lc
and lp
- * are invalid
- */
- public void setLcLp(int lc, int lp) throws UnsupportedOptionsException {
- if (lc < 0 || lp < 0 || lc > LC_LP_MAX || lp > LC_LP_MAX
- || lc + lp > LC_LP_MAX)
- throw new UnsupportedOptionsException(
- "lc + lp must not exceed " + LC_LP_MAX + ": "
- + lc + " + " + lp);
-
- this.lc = lc;
- this.lp = lp;
- }
-
- /**
- * Sets the number of literal context bits.
- * lc
- * bits of the previous uncompressed byte correlate with the next byte.
- * For example, in typical English text, an upper-case letter is often
- * followed by a lower-case letter, and a lower-case letter is usually
- * followed by another lower-case letter. In the US-ASCII character set,
- * the highest three bits are 010 for upper-case letters and 011 for
- * lower-case letters. When lc
is at least 3, the literal
- * coding can take advantage of this property in the uncompressed data.
- * setLc(4)
. Sometimes it helps a little, and sometimes it
- * makes compression worse. If it makes it worse, test for example
- * setLc(2)
too.
- *
- * @throws UnsupportedOptionsException
- * lc
is invalid, or the sum
- * of lc
and lp
- * exceed LC_LP_MAX
- */
- public void setLc(int lc) throws UnsupportedOptionsException {
- setLcLp(lc, lp);
- }
-
- /**
- * Sets the number of literal position bits.
- * lp
is invalid, or the sum
- * of lc
and lp
- * exceed LC_LP_MAX
- */
- public void setLp(int lp) throws UnsupportedOptionsException {
- setLcLp(lc, lp);
- }
-
- /**
- * Gets the number of literal context bits.
- */
- public int getLc() {
- return lc;
- }
-
- /**
- * Gets the number of literal position bits.
- */
- public int getLp() {
- return lp;
- }
-
- /**
- * Sets the number of position bits.
- * pb
= 2^2 = 4), which is often a good choice when
- * there's no better guess.
- * setPb(0)
can improve compression slightly. For UTF-16
- * text, setPb(1)
is a good choice. If the alignment is
- * an odd number like 3 bytes, setPb(0)
might be the best
- * choice.
- * setPb
and setLp
, LZMA2 still slightly favors
- * 16-byte alignment. It might be worth taking into account when designing
- * file formats that are likely to be often compressed with LZMA2.
- *
- * @throws UnsupportedOptionsException
- * pb
is invalid
- */
- public void setPb(int pb) throws UnsupportedOptionsException {
- if (pb < 0 || pb > PB_MAX)
- throw new UnsupportedOptionsException(
- "pb must not exceed " + PB_MAX + ": " + pb);
-
- this.pb = pb;
- }
-
- /**
- * Gets the number of position bits.
- */
- public int getPb() {
- return pb;
- }
-
- /**
- * Sets the compression mode.
- * MODE_FAST
for presets
- * 0-3 and MODE_NORMAL
for presets 4-9.
- * MODE_FAST
is used with Hash Chain match finders
- * and MODE_NORMAL
with Binary Tree match finders. This is
- * also what the presets do.
- * MODE_UNCOMPRESSED
doesn't try to
- * compress the data at all (and doesn't use a match finder) and will
- * simply wrap it in uncompressed LZMA2 chunks.
- *
- * @throws UnsupportedOptionsException
- * mode
is not supported
- */
- public void setMode(int mode) throws UnsupportedOptionsException {
- if (mode < MODE_UNCOMPRESSED || mode > MODE_NORMAL)
- throw new UnsupportedOptionsException(
- "Unsupported compression mode: " + mode);
-
- this.mode = mode;
- }
-
- /**
- * Gets the compression mode.
- */
- public int getMode() {
- return mode;
- }
-
- /**
- * Sets the nice length of matches.
- * Once a match of at least niceLen
bytes is found,
- * the algorithm stops looking for better matches. Higher values tend
- * to give better compression at the expense of speed. The default
- * depends on the preset.
- *
- * @throws UnsupportedOptionsException
- * niceLen
is invalid
- */
- public void setNiceLen(int niceLen) throws UnsupportedOptionsException {
- if (niceLen < NICE_LEN_MIN)
- throw new UnsupportedOptionsException(
- "Minimum nice length of matches is "
- + NICE_LEN_MIN + " bytes: " + niceLen);
-
- if (niceLen > NICE_LEN_MAX)
- throw new UnsupportedOptionsException(
- "Maximum nice length of matches is " + NICE_LEN_MAX
- + ": " + niceLen);
-
- this.niceLen = niceLen;
- }
-
- /**
- * Gets the nice length of matches.
- */
- public int getNiceLen() {
- return niceLen;
- }
-
- /**
- * Sets the match finder type.
- * MF_HC4
and 4-9 use MF_BT4
.
- *
- * @throws UnsupportedOptionsException
- * mf
is not supported
- */
- public void setMatchFinder(int mf) throws UnsupportedOptionsException {
- if (mf != MF_HC4 && mf != MF_BT4)
- throw new UnsupportedOptionsException(
- "Unsupported match finder: " + mf);
-
- this.mf = mf;
- }
-
- /**
- * Gets the match finder type.
- */
- public int getMatchFinder() {
- return mf;
- }
-
- /**
- * Sets the match finder search depth limit.
- * 0
which indicates that
- * the depth limit should be automatically calculated by the selected
- * match finder from the nice length of matches.
- * depthLimit
is invalid
- */
- public void setDepthLimit(int depthLimit)
- throws UnsupportedOptionsException {
- if (depthLimit < 0)
- throw new UnsupportedOptionsException(
- "Depth limit cannot be negative: " + depthLimit);
-
- this.depthLimit = depthLimit;
- }
-
- /**
- * Gets the match finder search depth limit.
- */
- public int getDepthLimit() {
- return depthLimit;
- }
-
- public int getEncoderMemoryUsage() {
- return (mode == MODE_UNCOMPRESSED)
- ? UncompressedLZMA2OutputStream.getMemoryUsage()
- : LZMA2OutputStream.getMemoryUsage(this);
- }
-
- public FinishableOutputStream getOutputStream(FinishableOutputStream out,
- ArrayCache arrayCache) {
- if (mode == MODE_UNCOMPRESSED)
- return new UncompressedLZMA2OutputStream(out, arrayCache);
-
- return new LZMA2OutputStream(out, this, arrayCache);
- }
-
- /**
- * Gets how much memory the LZMA2 decoder will need to decompress the data
- * that was encoded with these options and stored in a .xz file.
- * BufferedInputStream
, the performance tends
- * to be worse (maybe 10-20 % slower) than with {@link LZMA2InputStream}
- * or {@link XZInputStream} (when the .xz file contains LZMA2-compressed data).
- *
- * @since 1.4
- */
-public class LZMAInputStream extends InputStream {
- /**
- * Largest dictionary size supported by this implementation.
- */
-
- /**
- * Gets approximate decompressor memory requirements as kibibytes for
- * the given dictionary size and LZMA properties byte (lc, lp, and pb).
- *
- * @param dictSize LZMA dictionary size as bytes, must be
- *                 in the range [<code>0</code>,
- *                 <code>DICT_SIZE_MAX</code>]
- *
- * @param propsByte LZMA properties byte that encodes the values
- * of lc, lp, and pb
- *
- * @return approximate memory requirements as kibibytes (KiB)
- *
- * @throws UnsupportedOptionsException
- * if dictSize
is outside
- * the range [0
,
- * DICT_SIZE_MAX
]
- *
- * @throws CorruptedInputException
- * if propsByte
is invalid
- */
- public static int getMemoryUsage(int dictSize, byte propsByte)
- throws UnsupportedOptionsException, CorruptedInputException {
- if (dictSize < 0 || dictSize > DICT_SIZE_MAX)
- throw new UnsupportedOptionsException(
- "LZMA dictionary is too big for this implementation");
-
- int props = propsByte & 0xFF;
- if (props > (4 * 5 + 4) * 9 + 8)
- throw new CorruptedInputException("Invalid LZMA properties byte");
-
- props %= 9 * 5;
- int lp = props / 9;
- int lc = props - lp * 9;
-
- return getMemoryUsage(dictSize, lc, lp);
- }
-
- /**
- * Gets approximate decompressor memory requirements as kibibytes for
- * the given dictionary size, lc, and lp. Note that pb isn't needed.
- *
- * @param dictSize LZMA dictionary size as bytes, must be
- * in the range [0
,
- * DICT_SIZE_MAX
]
- *
- * @param lc number of literal context bits, must be
- * in the range [0, 8]
- *
- * @param lp number of literal position bits, must be
- * in the range [0, 4]
- *
- * @return approximate memory requirements as kibibytes (KiB)
- */
- public static int getMemoryUsage(int dictSize, int lc, int lp) {
- if (lc < 0 || lc > 8 || lp < 0 || lp > 4)
- throw new IllegalArgumentException("Invalid lc or lp");
-
- // Probability variables have the type "short". There are
- // 0x300 (768) probability variables in each literal subcoder.
- // The number of literal subcoders is 2^(lc + lp).
- //
- // Roughly 10 KiB for the base state + LZ decoder's dictionary buffer
- // + sizeof(short) * number probability variables per literal subcoder
- // * number of literal subcoders
- return 10 + getDictSize(dictSize) / 1024
- + ((2 * 0x300) << (lc + lp)) / 1024;
- }
-
- private static int getDictSize(int dictSize) {
- if (dictSize < 0 || dictSize > DICT_SIZE_MAX)
- throw new IllegalArgumentException(
- "LZMA dictionary is too big for this implementation");
-
- // For performance reasons, use a 4 KiB dictionary if something
- // smaller was requested. It's a rare situation and the performance
- // difference isn't huge, and it starts to matter mostly when the
- // dictionary is just a few bytes. But we need to handle the special
- // case of dictSize == 0 anyway, which is an allowed value but in
- // practice means one-byte dictionary.
- //
- // Note that using a dictionary bigger than specified in the headers
- // can hide errors if there is a reference to data beyond the original
- // dictionary size but is still within 4 KiB.
- if (dictSize < 4096)
- dictSize = 4096;
-
- // Round dictionary size upward to a multiple of 16. This way LZMA
- // can use LZDecoder.getPos() for calculating LZMA's posMask.
- return (dictSize + 15) & ~15;
- }
-
- /**
- * Creates a new .lzma file format decompressor without
- * a memory usage limit.
- *
- * @param in input stream from which .lzma data is read;
- * it might be a good idea to wrap it in
- * BufferedInputStream
, see the
- * note at the top of this page
- *
- * @throws CorruptedInputException
- * file is corrupt or perhaps not in
- * the .lzma format at all
- *
- * @throws UnsupportedOptionsException
- * dictionary size or uncompressed size is too
- * big for this implementation
- *
- * @throws EOFException
- * file is truncated or perhaps not in
- * the .lzma format at all
- *
- * @throws IOException may be thrown by in
- */
- public LZMAInputStream(InputStream in) throws IOException {
- this(in, -1);
- }
-
- /**
- * Creates a new .lzma file format decompressor without
- * a memory usage limit.
- * LZMAInputStream(InputStream)
- * except that this also takes the arrayCache
argument.
- *
- * @param in input stream from which .lzma data is read;
- * it might be a good idea to wrap it in
- * BufferedInputStream
, see the
- * note at the top of this page
- *
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws CorruptedInputException
- * file is corrupt or perhaps not in
- * the .lzma format at all
- *
- * @throws UnsupportedOptionsException
- * dictionary size or uncompressed size is too
- * big for this implementation
- *
- * @throws EOFException
- * file is truncated or perhaps not in
- * the .lzma format at all
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public LZMAInputStream(InputStream in, ArrayCache arrayCache)
- throws IOException {
- this(in, -1, arrayCache);
- }
-
- /**
- * Creates a new .lzma file format decompressor with an optional
- * memory usage limit.
- *
- * @param in input stream from which .lzma data is read;
- * it might be a good idea to wrap it in
- * BufferedInputStream
, see the
- * note at the top of this page
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @throws CorruptedInputException
- * file is corrupt or perhaps not in
- * the .lzma format at all
- *
- * @throws UnsupportedOptionsException
- * dictionary size or uncompressed size is too
- * big for this implementation
- *
- * @throws MemoryLimitException
- * memory usage limit was exceeded
- *
- * @throws EOFException
- * file is truncated or perhaps not in
- * the .lzma format at all
- *
- * @throws IOException may be thrown by in
- */
- public LZMAInputStream(InputStream in, int memoryLimit)
- throws IOException {
- this(in, memoryLimit, ArrayCache.getDefaultCache());
- }
-
- /**
- * Creates a new .lzma file format decompressor with an optional
- * memory usage limit.
- * LZMAInputStream(InputStream, int)
- * except that this also takes the arrayCache
argument.
- *
- * @param in input stream from which .lzma data is read;
- * it might be a good idea to wrap it in
- * BufferedInputStream
, see the
- * note at the top of this page
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws CorruptedInputException
- * file is corrupt or perhaps not in
- * the .lzma format at all
- *
- * @throws UnsupportedOptionsException
- * dictionary size or uncompressed size is too
- * big for this implementation
- *
- * @throws MemoryLimitException
- * memory usage limit was exceeded
- *
- * @throws EOFException
- * file is truncated or perhaps not in
- * the .lzma format at all
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public LZMAInputStream(InputStream in, int memoryLimit,
- ArrayCache arrayCache) throws IOException {
- DataInputStream inData = new DataInputStream(in);
-
- // Properties byte (lc, lp, and pb)
- byte propsByte = inData.readByte();
-
- // Dictionary size is an unsigned 32-bit little endian integer.
- int dictSize = 0;
- for (int i = 0; i < 4; ++i)
- dictSize |= inData.readUnsignedByte() << (8 * i);
-
- // Uncompressed size is an unsigned 64-bit little endian integer.
- // The maximum 64-bit value is a special case (becomes -1 here)
- // which indicates that the end marker is used instead of knowing
- // the uncompressed size beforehand.
- long uncompSize = 0;
- for (int i = 0; i < 8; ++i)
- uncompSize |= (long)inData.readUnsignedByte() << (8 * i);
-
- // Check the memory usage limit.
- int memoryNeeded = getMemoryUsage(dictSize, propsByte);
- if (memoryLimit != -1 && memoryNeeded > memoryLimit)
- throw new MemoryLimitException(memoryNeeded, memoryLimit);
-
- initialize(in, uncompSize, propsByte, dictSize, null, arrayCache);
- }
-
- /**
- * Creates a new input stream that decompresses raw LZMA data (no .lzma
- * header) from in
.
- * 0
, DICT_SIZE_MAX
]
- *
- * @throws CorruptedInputException
- * if propsByte
is invalid or
- * the first input byte is not 0x00
- *
- * @throws UnsupportedOptionsException
- * dictionary size or uncompressed size is too
- * big for this implementation
- *
- *
- */
- public LZMAInputStream(InputStream in, long uncompSize, byte propsByte,
- int dictSize) throws IOException {
- initialize(in, uncompSize, propsByte, dictSize, null,
- ArrayCache.getDefaultCache());
- }
-
- /**
- * Creates a new input stream that decompresses raw LZMA data (no .lzma
- * header) from in
optionally with a preset dictionary.
- *
- * @param in input stream from which LZMA-compressed
- * data is read
- *
- * @param uncompSize uncompressed size of the LZMA stream or -1
- * if the end marker is used in the LZMA stream
- *
- * @param propsByte LZMA properties byte that has the encoded
- * values for literal context bits (lc), literal
- * position bits (lp), and position bits (pb)
- *
- * @param dictSize dictionary size as bytes, must be in the range
- * [0
, DICT_SIZE_MAX
]
- *
- * @param presetDict preset dictionary or null
- * to use no preset dictionary
- *
- * @throws CorruptedInputException
- * if propsByte
is invalid or
- * the first input byte is not 0x00
- *
- * @throws UnsupportedOptionsException
- * dictionary size or uncompressed size is too
- * big for this implementation
- *
- * @throws EOFException file is truncated or corrupt
- *
- * @throws IOException may be thrown by in
- */
- public LZMAInputStream(InputStream in, long uncompSize, byte propsByte,
- int dictSize, byte[] presetDict)
- throws IOException {
- initialize(in, uncompSize, propsByte, dictSize, presetDict,
- ArrayCache.getDefaultCache());
- }
-
- /**
- * Creates a new input stream that decompresses raw LZMA data (no .lzma
- * header) from in
optionally with a preset dictionary.
- * LZMAInputStream(InputStream, long, byte, int,
- * byte[])
except that this also takes the arrayCache
- * argument.
- *
- * @param in input stream from which LZMA-compressed
- * data is read
- *
- * @param uncompSize uncompressed size of the LZMA stream or -1
- * if the end marker is used in the LZMA stream
- *
- * @param propsByte LZMA properties byte that has the encoded
- * values for literal context bits (lc), literal
- * position bits (lp), and position bits (pb)
- *
- * @param dictSize dictionary size as bytes, must be in the range
- * [0
, DICT_SIZE_MAX
]
- *
- * @param presetDict preset dictionary or null
- * to use no preset dictionary
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws CorruptedInputException
- * if propsByte
is invalid or
- * the first input byte is not 0x00
- *
- * @throws UnsupportedOptionsException
- * dictionary size or uncompressed size is too
- * big for this implementation
- *
- * @throws EOFException file is truncated or corrupt
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public LZMAInputStream(InputStream in, long uncompSize, byte propsByte,
- int dictSize, byte[] presetDict,
- ArrayCache arrayCache)
- throws IOException {
- initialize(in, uncompSize, propsByte, dictSize, presetDict,
- arrayCache);
- }
-
- /**
- * Creates a new input stream that decompresses raw LZMA data (no .lzma
- * header) from in
optionally with a preset dictionary.
- *
- * @param in input stream from which LZMA-compressed
- * data is read
- *
- * @param uncompSize uncompressed size of the LZMA stream or -1
- * if the end marker is used in the LZMA stream
- *
- * @param lc number of literal context bits, must be
- * in the range [0, 8]
- *
- * @param lp number of literal position bits, must be
- * in the range [0, 4]
- *
- * @param pb number position bits, must be
- * in the range [0, 4]
- *
- * @param dictSize dictionary size as bytes, must be in the range
- * [0
, DICT_SIZE_MAX
]
- *
- * @param presetDict preset dictionary or null
- * to use no preset dictionary
- *
- * @throws CorruptedInputException
- * if the first input byte is not 0x00
- *
- * @throws EOFException file is truncated or corrupt
- *
- * @throws IOException may be thrown by in
- */
- public LZMAInputStream(InputStream in, long uncompSize,
- int lc, int lp, int pb,
- int dictSize, byte[] presetDict)
- throws IOException {
- initialize(in, uncompSize, lc, lp, pb, dictSize, presetDict,
- ArrayCache.getDefaultCache());
- }
-
- /**
- * Creates a new input stream that decompresses raw LZMA data (no .lzma
- * header) from in
optionally with a preset dictionary.
- * LZMAInputStream(InputStream, long, int, int,
- * int, int, byte[])
except that this also takes the
- * arrayCache
argument.
- *
- * @param in input stream from which LZMA-compressed
- * data is read
- *
- * @param uncompSize uncompressed size of the LZMA stream or -1
- * if the end marker is used in the LZMA stream
- *
- * @param lc number of literal context bits, must be
- * in the range [0, 8]
- *
- * @param lp number of literal position bits, must be
- * in the range [0, 4]
- *
- * @param pb number position bits, must be
- * in the range [0, 4]
- *
- * @param dictSize dictionary size as bytes, must be in the range
- * [0
, DICT_SIZE_MAX
]
- *
- * @param presetDict preset dictionary or null
- * to use no preset dictionary
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws CorruptedInputException
- * if the first input byte is not 0x00
- *
- * @throws EOFException file is truncated or corrupt
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public LZMAInputStream(InputStream in, long uncompSize,
- int lc, int lp, int pb,
- int dictSize, byte[] presetDict,
- ArrayCache arrayCache)
- throws IOException {
- initialize(in, uncompSize, lc, lp, pb, dictSize, presetDict,
- arrayCache);
- }
-
- private void initialize(InputStream in, long uncompSize, byte propsByte,
- int dictSize, byte[] presetDict,
- ArrayCache arrayCache)
- throws IOException {
- // Validate the uncompressed size since the other "initialize" throws
- // IllegalArgumentException if uncompSize < -1.
- if (uncompSize < -1)
- throw new UnsupportedOptionsException(
- "Uncompressed size is too big");
-
- // Decode the properties byte. In contrast to LZMA2, there is no
- // limit of lc + lp <= 4.
- int props = propsByte & 0xFF;
- if (props > (4 * 5 + 4) * 9 + 8)
- throw new CorruptedInputException("Invalid LZMA properties byte");
-
- int pb = props / (9 * 5);
- props -= pb * 9 * 5;
- int lp = props / 9;
- int lc = props - lp * 9;
-
- // Validate the dictionary size since the other "initialize" throws
- // IllegalArgumentException if dictSize is not supported.
- if (dictSize < 0 || dictSize > DICT_SIZE_MAX)
- throw new UnsupportedOptionsException(
- "LZMA dictionary is too big for this implementation");
-
- initialize(in, uncompSize, lc, lp, pb, dictSize, presetDict,
- arrayCache);
- }
-
- private void initialize(InputStream in, long uncompSize,
- int lc, int lp, int pb,
- int dictSize, byte[] presetDict,
- ArrayCache arrayCache)
- throws IOException {
- // getDictSize validates dictSize and gives a message in
- // the exception too, so skip validating dictSize here.
- if (uncompSize < -1 || lc < 0 || lc > 8 || lp < 0 || lp > 4
- || pb < 0 || pb > 4)
- throw new IllegalArgumentException();
-
- this.in = in;
- this.arrayCache = arrayCache;
-
- // If uncompressed size is known, use it to avoid wasting memory for
- // a uselessly large dictionary buffer.
- dictSize = getDictSize(dictSize);
- if (uncompSize >= 0 && dictSize > uncompSize)
- dictSize = getDictSize((int)uncompSize);
-
- lz = new LZDecoder(getDictSize(dictSize), presetDict, arrayCache);
- rc = new RangeDecoderFromStream(in);
- lzma = new LZMADecoder(lz, rc, lc, lp, pb);
-
- remainingSize = uncompSize;
- }
-
- /**
- * Enables relaxed end-of-stream condition when uncompressed size is known.
- * This is useful if uncompressed size is known but it is unknown if
- * the end of stream (EOS) marker is present. After calling this function,
- * both are allowed.
- *
- *
- * Note that after <code>read</code> has returned <code>-1</code> the
- * input position might not be at the end of the stream (too little
- * input may have been read).
- * <p>
- * Reading one byte at a time with <code>read()</code> from this
- * input stream may be inefficient. Wrap it in
- * <code>java.io.BufferedInputStream</code> if you need to read
- * lots of data one byte at a time.
- *
- * @return the next decompressed byte, or -1
- * to indicate the end of the compressed stream
- *
- * @throws CorruptedInputException
- *
- * @throws XZIOException if the stream has been closed
- *
- * @throws EOFException
- * compressed input is truncated or corrupt
- *
- * @throws IOException may be thrown by in
- */
- public int read() throws IOException {
- return read(tempBuf, 0, 1) == -1 ? -1 : (tempBuf[0] & 0xFF);
- }
-
- /**
- * Decompresses into an array of bytes.
- * len
is zero, no bytes are read and 0
- * is returned. Otherwise this will block until len
- * bytes have been decompressed, the end of the LZMA stream is reached,
- * or an exception is thrown.
- *
- * @param buf target buffer for uncompressed data
- * @param off start offset in buf
- * @param len maximum number of uncompressed bytes to read
- *
- * @return number of bytes read, or -1
to indicate
- * the end of the compressed stream
- *
- * @throws CorruptedInputException
- *
- * @throws XZIOException if the stream has been closed
- *
- * @throws EOFException compressed input is truncated or corrupt
- *
- * @throws IOException may be thrown by in
- */
- public int read(byte[] buf, int off, int len) throws IOException {
- if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
- throw new IndexOutOfBoundsException();
-
- if (len == 0)
- return 0;
-
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (exception != null)
- throw exception;
-
- if (endReached)
- return -1;
-
- try {
- int size = 0;
-
- while (len > 0) {
- // If uncompressed size is known and thus no end marker will
- // be present, set the limit so that the uncompressed size
- // won't be exceeded.
- int copySizeMax = len;
- if (remainingSize >= 0 && remainingSize < len)
- copySizeMax = (int)remainingSize;
-
- lz.setLimit(copySizeMax);
-
- // Decode into the dictionary buffer.
- try {
- lzma.decode();
- } catch (CorruptedInputException e) {
- // The end marker is encoded with a LZMA symbol that
- // indicates maximum match distance. This is larger
- // than any supported dictionary and thus causes
- // CorruptedInputException from LZDecoder.repeat.
- if (remainingSize != -1 || !lzma.endMarkerDetected())
- throw e;
-
- endReached = true;
-
- // The exception makes lzma.decode() miss the last range
- // decoder normalization, so do it here. This might
- // cause an IOException if it needs to read a byte
- // from the input stream.
- rc.normalize();
- }
-
- // Copy from the dictionary to buf.
- int copiedSize = lz.flush(buf, off);
- off += copiedSize;
- len -= copiedSize;
- size += copiedSize;
-
- if (remainingSize >= 0) {
- // Update the number of bytes left to be decompressed.
- remainingSize -= copiedSize;
- assert remainingSize >= 0;
-
- if (remainingSize == 0)
- endReached = true;
- }
-
- if (endReached) {
- // Checking these helps a lot when catching corrupt
- // or truncated .lzma files. LZMA Utils doesn't do
- // the second check and thus it accepts many invalid
- // files that this implementation and XZ Utils don't.
- if (lz.hasPending() || (!relaxedEndCondition
- && !rc.isFinished()))
- throw new CorruptedInputException();
-
- putArraysToCache();
- return size == 0 ? -1 : size;
- }
- }
-
- return size;
-
- } catch (IOException e) {
- exception = e;
- throw e;
- }
- }
-
- private void putArraysToCache() {
- if (lz != null) {
- lz.putArraysToCache(arrayCache);
- lz = null;
- }
- }
-
- /**
- * Closes the stream and calls <code>in.close()</code>.
- * If the stream was already closed, this does nothing.
- *
- * @throws IOException if thrown by <code>in.close()</code>
- */
- public void close() throws IOException {
- if (in != null) {
- putArraysToCache();
-
- try {
- in.close();
- } finally {
- in = null;
- }
- }
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/LZMAOutputStream.java b/app/src/main/java/org/tukaani/xz/LZMAOutputStream.java
deleted file mode 100644
index 3a1b7b1..0000000
--- a/app/src/main/java/org/tukaani/xz/LZMAOutputStream.java
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * LZMAOutputStream
- *
- * Authors: Lasse Collin -1
when unknown
- *
- * @throws IOException may be thrown from out
- */
- public LZMAOutputStream(OutputStream out, LZMA2Options options,
- long inputSize)
- throws IOException {
- this(out, options, inputSize, ArrayCache.getDefaultCache());
- }
-
- /**
- * Creates a new compressor for the legacy .lzma file format.
- * LZMAOutputStream(OutputStream, LZMA2Options, long)
- * except that this also takes the arrayCache
argument.
- *
- * @param out output stream to which the compressed data
- * will be written
- *
- * @param options LZMA compression options; the same class
- * is used here as is for LZMA2
- *
- * @param inputSize uncompressed size of the data to be compressed;
- * use -1
when unknown
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws IOException may be thrown from out
- *
- * @since 1.7
- */
- public LZMAOutputStream(OutputStream out, LZMA2Options options,
- long inputSize, ArrayCache arrayCache)
- throws IOException {
- this(out, options, true, inputSize == -1, inputSize, arrayCache);
- }
-
- /**
- * Creates a new compressor for raw LZMA (also known as LZMA1) stream.
- * out
- */
- public LZMAOutputStream(OutputStream out, LZMA2Options options,
- boolean useEndMarker) throws IOException {
- this(out, options, useEndMarker, ArrayCache.getDefaultCache());
- }
-
- /**
- * Creates a new compressor for raw LZMA (also known as LZMA1) stream.
- * LZMAOutputStream(OutputStream, LZMA2Options, boolean)
- * except that this also takes the arrayCache
argument.
- *
- * @param out output stream to which the compressed data
- * will be written
- *
- * @param options LZMA compression options; the same class
- * is used here as is for LZMA2
- *
- * @param useEndMarker
- * if end of stream marker should be written
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws IOException may be thrown from out
- *
- * @since 1.7
- */
- public LZMAOutputStream(OutputStream out, LZMA2Options options,
- boolean useEndMarker, ArrayCache arrayCache)
- throws IOException {
- this(out, options, false, useEndMarker, -1, arrayCache);
- }
-
- /**
- * Returns the LZMA lc/lp/pb properties encoded into a single byte.
- * This might be useful when handling file formats other than .lzma
- * that use the same encoding for the LZMA properties as .lzma does.
- */
- public int getProps() {
- return props;
- }
-
- /**
- * Gets the amount of uncompressed data written to the stream.
- * This is useful when creating raw LZMA streams without
- * the end of stream marker.
- */
- public long getUncompressedSize() {
- return currentUncompressedSize;
- }
-
- public void write(int b) throws IOException {
- tempBuf[0] = (byte)b;
- write(tempBuf, 0, 1);
- }
-
- public void write(byte[] buf, int off, int len) throws IOException {
- if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
- throw new IndexOutOfBoundsException();
-
- if (exception != null)
- throw exception;
-
- if (finished)
- throw new XZIOException("Stream finished or closed");
-
- if (expectedUncompressedSize != -1
- && expectedUncompressedSize - currentUncompressedSize < len)
- throw new XZIOException("Expected uncompressed input size ("
- + expectedUncompressedSize + " bytes) was exceeded");
-
- currentUncompressedSize += len;
-
- try {
- while (len > 0) {
- int used = lz.fillWindow(buf, off, len);
- off += used;
- len -= used;
- lzma.encodeForLZMA1();
- }
- } catch (IOException e) {
- exception = e;
- throw e;
- }
- }
-
- /**
- * Flushing isn't supported and will throw XZIOException.
- */
- public void flush() throws IOException {
- throw new XZIOException("LZMAOutputStream does not support flushing");
- }
-
- /**
- * Finishes the stream without closing the underlying OutputStream.
- */
- public void finish() throws IOException {
- if (!finished) {
- if (exception != null)
- throw exception;
-
- try {
- if (expectedUncompressedSize != -1
- && expectedUncompressedSize != currentUncompressedSize)
- throw new XZIOException("Expected uncompressed size ("
- + expectedUncompressedSize + ") doesn't equal "
- + "the number of bytes written to the stream ("
- + currentUncompressedSize + ")");
-
- lz.setFinishing();
- lzma.encodeForLZMA1();
-
- if (useEndMarker)
- lzma.encodeLZMA1EndMarker();
-
- rc.finish();
- } catch (IOException e) {
- exception = e;
- throw e;
- }
-
- finished = true;
-
- lzma.putArraysToCache(arrayCache);
- lzma = null;
- lz = null;
- }
- }
-
- /**
- * Finishes the stream and closes the underlying OutputStream.
- */
- public void close() throws IOException {
- if (out != null) {
- try {
- finish();
- } catch (IOException e) {}
-
- try {
- out.close();
- } catch (IOException e) {
- if (exception == null)
- exception = e;
- }
-
- out = null;
- }
-
- if (exception != null)
- throw exception;
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/MemoryLimitException.java b/app/src/main/java/org/tukaani/xz/MemoryLimitException.java
deleted file mode 100644
index 9d766bd..0000000
--- a/app/src/main/java/org/tukaani/xz/MemoryLimitException.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * MemoryLimitException
- *
- * Author: Lasse Collin RandomAccessFile
object.
- */
- public SeekableFileInputStream(RandomAccessFile randomAccessFile) {
- this.randomAccessFile = randomAccessFile;
- }
-
- /**
- * Calls {@link RandomAccessFile#read() randomAccessFile.read()}.
- */
- public int read() throws IOException {
- return randomAccessFile.read();
- }
-
- /**
- * Calls {@link RandomAccessFile#read(byte[]) randomAccessFile.read(buf)}.
- */
- public int read(byte[] buf) throws IOException {
- return randomAccessFile.read(buf);
- }
-
- /**
- * Calls
- * {@link RandomAccessFile#read(byte[],int,int)
- * randomAccessFile.read(buf, off, len)}.
- */
- public int read(byte[] buf, int off, int len) throws IOException {
- return randomAccessFile.read(buf, off, len);
- }
-
- /**
- * Calls {@link RandomAccessFile#close() randomAccessFile.close()}.
- */
- public void close() throws IOException {
- randomAccessFile.close();
- }
-
- /**
- * Calls {@link RandomAccessFile#length() randomAccessFile.length()}.
- */
- public long length() throws IOException {
- return randomAccessFile.length();
- }
-
- /**
- * Calls {@link RandomAccessFile#getFilePointer()
- randomAccessFile.getFilePointer()}.
- */
- public long position() throws IOException {
- return randomAccessFile.getFilePointer();
- }
-
- /**
- * Calls {@link RandomAccessFile#seek(long) randomAccessFile.seek(long)}.
- */
- public void seek(long pos) throws IOException {
- randomAccessFile.seek(pos);
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/SeekableInputStream.java b/app/src/main/java/org/tukaani/xz/SeekableInputStream.java
deleted file mode 100644
index a2f908a..0000000
--- a/app/src/main/java/org/tukaani/xz/SeekableInputStream.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * SeekableInputStream
- *
- * Author: Lasse Collin n
bytes forward in this stream.
- * 0
. Otherwise, if skipping n
bytes
- * would cause the position to exceed the stream size, this will do
- * equivalent of seek(length())
and the return value will
- * be adjusted accordingly.
- * n
is negative, the position isn't changed and
- * the return value is 0
. It doesn't seek backward
- * because it would conflict with the specification of
- * {@link java.io.InputStream#skip(long) InputStream.skip}.
- *
- * @return 0
if n
is negative,
- * less than n
if skipping n
- * bytes would seek past the end of the file,
- * n
otherwise
- *
- * @throws IOException might be thrown by {@link #seek(long)}
- */
- public long skip(long n) throws IOException {
- if (n <= 0)
- return 0;
-
- long size = length();
- long pos = position();
- if (pos >= size)
- return 0;
-
- if (size - pos < n)
- n = size - pos;
-
- seek(pos + n);
- return n;
- }
-
- /**
- * Gets the size of the stream.
- */
- public abstract long length() throws IOException;
-
- /**
- * Gets the current position in the stream.
- */
- public abstract long position() throws IOException;
-
- /**
- * Seeks to the specified absolute position in the stream.
- * read
will return
- * -1
to indicate end of stream.
- *
- * @param pos new read position in the stream
- *
- * @throws IOException if pos
is negative or if
- * a stream-specific I/O error occurs
- */
- public abstract void seek(long pos) throws IOException;
-}
diff --git a/app/src/main/java/org/tukaani/xz/SeekableXZInputStream.java b/app/src/main/java/org/tukaani/xz/SeekableXZInputStream.java
deleted file mode 100644
index 74da2e1..0000000
--- a/app/src/main/java/org/tukaani/xz/SeekableXZInputStream.java
+++ /dev/null
@@ -1,1167 +0,0 @@
-/*
- * SeekableXZInputStream
- *
- * Author: Lasse Collin Memory usage
- * Creating random-accessible .xz files
- * LZMA_FULL_FLUSH
. XZ Utils 5.1.1alpha added threaded
- * compression which creates multi-Block .xz files. XZ Utils 5.1.1alpha
- * also added the option --block-size=SIZE
to the xz command
- * line tool. XZ Utils 5.1.2alpha added a partial implementation of
- * --block-list=SIZES
which allows specifying sizes of
- * individual Blocks.
- *
- * Example: getting the uncompressed size of a .xz file
- *
- *
- * @see SeekableFileInputStream
- * @see XZInputStream
- * @see XZOutputStream
- */
-public class SeekableXZInputStream extends SeekableInputStream {
- /**
- * Cache for big arrays.
- */
- private final ArrayCache arrayCache;
-
- /**
- * The input stream containing XZ compressed data.
- */
- private SeekableInputStream in;
-
- /**
- * Memory usage limit after the memory usage of the IndexDecoders have
- * been substracted.
- */
- private final int memoryLimit;
-
- /**
- * Memory usage of the IndexDecoders.
- *
- * String filename = "foo.xz";
- * SeekableFileInputStream seekableFile
- * = new SeekableFileInputStream(filename);
- *
- * try {
- * SeekableXZInputStream seekableXZ
- * = new SeekableXZInputStream(seekableFile);
- * System.out.println("Uncompressed size: " + seekableXZ.length());
- * } finally {
- * seekableFile.close();
- * }
- *
memoryLimit + indexMemoryUsage
equals the original
- * memory usage limit that was passed to the constructor.
- */
- private int indexMemoryUsage = 0;
-
- /**
- * List of IndexDecoders, one for each Stream in the file.
- * The list is in reverse order: The first element is
- * the last Stream in the file.
- */
- private final ArrayList-1
.
- */
- private final BlockInfo curBlockInfo;
-
- /**
- * Temporary (and cached) information about the Block whose information
- * is queried via getBlockPos
and related functions.
- */
- private final BlockInfo queriedBlockInfo;
-
- /**
- * Integrity Check in the current XZ Stream. The constructor leaves
- * this to point to the Check of the first Stream.
- */
- private Check check;
-
- /**
- * Flag indicating if the integrity checks will be verified.
- */
- private final boolean verifyCheck;
-
- /**
- * Decoder of the current XZ Block, if any.
- */
- private BlockInputStream blockDecoder = null;
-
- /**
- * Current uncompressed position.
- */
- private long curPos = 0;
-
- /**
- * Target position for seeking.
- */
- private long seekPos;
-
- /**
- * True when seek(long)
has been called but the actual
- * seeking hasn't been done yet.
- */
- private boolean seekNeeded = false;
-
- /**
- * True when end of the file was reached. This can be cleared by
- * calling seek(long)
.
- */
- private boolean endReached = false;
-
- /**
- * Pending exception from an earlier error.
- */
- private IOException exception = null;
-
- /**
- * Temporary buffer for read(). This avoids reallocating memory
- * on every read() call.
- */
- private final byte[] tempBuf = new byte[1];
-
- /**
- * Creates a new seekable XZ decompressor without a memory usage limit.
- *
- * @param in seekable input stream containing one or more
- * XZ Streams; the whole input stream is used
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ data is corrupt or truncated
- *
- * @throws UnsupportedOptionsException
- * XZ headers seem valid but they specify
- * options not supported by this implementation
- *
- * @throws EOFException
- * less than 6 bytes of input was available
- * from in
, or (unlikely) the size
- * of the underlying stream got smaller while
- * this was reading from it
- *
- * @throws IOException may be thrown by in
- */
- public SeekableXZInputStream(SeekableInputStream in)
- throws IOException {
- this(in, -1);
- }
-
- /**
- * Creates a new seekable XZ decompressor without a memory usage limit.
- * SeekableXZInputStream(SeekableInputStream)
except that
- * this also takes the arrayCache
argument.
- *
- * @param in seekable input stream containing one or more
- * XZ Streams; the whole input stream is used
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ data is corrupt or truncated
- *
- * @throws UnsupportedOptionsException
- * XZ headers seem valid but they specify
- * options not supported by this implementation
- *
- * @throws EOFException
- * less than 6 bytes of input was available
- * from in
, or (unlikely) the size
- * of the underlying stream got smaller while
- * this was reading from it
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public SeekableXZInputStream(SeekableInputStream in, ArrayCache arrayCache)
- throws IOException {
- this(in, -1, arrayCache);
- }
-
- /**
- * Creates a new seekable XZ decomporessor with an optional
- * memory usage limit.
- *
- * @param in seekable input stream containing one or more
- * XZ Streams; the whole input stream is used
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ data is corrupt or truncated
- *
- * @throws UnsupportedOptionsException
- * XZ headers seem valid but they specify
- * options not supported by this implementation
- *
- * @throws MemoryLimitException
- * decoded XZ Indexes would need more memory
- * than allowed by the memory usage limit
- *
- * @throws EOFException
- * less than 6 bytes of input was available
- * from in
, or (unlikely) the size
- * of the underlying stream got smaller while
- * this was reading from it
- *
- * @throws IOException may be thrown by in
- */
- public SeekableXZInputStream(SeekableInputStream in, int memoryLimit)
- throws IOException {
- this(in, memoryLimit, true);
- }
-
- /**
- * Creates a new seekable XZ decomporessor with an optional
- * memory usage limit.
- * SeekableXZInputStream(SeekableInputStream,int)
- * except that this also takes the arrayCache
argument.
- *
- * @param in seekable input stream containing one or more
- * XZ Streams; the whole input stream is used
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ data is corrupt or truncated
- *
- * @throws UnsupportedOptionsException
- * XZ headers seem valid but they specify
- * options not supported by this implementation
- *
- * @throws MemoryLimitException
- * decoded XZ Indexes would need more memory
- * than allowed by the memory usage limit
- *
- * @throws EOFException
- * less than 6 bytes of input was available
- * from in
, or (unlikely) the size
- * of the underlying stream got smaller while
- * this was reading from it
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public SeekableXZInputStream(SeekableInputStream in, int memoryLimit,
- ArrayCache arrayCache)
- throws IOException {
- this(in, memoryLimit, true, arrayCache);
- }
-
- /**
- * Creates a new seekable XZ decomporessor with an optional
- * memory usage limit and ability to disable verification
- * of integrity checks.
- *
- *
- * verifyCheck
only affects the integrity check of
- * the actual compressed data. The CRC32 fields in the headers
- * are always verified.
- *
- * @param in seekable input stream containing one or more
- * XZ Streams; the whole input stream is used
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @param verifyCheck if true
, the integrity checks
- * will be verified; this should almost never
- * be set to false
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ data is corrupt or truncated
- *
- * @throws UnsupportedOptionsException
- * XZ headers seem valid but they specify
- * options not supported by this implementation
- *
- * @throws MemoryLimitException
- * decoded XZ Indexes would need more memory
- * than allowed by the memory usage limit
- *
- * @throws EOFException
- * less than 6 bytes of input was available
- * from in
, or (unlikely) the size
- * of the underlying stream got smaller while
- * this was reading from it
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.6
- */
- public SeekableXZInputStream(SeekableInputStream in, int memoryLimit,
- boolean verifyCheck)
- throws IOException {
- this(in, memoryLimit, verifyCheck, ArrayCache.getDefaultCache());
- }
-
- /**
- * Creates a new seekable XZ decomporessor with an optional
- * memory usage limit and ability to disable verification
- * of integrity checks.
- * SeekableXZInputStream(SeekableInputStream,int,boolean)
- * except that this also takes the arrayCache
argument.
- *
- * @param in seekable input stream containing one or more
- * XZ Streams; the whole input stream is used
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @param verifyCheck if true
, the integrity checks
- * will be verified; this should almost never
- * be set to false
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ data is corrupt or truncated
- *
- * @throws UnsupportedOptionsException
- * XZ headers seem valid but they specify
- * options not supported by this implementation
- *
- * @throws MemoryLimitException
- * decoded XZ Indexes would need more memory
- * than allowed by the memory usage limit
- *
- * @throws EOFException
- * less than 6 bytes of input was available
- * from in
, or (unlikely) the size
- * of the underlying stream got smaller while
- * this was reading from it
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public SeekableXZInputStream(SeekableInputStream in, int memoryLimit,
- boolean verifyCheck, ArrayCache arrayCache)
- throws IOException {
- this.arrayCache = arrayCache;
- this.verifyCheck = verifyCheck;
- this.in = in;
- DataInputStream inData = new DataInputStream(in);
-
- // Check the magic bytes in the beginning of the file.
- {
- in.seek(0);
- byte[] buf = new byte[XZ.HEADER_MAGIC.length];
- inData.readFully(buf);
- if (!Arrays.equals(buf, XZ.HEADER_MAGIC))
- throw new XZFormatException();
- }
-
- // Get the file size and verify that it is a multiple of 4 bytes.
- long pos = in.length();
- if ((pos & 3) != 0)
- throw new CorruptedInputException(
- "XZ file size is not a multiple of 4 bytes");
-
- // Parse the headers starting from the end of the file.
- byte[] buf = new byte[DecoderUtil.STREAM_HEADER_SIZE];
- long streamPadding = 0;
-
- while (pos > 0) {
- if (pos < DecoderUtil.STREAM_HEADER_SIZE)
- throw new CorruptedInputException();
-
- // Read the potential Stream Footer.
- in.seek(pos - DecoderUtil.STREAM_HEADER_SIZE);
- inData.readFully(buf);
-
- // Skip Stream Padding four bytes at a time.
- // Skipping more at once would be faster,
- // but usually there isn't much Stream Padding.
- if (buf[8] == 0x00 && buf[9] == 0x00 && buf[10] == 0x00
- && buf[11] == 0x00) {
- streamPadding += 4;
- pos -= 4;
- continue;
- }
-
- // It's not Stream Padding. Update pos.
- pos -= DecoderUtil.STREAM_HEADER_SIZE;
-
- // Decode the Stream Footer and check if Backward Size
- // looks reasonable.
- StreamFlags streamFooter = DecoderUtil.decodeStreamFooter(buf);
- if (streamFooter.backwardSize >= pos)
- throw new CorruptedInputException(
- "Backward Size in XZ Stream Footer is too big");
-
- // Check that the Check ID is supported. Store it in case this
- // is the first Stream in the file.
- check = Check.getInstance(streamFooter.checkType);
-
- // Remember which Check IDs have been seen.
- checkTypes |= 1 << streamFooter.checkType;
-
- // Seek to the beginning of the Index.
- in.seek(pos - streamFooter.backwardSize);
-
- // Decode the Index field.
- IndexDecoder index;
- try {
- index = new IndexDecoder(in, streamFooter, streamPadding,
- memoryLimit);
- } catch (MemoryLimitException e) {
- // IndexDecoder doesn't know how much memory we had
- // already needed so we need to recreate the exception.
- assert memoryLimit >= 0;
- throw new MemoryLimitException(
- e.getMemoryNeeded() + indexMemoryUsage,
- memoryLimit + indexMemoryUsage);
- }
-
- // Update the memory usage and limit counters.
- indexMemoryUsage += index.getMemoryUsage();
- if (memoryLimit >= 0) {
- memoryLimit -= index.getMemoryUsage();
- assert memoryLimit >= 0;
- }
-
- // Remember the uncompressed size of the largest Block.
- if (largestBlockSize < index.getLargestBlockSize())
- largestBlockSize = index.getLargestBlockSize();
-
- // Calculate the offset to the beginning of this XZ Stream and
- // check that it looks sane.
- long off = index.getStreamSize() - DecoderUtil.STREAM_HEADER_SIZE;
- if (pos < off)
- throw new CorruptedInputException("XZ Index indicates "
- + "too big compressed size for the XZ Stream");
-
- // Seek to the beginning of this Stream.
- pos -= off;
- in.seek(pos);
-
- // Decode the Stream Header.
- inData.readFully(buf);
- StreamFlags streamHeader = DecoderUtil.decodeStreamHeader(buf);
-
- // Verify that the Stream Header matches the Stream Footer.
- if (!DecoderUtil.areStreamFlagsEqual(streamHeader, streamFooter))
- throw new CorruptedInputException(
- "XZ Stream Footer does not match Stream Header");
-
- // Update the total uncompressed size of the file and check that
- // it doesn't overflow.
- uncompressedSize += index.getUncompressedSize();
- if (uncompressedSize < 0)
- throw new UnsupportedOptionsException("XZ file is too big");
-
- // Update the Block count and check that it fits into an int.
- blockCount += index.getRecordCount();
- if (blockCount < 0)
- throw new UnsupportedOptionsException(
- "XZ file has over " + Integer.MAX_VALUE + " Blocks");
-
- // Add this Stream to the list of Streams.
- streams.add(index);
-
- // Reset to be ready to parse the next Stream.
- streamPadding = 0;
- }
-
- assert pos == 0;
-
- // Save it now that indexMemoryUsage has been substracted from it.
- this.memoryLimit = memoryLimit;
-
- // Store the relative offsets of the Streams. This way we don't
- // need to recalculate them in this class when seeking; the
- // IndexDecoder instances will handle them.
- IndexDecoder prev = streams.get(streams.size() - 1);
- for (int i = streams.size() - 2; i >= 0; --i) {
- IndexDecoder cur = streams.get(i);
- cur.setOffsets(prev);
- prev = cur;
- }
-
- // Initialize curBlockInfo to point to the first Stream.
- // The blockNumber will be left to -1 so that .hasNext()
- // and .setNext() work to get the first Block when starting
- // to decompress from the beginning of the file.
- IndexDecoder first = streams.get(streams.size() - 1);
- curBlockInfo = new BlockInfo(first);
-
- // queriedBlockInfo needs to be allocated too. The Stream used for
- // initialization doesn't matter though.
- queriedBlockInfo = new BlockInfo(first);
- }
-
- /**
- * Gets the types of integrity checks used in the .xz file.
- * Multiple checks are possible only if there are multiple
- * concatenated XZ Streams.
- * (1 << XZ.CHECK_CRC64)
- * | (1 << XZ.CHECK_SHA256)
.
- */
- public int getCheckTypes() {
- return checkTypes;
- }
-
- /**
- * Gets the amount of memory in kibibytes (KiB) used by
- * the data structures needed to locate the XZ Blocks.
- * This is usually useless information but since it is calculated
- * for memory usage limit anyway, it is nice to make it available to too.
- */
- public int getIndexMemoryUsage() {
- return indexMemoryUsage;
- }
-
- /**
- * Gets the uncompressed size of the largest XZ Block in bytes.
- * This can be useful if you want to check that the file doesn't
- * have huge XZ Blocks which could make seeking to arbitrary offsets
- * very slow. Note that huge Blocks don't automatically mean that
- * seeking would be slow, for example, seeking to the beginning of
- * any Block is always fast.
- */
- public long getLargestBlockSize() {
- return largestBlockSize;
- }
-
- /**
- * Gets the number of Streams in the .xz file.
- *
- * @since 1.3
- */
- public int getStreamCount() {
- return streams.size();
- }
-
- /**
- * Gets the number of Blocks in the .xz file.
- *
- * @since 1.3
- */
- public int getBlockCount() {
- return blockCount;
- }
-
- /**
- * Gets the uncompressed start position of the given Block.
- *
- * @throws IndexOutOfBoundsException if
- * blockNumber < 0
or
- * blockNumber >= getBlockCount()
.
- *
- * @since 1.3
- */
- public long getBlockPos(int blockNumber) {
- locateBlockByNumber(queriedBlockInfo, blockNumber);
- return queriedBlockInfo.uncompressedOffset;
- }
-
- /**
- * Gets the uncompressed size of the given Block.
- *
- * @throws IndexOutOfBoundsException if
- * blockNumber < 0
or
- * blockNumber >= getBlockCount()
.
- *
- * @since 1.3
- */
- public long getBlockSize(int blockNumber) {
- locateBlockByNumber(queriedBlockInfo, blockNumber);
- return queriedBlockInfo.uncompressedSize;
- }
-
- /**
- * Gets the position where the given compressed Block starts in
- * the underlying .xz file.
- * This information is rarely useful to the users of this class.
- *
- * @throws IndexOutOfBoundsException if
- * blockNumber < 0
or
- * blockNumber >= getBlockCount()
.
- *
- * @since 1.3
- */
- public long getBlockCompPos(int blockNumber) {
- locateBlockByNumber(queriedBlockInfo, blockNumber);
- return queriedBlockInfo.compressedOffset;
- }
-
- /**
- * Gets the compressed size of the given Block.
- * This together with the uncompressed size can be used to calculate
- * the compression ratio of the specific Block.
- *
- * @throws IndexOutOfBoundsException if
- * blockNumber < 0
or
- * blockNumber >= getBlockCount()
.
- *
- * @since 1.3
- */
- public long getBlockCompSize(int blockNumber) {
- locateBlockByNumber(queriedBlockInfo, blockNumber);
- return (queriedBlockInfo.unpaddedSize + 3) & ~3;
- }
-
- /**
- * Gets integrity check type (Check ID) of the given Block.
- *
- * @throws IndexOutOfBoundsException if
- * blockNumber < 0
or
- * blockNumber >= getBlockCount()
.
- *
- * @see #getCheckTypes()
- *
- * @since 1.3
- */
- public int getBlockCheckType(int blockNumber) {
- locateBlockByNumber(queriedBlockInfo, blockNumber);
- return queriedBlockInfo.getCheckType();
- }
-
- /**
- * Gets the number of the Block that contains the byte at the given
- * uncompressed position.
- *
- * @throws IndexOutOfBoundsException if
- * pos < 0
or
- * pos >= length()
.
- *
- * @since 1.3
- */
- public int getBlockNumber(long pos) {
- locateBlockByPos(queriedBlockInfo, pos);
- return queriedBlockInfo.blockNumber;
- }
-
- /**
- * Decompresses the next byte from this input stream.
- *
- * @return the next decompressed byte, or -1
- * to indicate the end of the compressed stream
- *
- * @throws CorruptedInputException
- * @throws UnsupportedOptionsException
- * @throws MemoryLimitException
- *
- * @throws XZIOException if the stream has been closed
- *
- * @throws IOException may be thrown by in
- */
- public int read() throws IOException {
- return read(tempBuf, 0, 1) == -1 ? -1 : (tempBuf[0] & 0xFF);
- }
-
- /**
- * Decompresses into an array of bytes.
- * len
is zero, no bytes are read and 0
- * is returned. Otherwise this will try to decompress len
- * bytes of uncompressed data. Less than len
bytes may
- * be read only in the following situations:
- *
- *
- *
- * @param buf target buffer for uncompressed data
- * @param off start offset in len
bytes have already been successfully
- * decompressed. The next call with non-zero len
- * will immediately throw the pending exception.buf
- * @param len maximum number of uncompressed bytes to read
- *
- * @return number of bytes read, or -1
to indicate
- * the end of the compressed stream
- *
- * @throws CorruptedInputException
- * @throws UnsupportedOptionsException
- * @throws MemoryLimitException
- *
- * @throws XZIOException if the stream has been closed
- *
- * @throws IOException may be thrown by in
- */
- public int read(byte[] buf, int off, int len) throws IOException {
- if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
- throw new IndexOutOfBoundsException();
-
- if (len == 0)
- return 0;
-
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (exception != null)
- throw exception;
-
- int size = 0;
-
- try {
- if (seekNeeded)
- seek();
-
- if (endReached)
- return -1;
-
- while (len > 0) {
- if (blockDecoder == null) {
- seek();
- if (endReached)
- break;
- }
-
- int ret = blockDecoder.read(buf, off, len);
-
- if (ret > 0) {
- curPos += ret;
- size += ret;
- off += ret;
- len -= ret;
- } else if (ret == -1) {
- blockDecoder = null;
- }
- }
- } catch (IOException e) {
- // We know that the file isn't simply truncated because we could
- // parse the Indexes in the constructor. So convert EOFException
- // to CorruptedInputException.
- if (e instanceof EOFException)
- e = new CorruptedInputException();
-
- exception = e;
- if (size == 0)
- throw e;
- }
-
- return size;
- }
-
- /**
- * Returns the number of uncompressed bytes that can be read
- * without blocking. The value is returned with an assumption
- * that the compressed input data will be valid. If the compressed
- * data is corrupt, CorruptedInputException
may get
- * thrown before the number of bytes claimed to be available have
- * been read from this input stream.
- *
- * @return the number of uncompressed bytes that can be read
- * without blocking
- */
- public int available() throws IOException {
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (exception != null)
- throw exception;
-
- if (endReached || seekNeeded || blockDecoder == null)
- return 0;
-
- return blockDecoder.available();
- }
-
- /**
- * Closes the stream and calls in.close()
.
- * If the stream was already closed, this does nothing.
- * close(true)
.
- *
- * @throws IOException if thrown by in.close()
- */
- public void close() throws IOException {
- close(true);
- }
-
- /**
- * Closes the stream and optionally calls in.close()
.
- * If the stream was already closed, this does nothing.
- * If close(false)
has been called, a further
- * call of close(true)
does nothing (it doesn't call
- * in.close()
).
- * InputStream
,
- * there is usually no need to worry about closing this stream either;
- * it's fine to do nothing and let the garbage collector handle it.
- * However, if you are using {@link ArrayCache}, close(false)
- * can be useful to put the allocated arrays back to the cache without
- * closing the underlying InputStream
.
- * read
returns -1
), the arrays are
- * automatically put back to the cache by that read
call. In
- * this situation close(false)
is redundant (but harmless).
- *
- * @throws IOException if thrown by in.close()
- *
- * @since 1.7
- */
- public void close(boolean closeInput) throws IOException {
- if (in != null) {
- if (blockDecoder != null) {
- blockDecoder.close();
- blockDecoder = null;
- }
-
- try {
- if (closeInput)
- in.close();
- } finally {
- in = null;
- }
- }
- }
-
- /**
- * Gets the uncompressed size of this input stream. If there are multiple
- * XZ Streams, the total uncompressed size of all XZ Streams is returned.
- */
- public long length() {
- return uncompressedSize;
- }
-
- /**
- * Gets the current uncompressed position in this input stream.
- *
- * @throws XZIOException if the stream has been closed
- */
- public long position() throws IOException {
- if (in == null)
- throw new XZIOException("Stream closed");
-
- return seekNeeded ? seekPos : curPos;
- }
-
- /**
- * Seeks to the specified absolute uncompressed position in the stream.
- * This only stores the new position, so this function itself is always
- * very fast. The actual seek is done when read
is called
- * to read at least one byte.
- * read
will return -1
to indicate
- * the end of the stream.
- *
- * @param pos new uncompressed read position
- *
- * @throws XZIOException
- * if pos
is negative, or
- * if stream has been closed
- */
- public void seek(long pos) throws IOException {
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (pos < 0)
- throw new XZIOException("Negative seek position: " + pos);
-
- seekPos = pos;
- seekNeeded = true;
- }
-
- /**
- * Seeks to the beginning of the given XZ Block.
- *
- * @throws XZIOException
- * if blockNumber < 0
or
- * blockNumber >= getBlockCount()
,
- * or if stream has been closed
- *
- * @since 1.3
- */
- public void seekToBlock(int blockNumber) throws IOException {
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (blockNumber < 0 || blockNumber >= blockCount)
- throw new XZIOException("Invalid XZ Block number: " + blockNumber);
-
- // This is a bit silly implementation. Here we locate the uncompressed
- // offset of the specified Block, then when doing the actual seek in
- // seek(), we need to find the Block number based on seekPos.
- seekPos = getBlockPos(blockNumber);
- seekNeeded = true;
- }
-
- /**
- * Does the actual seeking. This is also called when read
- * needs a new Block to decode.
- */
- private void seek() throws IOException {
- // If seek(long) wasn't called, we simply need to get the next Block
- // from the same Stream. If there are no more Blocks in this Stream,
- // then we behave as if seek(long) had been called.
- if (!seekNeeded) {
- if (curBlockInfo.hasNext()) {
- curBlockInfo.setNext();
- initBlockDecoder();
- return;
- }
-
- seekPos = curPos;
- }
-
- seekNeeded = false;
-
- // Check if we are seeking to or past the end of the file.
- if (seekPos >= uncompressedSize) {
- curPos = seekPos;
-
- if (blockDecoder != null) {
- blockDecoder.close();
- blockDecoder = null;
- }
-
- endReached = true;
- return;
- }
-
- endReached = false;
-
- // Locate the Block that contains the uncompressed target position.
- locateBlockByPos(curBlockInfo, seekPos);
-
- // Seek in the underlying stream and create a new Block decoder
- // only if really needed. We can skip it if the current position
- // is already in the correct Block and the target position hasn't
- // been decompressed yet.
- //
- // NOTE: If curPos points to the beginning of this Block, it's
- // because it was left there after decompressing an earlier Block.
- // In that case, decoding of the current Block hasn't been started
- // yet. (Decoding of a Block won't be started until at least one
- // byte will also be read from it.)
- if (!(curPos > curBlockInfo.uncompressedOffset && curPos <= seekPos)) {
- // Seek to the beginning of the Block.
- in.seek(curBlockInfo.compressedOffset);
-
- // Since it is possible that this Block is from a different
- // Stream than the previous Block, initialize a new Check.
- check = Check.getInstance(curBlockInfo.getCheckType());
-
- // Create a new Block decoder.
- initBlockDecoder();
- curPos = curBlockInfo.uncompressedOffset;
- }
-
- // If the target wasn't at a Block boundary, decompress and throw
- // away data to reach the target position.
- if (seekPos > curPos) {
- // NOTE: The "if" below is there just in case. In this situation,
- // blockDecoder.skip will always skip the requested amount
- // or throw an exception.
- long skipAmount = seekPos - curPos;
- if (blockDecoder.skip(skipAmount) != skipAmount)
- throw new CorruptedInputException();
-
- curPos = seekPos;
- }
- }
-
- /**
- * Locates the Block that contains the given uncompressed position.
- */
- private void locateBlockByPos(BlockInfo info, long pos) {
- if (pos < 0 || pos >= uncompressedSize)
- throw new IndexOutOfBoundsException(
- "Invalid uncompressed position: " + pos);
-
- // Locate the Stream that contains the target position.
- IndexDecoder index;
- for (int i = 0; ; ++i) {
- index = streams.get(i);
- if (index.hasUncompressedOffset(pos))
- break;
- }
-
- // Locate the Block from the Stream that contains the target position.
- index.locateBlock(info, pos);
-
- assert (info.compressedOffset & 3) == 0;
- assert info.uncompressedSize > 0;
- assert pos >= info.uncompressedOffset;
- assert pos < info.uncompressedOffset + info.uncompressedSize;
- }
-
- /**
- * Locates the given Block and stores information about it
- * to info
.
- */
- private void locateBlockByNumber(BlockInfo info, int blockNumber) {
- // Validate.
- if (blockNumber < 0 || blockNumber >= blockCount)
- throw new IndexOutOfBoundsException(
- "Invalid XZ Block number: " + blockNumber);
-
- // Skip the search if info already points to the correct Block.
- if (info.blockNumber == blockNumber)
- return;
-
- // Search the Stream that contains the given Block and then
- // search the Block from that Stream.
- for (int i = 0; ; ++i) {
- IndexDecoder index = streams.get(i);
- if (index.hasRecord(blockNumber)) {
- index.setBlockInfo(info, blockNumber);
- return;
- }
- }
- }
-
- /**
- * Initializes a new BlockInputStream. This is a helper function for
- * seek()
.
- */
- private void initBlockDecoder() throws IOException {
- try {
- // Set it to null first so that GC can collect it if memory
- // runs tight when initializing a new BlockInputStream.
- if (blockDecoder != null) {
- blockDecoder.close();
- blockDecoder = null;
- }
-
- blockDecoder = new BlockInputStream(
- in, check, verifyCheck, memoryLimit,
- curBlockInfo.unpaddedSize, curBlockInfo.uncompressedSize,
- arrayCache);
- } catch (MemoryLimitException e) {
- // BlockInputStream doesn't know how much memory we had
- // already needed so we need to recreate the exception.
- assert memoryLimit >= 0;
- throw new MemoryLimitException(
- e.getMemoryNeeded() + indexMemoryUsage,
- memoryLimit + indexMemoryUsage);
- } catch (IndexIndicatorException e) {
- // It cannot be Index so the file must be corrupt.
- throw new CorruptedInputException();
- }
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/SimpleInputStream.java b/app/src/main/java/org/tukaani/xz/SimpleInputStream.java
deleted file mode 100644
index afd40c7..0000000
--- a/app/src/main/java/org/tukaani/xz/SimpleInputStream.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * SimpleInputStream
- *
- * Author: Lasse Collin XZInputStream
.
- *
- * When uncompressed size is known beforehand
- * read()
and checking
- * that it returns -1
. This way the decompressor will parse the
- * file footers and verify the integrity checks, giving the caller more
- * confidence that the uncompressed data is valid.
- *
- * @see XZInputStream
- */
-public class SingleXZInputStream extends InputStream {
- private InputStream in;
- private final ArrayCache arrayCache;
- private final int memoryLimit;
- private final StreamFlags streamHeaderFlags;
- private final Check check;
- private final boolean verifyCheck;
- private BlockInputStream blockDecoder = null;
- private final IndexHash indexHash = new IndexHash();
- private boolean endReached = false;
- private IOException exception = null;
-
- private final byte[] tempBuf = new byte[1];
-
- /**
- * Reads the Stream Header into a buffer.
- * This is a helper function for the constructors.
- */
- private static byte[] readStreamHeader(InputStream in) throws IOException {
- byte[] streamHeader = new byte[DecoderUtil.STREAM_HEADER_SIZE];
- new DataInputStream(in).readFully(streamHeader);
- return streamHeader;
- }
-
- /**
- * Creates a new XZ decompressor that decompresses exactly one
- * XZ Stream from in
without a memory usage limit.
- * in
. The header of the first Block is not read
- * until read
is called.
- *
- * @param in input stream from which XZ-compressed
- * data is read
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ header CRC32 doesn't match
- *
- * @throws UnsupportedOptionsException
- * XZ header is valid but specifies options
- * not supported by this implementation
- *
- * @throws EOFException
- * less than 12 bytes of input was available
- * from in
- *
- * @throws IOException may be thrown by in
- */
- public SingleXZInputStream(InputStream in) throws IOException {
- this(in, -1);
- }
-
- /**
- * Creates a new XZ decompressor that decompresses exactly one
- * XZ Stream from in
without a memory usage limit.
- * SingleXZInputStream(InputStream)
- * except that this also takes the arrayCache
argument.
- *
- * @param in input stream from which XZ-compressed
- * data is read
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ header CRC32 doesn't match
- *
- * @throws UnsupportedOptionsException
- * XZ header is valid but specifies options
- * not supported by this implementation
- *
- * @throws EOFException
- * less than 12 bytes of input was available
- * from in
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public SingleXZInputStream(InputStream in, ArrayCache arrayCache)
- throws IOException {
- this(in, -1, arrayCache);
- }
-
- /**
- * Creates a new XZ decompressor that decompresses exactly one
- * XZ Stream from in
with an optional memory usage limit.
- * SingleXZInputStream(InputStream)
- * except that this also takes the memoryLimit
argument.
- *
- * @param in input stream from which XZ-compressed
- * data is read
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ header CRC32 doesn't match
- *
- * @throws UnsupportedOptionsException
- * XZ header is valid but specifies options
- * not supported by this implementation
- *
- * @throws EOFException
- * less than 12 bytes of input was available
- * from in
- *
- * @throws IOException may be thrown by in
- */
- public SingleXZInputStream(InputStream in, int memoryLimit)
- throws IOException {
- this(in, memoryLimit, true);
- }
-
- /**
- * Creates a new XZ decompressor that decompresses exactly one
- * XZ Stream from in
with an optional memory usage limit.
- * SingleXZInputStream(InputStream)
- * except that this also takes the memoryLimit
and
- * arrayCache
arguments.
- *
- * @param in input stream from which XZ-compressed
- * data is read
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ header CRC32 doesn't match
- *
- * @throws UnsupportedOptionsException
- * XZ header is valid but specifies options
- * not supported by this implementation
- *
- * @throws EOFException
- * less than 12 bytes of input was available
- * from in
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public SingleXZInputStream(InputStream in, int memoryLimit,
- ArrayCache arrayCache) throws IOException {
- this(in, memoryLimit, true, arrayCache);
- }
-
- /**
- * Creates a new XZ decompressor that decompresses exactly one
- * XZ Stream from in
with an optional memory usage limit
- * and ability to disable verification of integrity checks.
- * SingleXZInputStream(InputStream,int)
- * except that this also takes the verifyCheck
argument.
- *
- *
- * verifyCheck
only affects the integrity check of
- * the actual compressed data. The CRC32 fields in the headers
- * are always verified.
- *
- * @param in input stream from which XZ-compressed
- * data is read
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @param verifyCheck if true
, the integrity checks
- * will be verified; this should almost never
- * be set to false
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ header CRC32 doesn't match
- *
- * @throws UnsupportedOptionsException
- * XZ header is valid but specifies options
- * not supported by this implementation
- *
- * @throws EOFException
- * less than 12 bytes of input was available
- * from in
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.6
- */
- public SingleXZInputStream(InputStream in, int memoryLimit,
- boolean verifyCheck) throws IOException {
- this(in, memoryLimit, verifyCheck, ArrayCache.getDefaultCache());
- }
-
- /**
- * Creates a new XZ decompressor that decompresses exactly one
- * XZ Stream from in
with an optional memory usage limit
- * and ability to disable verification of integrity checks.
- * SingleXZInputStream(InputStream,int,boolean)
- * except that this also takes the arrayCache
argument.
- *
- * @param in input stream from which XZ-compressed
- * data is read
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @param verifyCheck if true
, the integrity checks
- * will be verified; this should almost never
- * be set to false
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ header CRC32 doesn't match
- *
- * @throws UnsupportedOptionsException
- * XZ header is valid but specifies options
- * not supported by this implementation
- *
- * @throws EOFException
- * less than 12 bytes of input was available
- * from in
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public SingleXZInputStream(InputStream in, int memoryLimit,
- boolean verifyCheck, ArrayCache arrayCache)
- throws IOException {
- this(in, memoryLimit, verifyCheck, readStreamHeader(in), arrayCache);
- }
-
- SingleXZInputStream(InputStream in, int memoryLimit, boolean verifyCheck,
- byte[] streamHeader, ArrayCache arrayCache)
- throws IOException {
- this.arrayCache = arrayCache;
- this.in = in;
- this.memoryLimit = memoryLimit;
- this.verifyCheck = verifyCheck;
- streamHeaderFlags = DecoderUtil.decodeStreamHeader(streamHeader);
- check = Check.getInstance(streamHeaderFlags.checkType);
- }
-
- /**
- * Gets the ID of the integrity check used in this XZ Stream.
- *
- * @return the Check ID specified in the XZ Stream Header
- */
- public int getCheckType() {
- return streamHeaderFlags.checkType;
- }
-
- /**
- * Gets the name of the integrity check used in this XZ Stream.
- *
- * @return the name of the check specified in the XZ Stream Header
- */
- public String getCheckName() {
- return check.getName();
- }
-
- /**
- * Decompresses the next byte from this input stream.
- * read()
from this input stream
- * may be inefficient. Wrap it in {@link java.io.BufferedInputStream}
- * if you need to read lots of data one byte at a time.
- *
- * @return the next decompressed byte, or -1
- * to indicate the end of the compressed stream
- *
- * @throws CorruptedInputException
- * @throws UnsupportedOptionsException
- * @throws MemoryLimitException
- *
- * @throws XZIOException if the stream has been closed
- *
- * @throws EOFException
- * compressed input is truncated or corrupt
- *
- * @throws IOException may be thrown by in
- */
- public int read() throws IOException {
- return read(tempBuf, 0, 1) == -1 ? -1 : (tempBuf[0] & 0xFF);
- }
-
- /**
- * Decompresses into an array of bytes.
- * len
is zero, no bytes are read and 0
- * is returned. Otherwise this will try to decompress len
- * bytes of uncompressed data. Less than len
bytes may
- * be read only in the following situations:
- *
- *
- *
- * @param buf target buffer for uncompressed data
- * @param off start offset in len
- * bytes have already been successfully decompressed.
- * The next call with non-zero len
will immediately
- * throw the pending exception.buf
- * @param len maximum number of uncompressed bytes to read
- *
- * @return number of bytes read, or -1
to indicate
- * the end of the compressed stream
- *
- * @throws CorruptedInputException
- * @throws UnsupportedOptionsException
- * @throws MemoryLimitException
- *
- * @throws XZIOException if the stream has been closed
- *
- * @throws EOFException
- * compressed input is truncated or corrupt
- *
- * @throws IOException may be thrown by in
- */
- public int read(byte[] buf, int off, int len) throws IOException {
- if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
- throw new IndexOutOfBoundsException();
-
- if (len == 0)
- return 0;
-
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (exception != null)
- throw exception;
-
- if (endReached)
- return -1;
-
- int size = 0;
-
- try {
- while (len > 0) {
- if (blockDecoder == null) {
- try {
- blockDecoder = new BlockInputStream(
- in, check, verifyCheck, memoryLimit, -1, -1,
- arrayCache);
- } catch (IndexIndicatorException e) {
- indexHash.validate(in);
- validateStreamFooter();
- endReached = true;
- return size > 0 ? size : -1;
- }
- }
-
- int ret = blockDecoder.read(buf, off, len);
-
- if (ret > 0) {
- size += ret;
- off += ret;
- len -= ret;
- } else if (ret == -1) {
- indexHash.add(blockDecoder.getUnpaddedSize(),
- blockDecoder.getUncompressedSize());
- blockDecoder = null;
- }
- }
- } catch (IOException e) {
- exception = e;
- if (size == 0)
- throw e;
- }
-
- return size;
- }
-
- private void validateStreamFooter() throws IOException {
- byte[] buf = new byte[DecoderUtil.STREAM_HEADER_SIZE];
- new DataInputStream(in).readFully(buf);
- StreamFlags streamFooterFlags = DecoderUtil.decodeStreamFooter(buf);
-
- if (!DecoderUtil.areStreamFlagsEqual(streamHeaderFlags,
- streamFooterFlags)
- || indexHash.getIndexSize() != streamFooterFlags.backwardSize)
- throw new CorruptedInputException(
- "XZ Stream Footer does not match Stream Header");
- }
-
- /**
- * Returns the number of uncompressed bytes that can be read
- * without blocking. The value is returned with an assumption
- * that the compressed input data will be valid. If the compressed
- * data is corrupt, CorruptedInputException
may get
- * thrown before the number of bytes claimed to be available have
- * been read from this input stream.
- *
- * @return the number of uncompressed bytes that can be read
- * without blocking
- */
- public int available() throws IOException {
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (exception != null)
- throw exception;
-
- return blockDecoder == null ? 0 : blockDecoder.available();
- }
-
- /**
- * Closes the stream and calls in.close()
.
- * If the stream was already closed, this does nothing.
- * close(true)
.
- *
- * @throws IOException if thrown by in.close()
- */
- public void close() throws IOException {
- close(true);
- }
-
- /**
- * Closes the stream and optionally calls in.close()
.
- * If the stream was already closed, this does nothing.
- * If close(false)
has been called, a further
- * call of close(true)
does nothing (it doesn't call
- * in.close()
).
- * InputStream
,
- * there is usually no need to worry about closing this stream either;
- * it's fine to do nothing and let the garbage collector handle it.
- * However, if you are using {@link ArrayCache}, close(false)
- * can be useful to put the allocated arrays back to the cache without
- * closing the underlying InputStream
.
- * read
returns -1
), the arrays are
- * automatically put back to the cache by that read
call. In
- * this situation close(false)
is redundant (but harmless).
- *
- * @throws IOException if thrown by in.close()
- *
- * @since 1.7
- */
- public void close(boolean closeInput) throws IOException {
- if (in != null) {
- if (blockDecoder != null) {
- blockDecoder.close();
- blockDecoder = null;
- }
-
- try {
- if (closeInput)
- in.close();
- } finally {
- in = null;
- }
- }
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/UncompressedLZMA2OutputStream.java b/app/src/main/java/org/tukaani/xz/UncompressedLZMA2OutputStream.java
deleted file mode 100644
index 5d0e65f..0000000
--- a/app/src/main/java/org/tukaani/xz/UncompressedLZMA2OutputStream.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * UncompressedLZMA2OutputStream
- *
- * Author: Lasse Collin XZIOException
.
- */
-public class XZIOException extends java.io.IOException {
- private static final long serialVersionUID = 3L;
-
- public XZIOException() {
- super();
- }
-
- public XZIOException(String s) {
- super(s);
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/XZInputStream.java b/app/src/main/java/org/tukaani/xz/XZInputStream.java
deleted file mode 100644
index 30374eb..0000000
--- a/app/src/main/java/org/tukaani/xz/XZInputStream.java
+++ /dev/null
@@ -1,527 +0,0 @@
-/*
- * XZInputStream
- *
- * Author: Lasse Collin Typical use cases
- *
- *
- * InputStream infile = new FileInputStream("foo.xz");
- * XZInputStream inxz = new XZInputStream(infile);
- *
XZInputStream
. If decompression requires more memory than
- * the specified limit, MemoryLimitException will be thrown when reading
- * from the stream. For example, the following sets the memory usage limit
- * to 100 MiB:
- *
- *
- *
- * InputStream infile = new FileInputStream("foo.xz");
- * XZInputStream inxz = new XZInputStream(infile, 100 * 1024);
- *
When uncompressed size is known beforehand
- * read()
and checking
- * that it returns -1
. This way the decompressor will parse the
- * file footers and verify the integrity checks, giving the caller more
- * confidence that the uncompressed data is valid. (This advice seems to
- * apply to
- * {@link java.util.zip.GZIPInputStream java.util.zip.GZIPInputStream} too.)
- *
- * @see SingleXZInputStream
- */
-public class XZInputStream extends InputStream {
- private final ArrayCache arrayCache;
-
- private final int memoryLimit;
- private InputStream in;
- private SingleXZInputStream xzIn;
- private final boolean verifyCheck;
- private boolean endReached = false;
- private IOException exception = null;
-
- private final byte[] tempBuf = new byte[1];
-
- /**
- * Creates a new XZ decompressor without a memory usage limit.
- * in
. The header of the first Block is not read
- * until read
is called.
- *
- * @param in input stream from which XZ-compressed
- * data is read
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ header CRC32 doesn't match
- *
- * @throws UnsupportedOptionsException
- * XZ header is valid but specifies options
- * not supported by this implementation
- *
- * @throws EOFException
- * less than 12 bytes of input was available
- * from in
- *
- * @throws IOException may be thrown by in
- */
- public XZInputStream(InputStream in) throws IOException {
- this(in, -1);
- }
-
- /**
- * Creates a new XZ decompressor without a memory usage limit.
- * XZInputStream(InputStream)
- * except that this takes also the arrayCache
argument.
- *
- * @param in input stream from which XZ-compressed
- * data is read
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ header CRC32 doesn't match
- *
- * @throws UnsupportedOptionsException
- * XZ header is valid but specifies options
- * not supported by this implementation
- *
- * @throws EOFException
- * less than 12 bytes of input was available
- * from in
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public XZInputStream(InputStream in, ArrayCache arrayCache)
- throws IOException {
- this(in, -1, arrayCache);
- }
-
- /**
- * Creates a new XZ decompressor with an optional memory usage limit.
- * XZInputStream(InputStream)
except
- * that this takes also the memoryLimit
argument.
- *
- * @param in input stream from which XZ-compressed
- * data is read
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ header CRC32 doesn't match
- *
- * @throws UnsupportedOptionsException
- * XZ header is valid but specifies options
- * not supported by this implementation
- *
- * @throws EOFException
- * less than 12 bytes of input was available
- * from in
- *
- * @throws IOException may be thrown by in
- */
- public XZInputStream(InputStream in, int memoryLimit) throws IOException {
- this(in, memoryLimit, true);
- }
-
- /**
- * Creates a new XZ decompressor with an optional memory usage limit.
- * XZInputStream(InputStream)
except
- * that this takes also the memoryLimit
and
- * arrayCache
arguments.
- *
- * @param in input stream from which XZ-compressed
- * data is read
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ header CRC32 doesn't match
- *
- * @throws UnsupportedOptionsException
- * XZ header is valid but specifies options
- * not supported by this implementation
- *
- * @throws EOFException
- * less than 12 bytes of input was available
- * from in
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public XZInputStream(InputStream in, int memoryLimit,
- ArrayCache arrayCache) throws IOException {
- this(in, memoryLimit, true, arrayCache);
- }
-
- /**
- * Creates a new XZ decompressor with an optional memory usage limit
- * and ability to disable verification of integrity checks.
- * XZInputStream(InputStream,int)
except
- * that this takes also the verifyCheck
argument.
- *
- *
- * verifyCheck
only affects the integrity check of
- * the actual compressed data. The CRC32 fields in the headers
- * are always verified.
- *
- * @param in input stream from which XZ-compressed
- * data is read
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @param verifyCheck if true
, the integrity checks
- * will be verified; this should almost never
- * be set to false
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ header CRC32 doesn't match
- *
- * @throws UnsupportedOptionsException
- * XZ header is valid but specifies options
- * not supported by this implementation
- *
- * @throws EOFException
- * less than 12 bytes of input was available
- * from in
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.6
- */
- public XZInputStream(InputStream in, int memoryLimit, boolean verifyCheck)
- throws IOException {
- this(in, memoryLimit, verifyCheck, ArrayCache.getDefaultCache());
- }
-
- /**
- * Creates a new XZ decompressor with an optional memory usage limit
- * and ability to disable verification of integrity checks.
- * XZInputStream(InputStream,int,boolean)
- * except that this takes also the arrayCache
argument.
- *
- * @param in input stream from which XZ-compressed
- * data is read
- *
- * @param memoryLimit memory usage limit in kibibytes (KiB)
- * or -1
to impose no
- * memory usage limit
- *
- * @param verifyCheck if true
, the integrity checks
- * will be verified; this should almost never
- * be set to false
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws XZFormatException
- * input is not in the XZ format
- *
- * @throws CorruptedInputException
- * XZ header CRC32 doesn't match
- *
- * @throws UnsupportedOptionsException
- * XZ header is valid but specifies options
- * not supported by this implementation
- *
- * @throws EOFException
- * less than 12 bytes of input was available
- * from in
- *
- * @throws IOException may be thrown by in
- *
- * @since 1.7
- */
- public XZInputStream(InputStream in, int memoryLimit, boolean verifyCheck,
- ArrayCache arrayCache) throws IOException {
- this.arrayCache = arrayCache;
- this.in = in;
- this.memoryLimit = memoryLimit;
- this.verifyCheck = verifyCheck;
- this.xzIn = new SingleXZInputStream(in, memoryLimit, verifyCheck,
- arrayCache);
- }
-
- /**
- * Decompresses the next byte from this input stream.
- * read()
from this input stream
- * may be inefficient. Wrap it in {@link java.io.BufferedInputStream}
- * if you need to read lots of data one byte at a time.
- *
- * @return the next decompressed byte, or -1
- * to indicate the end of the compressed stream
- *
- * @throws CorruptedInputException
- * @throws UnsupportedOptionsException
- * @throws MemoryLimitException
- *
- * @throws XZIOException if the stream has been closed
- *
- * @throws EOFException
- * compressed input is truncated or corrupt
- *
- * @throws IOException may be thrown by in
- */
- public int read() throws IOException {
- return read(tempBuf, 0, 1) == -1 ? -1 : (tempBuf[0] & 0xFF);
- }
-
- /**
- * Decompresses into an array of bytes.
- * len
is zero, no bytes are read and 0
- * is returned. Otherwise this will try to decompress len
- * bytes of uncompressed data. Less than len
bytes may
- * be read only in the following situations:
- *
- *
- *
- * @param buf target buffer for uncompressed data
- * @param off start offset in len
- * bytes have already been successfully decompressed.
- * The next call with non-zero len
will immediately
- * throw the pending exception.buf
- * @param len maximum number of uncompressed bytes to read
- *
- * @return number of bytes read, or -1
to indicate
- * the end of the compressed stream
- *
- * @throws CorruptedInputException
- * @throws UnsupportedOptionsException
- * @throws MemoryLimitException
- *
- * @throws XZIOException if the stream has been closed
- *
- * @throws EOFException
- * compressed input is truncated or corrupt
- *
- * @throws IOException may be thrown by in
- */
- public int read(byte[] buf, int off, int len) throws IOException {
- if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
- throw new IndexOutOfBoundsException();
-
- if (len == 0)
- return 0;
-
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (exception != null)
- throw exception;
-
- if (endReached)
- return -1;
-
- int size = 0;
-
- try {
- while (len > 0) {
- if (xzIn == null) {
- prepareNextStream();
- if (endReached)
- return size == 0 ? -1 : size;
- }
-
- int ret = xzIn.read(buf, off, len);
-
- if (ret > 0) {
- size += ret;
- off += ret;
- len -= ret;
- } else if (ret == -1) {
- xzIn = null;
- }
- }
- } catch (IOException e) {
- exception = e;
- if (size == 0)
- throw e;
- }
-
- return size;
- }
-
- private void prepareNextStream() throws IOException {
- DataInputStream inData = new DataInputStream(in);
- byte[] buf = new byte[DecoderUtil.STREAM_HEADER_SIZE];
-
- // The size of Stream Padding must be a multiple of four bytes,
- // all bytes zero.
- do {
- // First try to read one byte to see if we have reached the end
- // of the file.
- int ret = inData.read(buf, 0, 1);
- if (ret == -1) {
- endReached = true;
- return;
- }
-
- // Since we got one byte of input, there must be at least
- // three more available in a valid file.
- inData.readFully(buf, 1, 3);
-
- } while (buf[0] == 0 && buf[1] == 0 && buf[2] == 0 && buf[3] == 0);
-
- // Not all bytes are zero. In a valid Stream it indicates the
- // beginning of the next Stream. Read the rest of the Stream Header
- // and initialize the XZ decoder.
- inData.readFully(buf, 4, DecoderUtil.STREAM_HEADER_SIZE - 4);
-
- try {
- xzIn = new SingleXZInputStream(in, memoryLimit, verifyCheck, buf,
- arrayCache);
- } catch (XZFormatException e) {
- // Since this isn't the first .xz Stream, it is more
- // logical to tell that the data is corrupt.
- throw new CorruptedInputException(
- "Garbage after a valid XZ Stream");
- }
- }
-
- /**
- * Returns the number of uncompressed bytes that can be read
- * without blocking. The value is returned with an assumption
- * that the compressed input data will be valid. If the compressed
- * data is corrupt, CorruptedInputException
may get
- * thrown before the number of bytes claimed to be available have
- * been read from this input stream.
- *
- * @return the number of uncompressed bytes that can be read
- * without blocking
- */
- public int available() throws IOException {
- if (in == null)
- throw new XZIOException("Stream closed");
-
- if (exception != null)
- throw exception;
-
- return xzIn == null ? 0 : xzIn.available();
- }
-
- /**
- * Closes the stream and calls in.close()
.
- * If the stream was already closed, this does nothing.
- * close(true)
.
- *
- * @throws IOException if thrown by in.close()
- */
- public void close() throws IOException {
- close(true);
- }
-
- /**
- * Closes the stream and optionally calls in.close()
.
- * If the stream was already closed, this does nothing.
- * If close(false)
has been called, a further
- * call of close(true)
does nothing (it doesn't call
- * in.close()
).
- * InputStream
,
- * there is usually no need to worry about closing this stream either;
- * it's fine to do nothing and let the garbage collector handle it.
- * However, if you are using {@link ArrayCache}, close(false)
- * can be useful to put the allocated arrays back to the cache without
- * closing the underlying InputStream
.
- * read
returns -1
), the arrays are
- * automatically put back to the cache by that read
call. In
- * this situation close(false)
is redundant (but harmless).
- *
- * @throws IOException if thrown by in.close()
- *
- * @since 1.7
- */
- public void close(boolean closeInput) throws IOException {
- if (in != null) {
- if (xzIn != null) {
- xzIn.close(false);
- xzIn = null;
- }
-
- try {
- if (closeInput)
- in.close();
- } finally {
- in = null;
- }
- }
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/XZOutputStream.java b/app/src/main/java/org/tukaani/xz/XZOutputStream.java
deleted file mode 100644
index 63cf5cf..0000000
--- a/app/src/main/java/org/tukaani/xz/XZOutputStream.java
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * XZOutputStream
- *
- * Author: Lasse Collin Examples
- *
- *
- * FileOutputStream outfile = new FileOutputStream("foo.xz");
- * XZOutputStream outxz = new XZOutputStream(outfile, new LZMA2Options());
- *
8
for LZMA2 (the default
- * is 6
) and SHA-256 instead of CRC64 for integrity checking:
- *
- *
- * XZOutputStream outxz = new XZOutputStream(outfile, new LZMA2Options(8),
- * XZ.CHECK_SHA256);
- *
- */
-public class XZOutputStream extends FinishableOutputStream {
- private final ArrayCache arrayCache;
-
- private OutputStream out;
- private final StreamFlags streamFlags = new StreamFlags();
- private final Check check;
- private final IndexEncoder index = new IndexEncoder();
-
- private BlockOutputStream blockEncoder = null;
- private FilterEncoder[] filters;
-
- /**
- * True if the current filter chain supports flushing.
- * If it doesn't support flushing,
- * X86Options x86 = new X86Options();
- * LZMA2Options lzma2 = new LZMA2Options();
- * FilterOptions[] options = { x86, lzma2 };
- * System.out.println("Encoder memory usage: "
- * + FilterOptions.getEncoderMemoryUsage(options)
- * + " KiB");
- * System.out.println("Decoder memory usage: "
- * + FilterOptions.getDecoderMemoryUsage(options)
- * + " KiB");
- * XZOutputStream outxz = new XZOutputStream(outfile, options);
- *
flush()
- * will use endBlock()
as a fallback.
- */
- private boolean filtersSupportFlushing;
-
- private IOException exception = null;
- private boolean finished = false;
-
- private final byte[] tempBuf = new byte[1];
-
- /**
- * Creates a new XZ compressor using one filter and CRC64 as
- * the integrity check. This constructor is equivalent to passing
- * a single-member FilterOptions array to
- * XZOutputStream(OutputStream, FilterOptions[])
.
- *
- * @param out output stream to which the compressed data
- * will be written
- *
- * @param filterOptions
- * filter options to use
- *
- * @throws UnsupportedOptionsException
- * invalid filter chain
- *
- * @throws IOException may be thrown from out
- */
- public XZOutputStream(OutputStream out, FilterOptions filterOptions)
- throws IOException {
- this(out, filterOptions, XZ.CHECK_CRC64);
- }
-
- /**
- * Creates a new XZ compressor using one filter and CRC64 as
- * the integrity check. This constructor is equivalent to passing
- * a single-member FilterOptions array to
- * XZOutputStream(OutputStream, FilterOptions[], ArrayCache)
.
- *
- * @param out output stream to which the compressed data
- * will be written
- *
- * @param filterOptions
- * filter options to use
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws UnsupportedOptionsException
- * invalid filter chain
- *
- * @throws IOException may be thrown from out
- *
- * @since 1.7
- */
- public XZOutputStream(OutputStream out, FilterOptions filterOptions,
- ArrayCache arrayCache)
- throws IOException {
- this(out, filterOptions, XZ.CHECK_CRC64, arrayCache);
- }
-
- /**
- * Creates a new XZ compressor using one filter and the specified
- * integrity check type. This constructor is equivalent to
- * passing a single-member FilterOptions array to
- * XZOutputStream(OutputStream, FilterOptions[], int)
.
- *
- * @param out output stream to which the compressed data
- * will be written
- *
- * @param filterOptions
- * filter options to use
- *
- * @param checkType type of the integrity check,
- * for example XZ.CHECK_CRC32
- *
- * @throws UnsupportedOptionsException
- * invalid filter chain
- *
- * @throws IOException may be thrown from out
- */
- public XZOutputStream(OutputStream out, FilterOptions filterOptions,
- int checkType) throws IOException {
- this(out, new FilterOptions[] { filterOptions }, checkType);
- }
-
- /**
- * Creates a new XZ compressor using one filter and the specified
- * integrity check type. This constructor is equivalent to
- * passing a single-member FilterOptions array to
- * XZOutputStream(OutputStream, FilterOptions[], int,
- * ArrayCache)
.
- *
- * @param out output stream to which the compressed data
- * will be written
- *
- * @param filterOptions
- * filter options to use
- *
- * @param checkType type of the integrity check,
- * for example XZ.CHECK_CRC32
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws UnsupportedOptionsException
- * invalid filter chain
- *
- * @throws IOException may be thrown from out
- *
- * @since 1.7
- */
- public XZOutputStream(OutputStream out, FilterOptions filterOptions,
- int checkType, ArrayCache arrayCache)
- throws IOException {
- this(out, new FilterOptions[] { filterOptions }, checkType,
- arrayCache);
- }
-
- /**
- * Creates a new XZ compressor using 1-4 filters and CRC64 as
- * the integrity check. This constructor is equivalent
- * XZOutputStream(out, filterOptions, XZ.CHECK_CRC64)
.
- *
- * @param out output stream to which the compressed data
- * will be written
- *
- * @param filterOptions
- * array of filter options to use
- *
- * @throws UnsupportedOptionsException
- * invalid filter chain
- *
- * @throws IOException may be thrown from out
- */
- public XZOutputStream(OutputStream out, FilterOptions[] filterOptions)
- throws IOException {
- this(out, filterOptions, XZ.CHECK_CRC64);
- }
-
- /**
- * Creates a new XZ compressor using 1-4 filters and CRC64 as
- * the integrity check. This constructor is equivalent
- * XZOutputStream(out, filterOptions, XZ.CHECK_CRC64,
- * arrayCache)
.
- *
- * @param out output stream to which the compressed data
- * will be written
- *
- * @param filterOptions
- * array of filter options to use
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws UnsupportedOptionsException
- * invalid filter chain
- *
- * @throws IOException may be thrown from out
- *
- * @since 1.7
- */
- public XZOutputStream(OutputStream out, FilterOptions[] filterOptions,
- ArrayCache arrayCache)
- throws IOException {
- this(out, filterOptions, XZ.CHECK_CRC64, arrayCache);
- }
-
- /**
- * Creates a new XZ compressor using 1-4 filters and the specified
- * integrity check type.
- *
- * @param out output stream to which the compressed data
- * will be written
- *
- * @param filterOptions
- * array of filter options to use
- *
- * @param checkType type of the integrity check,
- * for example XZ.CHECK_CRC32
- *
- * @throws UnsupportedOptionsException
- * invalid filter chain
- *
- * @throws IOException may be thrown from out
- */
- public XZOutputStream(OutputStream out, FilterOptions[] filterOptions,
- int checkType) throws IOException {
- this(out, filterOptions, checkType, ArrayCache.getDefaultCache());
- }
-
- /**
- * Creates a new XZ compressor using 1-4 filters and the specified
- * integrity check type.
- *
- * @param out output stream to which the compressed data
- * will be written
- *
- * @param filterOptions
- * array of filter options to use
- *
- * @param checkType type of the integrity check,
- * for example XZ.CHECK_CRC32
- *
- * @param arrayCache cache to be used for allocating large arrays
- *
- * @throws UnsupportedOptionsException
- * invalid filter chain
- *
- * @throws IOException may be thrown from out
- *
- * @since 1.7
- */
- public XZOutputStream(OutputStream out, FilterOptions[] filterOptions,
- int checkType, ArrayCache arrayCache)
- throws IOException {
- this.arrayCache = arrayCache;
- this.out = out;
- updateFilters(filterOptions);
-
- streamFlags.checkType = checkType;
- check = Check.getInstance(checkType);
-
- encodeStreamHeader();
- }
-
- /**
- * Updates the filter chain with a single filter.
- * This is equivalent to passing a single-member FilterOptions array
- * to updateFilters(FilterOptions[])
.
- *
- * @param filterOptions
- * new filter to use
- *
- * @throws UnsupportedOptionsException
- * unsupported filter chain, or trying to change
- * the filter chain in the middle of a Block
- */
- public void updateFilters(FilterOptions filterOptions)
- throws XZIOException {
- FilterOptions[] opts = new FilterOptions[1];
- opts[0] = filterOptions;
- updateFilters(opts);
- }
-
- /**
- * Updates the filter chain with 1-4 filters.
- * endBlock()
to finish the
- * current XZ Block before calling this function. The new filter chain
- * will then be used for the next XZ Block.
- *
- * @param filterOptions
- * new filter chain to use
- *
- * @throws UnsupportedOptionsException
- * unsupported filter chain, or trying to change
- * the filter chain in the middle of a Block
- */
- public void updateFilters(FilterOptions[] filterOptions)
- throws XZIOException {
- if (blockEncoder != null)
- throw new UnsupportedOptionsException("Changing filter options "
- + "in the middle of a XZ Block not implemented");
-
- if (filterOptions.length < 1 || filterOptions.length > 4)
- throw new UnsupportedOptionsException(
- "XZ filter chain must be 1-4 filters");
-
- filtersSupportFlushing = true;
- FilterEncoder[] newFilters = new FilterEncoder[filterOptions.length];
- for (int i = 0; i < filterOptions.length; ++i) {
- newFilters[i] = filterOptions[i].getFilterEncoder();
- filtersSupportFlushing &= newFilters[i].supportsFlushing();
- }
-
- RawCoder.validate(newFilters);
- filters = newFilters;
- }
-
- /**
- * Writes one byte to be compressed.
- *
- * @throws XZIOException
- * XZ Stream has grown too big
- *
- * @throws XZIOException
- * finish()
or close()
- * was already called
- *
- * @throws IOException may be thrown by the underlying output stream
- */
- public void write(int b) throws IOException {
- tempBuf[0] = (byte)b;
- write(tempBuf, 0, 1);
- }
-
- /**
- * Writes an array of bytes to be compressed.
- * The compressors tend to do internal buffering and thus the written
- * data won't be readable from the compressed output immediately.
- * Use flush()
to force everything written so far to
- * be written to the underlaying output stream, but be aware that
- * flushing reduces compression ratio.
- *
- * @param buf buffer of bytes to be written
- * @param off start offset in buf
- * @param len number of bytes to write
- *
- * @throws XZIOException
- * XZ Stream has grown too big: total file size
- * about 8 EiB or the Index field exceeds
- * 16 GiB; you shouldn't reach these sizes
- * in practice
- *
- * @throws XZIOException
- * finish()
or close()
- * was already called and len > 0
- *
- * @throws IOException may be thrown by the underlying output stream
- */
- public void write(byte[] buf, int off, int len) throws IOException {
- if (off < 0 || len < 0 || off + len < 0 || off + len > buf.length)
- throw new IndexOutOfBoundsException();
-
- if (exception != null)
- throw exception;
-
- if (finished)
- throw new XZIOException("Stream finished or closed");
-
- try {
- if (blockEncoder == null)
- blockEncoder = new BlockOutputStream(out, filters, check,
- arrayCache);
-
- blockEncoder.write(buf, off, len);
- } catch (IOException e) {
- exception = e;
- throw e;
- }
- }
-
- /**
- * Finishes the current XZ Block (but not the whole XZ Stream).
- * This doesn't flush the stream so it's possible that not all data will
- * be decompressible from the output stream when this function returns.
- * Call also flush()
if flushing is wanted in addition to
- * finishing the current XZ Block.
- * flush()
would do).
- *
- * @throws XZIOException
- * XZ Stream has grown too big
- *
- * @throws XZIOException
- * stream finished or closed
- *
- * @throws IOException may be thrown by the underlying output stream
- */
- public void endBlock() throws IOException {
- if (exception != null)
- throw exception;
-
- if (finished)
- throw new XZIOException("Stream finished or closed");
-
- // NOTE: Once there is threading with multiple Blocks, it's possible
- // that this function will be more like a barrier that returns
- // before the last Block has been finished.
- if (blockEncoder != null) {
- try {
- blockEncoder.finish();
- index.add(blockEncoder.getUnpaddedSize(),
- blockEncoder.getUncompressedSize());
- blockEncoder = null;
- } catch (IOException e) {
- exception = e;
- throw e;
- }
- }
- }
-
- /**
- * Flushes the encoder and calls out.flush()
.
- * All buffered pending data will then be decompressible from
- * the output stream.
- * flush()
will call endBlock()
- * before flushing.
- *
- * @throws XZIOException
- * XZ Stream has grown too big
- *
- * @throws XZIOException
- * stream finished or closed
- *
- * @throws IOException may be thrown by the underlying output stream
- */
- public void flush() throws IOException {
- if (exception != null)
- throw exception;
-
- if (finished)
- throw new XZIOException("Stream finished or closed");
-
- try {
- if (blockEncoder != null) {
- if (filtersSupportFlushing) {
- // This will eventually call out.flush() so
- // no need to do it here again.
- blockEncoder.flush();
- } else {
- endBlock();
- out.flush();
- }
- } else {
- out.flush();
- }
- } catch (IOException e) {
- exception = e;
- throw e;
- }
- }
-
- /**
- * Finishes compression without closing the underlying stream.
- * No more data can be written to this stream after finishing
- * (calling write
with an empty buffer is OK).
- * finish()
do nothing unless
- * an exception was thrown by this stream earlier. In that case
- * the same exception is thrown again.
- * close()
. If the stream will be closed anyway, there
- * usually is no need to call finish()
separately.
- *
- * @throws XZIOException
- * XZ Stream has grown too big
- *
- * @throws IOException may be thrown by the underlying output stream
- */
- public void finish() throws IOException {
- if (!finished) {
- // This checks for pending exceptions so we don't need to
- // worry about it here.
- endBlock();
-
- try {
- index.encode(out);
- encodeStreamFooter();
- } catch (IOException e) {
- exception = e;
- throw e;
- }
-
- // Set it to true only if everything goes fine. Setting it earlier
- // would cause repeated calls to finish() do nothing instead of
- // throwing an exception to indicate an earlier error.
- finished = true;
- }
- }
-
- /**
- * Finishes compression and closes the underlying stream.
- * The underlying stream out
is closed even if finishing
- * fails. If both finishing and closing fail, the exception thrown
- * by finish()
is thrown and the exception from the failed
- * out.close()
is lost.
- *
- * @throws XZIOException
- * XZ Stream has grown too big
- *
- * @throws IOException may be thrown by the underlying output stream
- */
- public void close() throws IOException {
- if (out != null) {
- // If finish() throws an exception, it stores the exception to
- // the variable "exception". So we can ignore the possible
- // exception here.
- try {
- finish();
- } catch (IOException e) {}
-
- try {
- out.close();
- } catch (IOException e) {
- // Remember the exception but only if there is no previous
- // pending exception.
- if (exception == null)
- exception = e;
- }
-
- out = null;
- }
-
- if (exception != null)
- throw exception;
- }
-
- private void encodeStreamFlags(byte[] buf, int off) {
- buf[off] = 0x00;
- buf[off + 1] = (byte)streamFlags.checkType;
- }
-
- private void encodeStreamHeader() throws IOException {
- out.write(XZ.HEADER_MAGIC);
-
- byte[] buf = new byte[2];
- encodeStreamFlags(buf, 0);
- out.write(buf);
-
- EncoderUtil.writeCRC32(out, buf);
- }
-
- private void encodeStreamFooter() throws IOException {
- byte[] buf = new byte[6];
- long backwardSize = index.getIndexSize() / 4 - 1;
- for (int i = 0; i < 4; ++i)
- buf[i] = (byte)(backwardSize >>> (i * 8));
-
- encodeStreamFlags(buf, 4);
-
- EncoderUtil.writeCRC32(out, buf);
- out.write(buf);
- out.write(XZ.FOOTER_MAGIC);
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/check/CRC32.java b/app/src/main/java/org/tukaani/xz/check/CRC32.java
deleted file mode 100644
index f182898..0000000
--- a/app/src/main/java/org/tukaani/xz/check/CRC32.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * CRC32
- *
- * Author: Lasse Collin LZEncoder.getInstance
for parameter descriptions.
- */
- HC4(int dictSize, int beforeSizeMin, int readAheadMax,
- int niceLen, int matchLenMax, int depthLimit,
- ArrayCache arrayCache) {
- super(dictSize, beforeSizeMin, readAheadMax, niceLen, matchLenMax,
- arrayCache);
-
- hash = new Hash234(dictSize, arrayCache);
-
- // +1 because we need dictSize bytes of history + the current byte.
- cyclicSize = dictSize + 1;
- chain = arrayCache.getIntArray(cyclicSize, false);
- lzPos = cyclicSize;
-
- // Substracting 1 because the shortest match that this match
- // finder can find is 2 bytes, so there's no need to reserve
- // space for one-byte matches.
- matches = new Matches(niceLen - 1);
-
- // Use a default depth limit if no other value was specified.
- // The default is just something based on experimentation;
- // it's nothing magic.
- this.depthLimit = (depthLimit > 0) ? depthLimit : 4 + niceLen / 4;
- }
-
- public void putArraysToCache(ArrayCache arrayCache) {
- arrayCache.putArray(chain);
- hash.putArraysToCache(arrayCache);
- super.putArraysToCache(arrayCache);
- }
-
- /**
- * Moves to the next byte, checks that there is enough available space,
- * and possibly normalizes the hash tables and the hash chain.
- *
- * @return number of bytes available, including the current byte
- */
- private int movePos() {
- int avail = movePos(4, 4);
-
- if (avail != 0) {
- if (++lzPos == Integer.MAX_VALUE) {
- int normalizationOffset = Integer.MAX_VALUE - cyclicSize;
- hash.normalize(normalizationOffset);
- normalize(chain, cyclicSize, normalizationOffset);
- lzPos -= normalizationOffset;
- }
-
- if (++cyclicPos == cyclicSize)
- cyclicPos = 0;
- }
-
- return avail;
- }
-
- public Matches getMatches() {
- matches.count = 0;
- int matchLenLimit = matchLenMax;
- int niceLenLimit = niceLen;
- int avail = movePos();
-
- if (avail < matchLenLimit) {
- if (avail == 0)
- return matches;
-
- matchLenLimit = avail;
- if (niceLenLimit > avail)
- niceLenLimit = avail;
- }
-
- hash.calcHashes(buf, readPos);
- int delta2 = lzPos - hash.getHash2Pos();
- int delta3 = lzPos - hash.getHash3Pos();
- int currentMatch = hash.getHash4Pos();
- hash.updateTables(lzPos);
-
- chain[cyclicPos] = currentMatch;
-
- int lenBest = 0;
-
- // See if the hash from the first two bytes found a match.
- // The hashing algorithm guarantees that if the first byte
- // matches, also the second byte does, so there's no need to
- // test the second byte.
- if (delta2 < cyclicSize && buf[readPos - delta2] == buf[readPos]) {
- lenBest = 2;
- matches.len[0] = 2;
- matches.dist[0] = delta2 - 1;
- matches.count = 1;
- }
-
- // See if the hash from the first three bytes found a match that
- // is different from the match possibly found by the two-byte hash.
- // Also here the hashing algorithm guarantees that if the first byte
- // matches, also the next two bytes do.
- if (delta2 != delta3 && delta3 < cyclicSize
- && buf[readPos - delta3] == buf[readPos]) {
- lenBest = 3;
- matches.dist[matches.count++] = delta3 - 1;
- delta2 = delta3;
- }
-
- // If a match was found, see how long it is.
- if (matches.count > 0) {
- while (lenBest < matchLenLimit && buf[readPos + lenBest - delta2]
- == buf[readPos + lenBest])
- ++lenBest;
-
- matches.len[matches.count - 1] = lenBest;
-
- // Return if it is long enough (niceLen or reached the end of
- // the dictionary).
- if (lenBest >= niceLenLimit)
- return matches;
- }
-
- // Long enough match wasn't found so easily. Look for better matches
- // from the hash chain.
- if (lenBest < 3)
- lenBest = 3;
-
- int depth = depthLimit;
-
- while (true) {
- int delta = lzPos - currentMatch;
-
- // Return if the search depth limit has been reached or
- // if the distance of the potential match exceeds the
- // dictionary size.
- if (depth-- == 0 || delta >= cyclicSize)
- return matches;
-
- currentMatch = chain[cyclicPos - delta
- + (delta > cyclicPos ? cyclicSize : 0)];
-
- // Test the first byte and the first new byte that would give us
- // a match that is at least one byte longer than lenBest. This
- // too short matches get quickly skipped.
- if (buf[readPos + lenBest - delta] == buf[readPos + lenBest]
- && buf[readPos - delta] == buf[readPos]) {
- // Calculate the length of the match.
- int len = 0;
- while (++len < matchLenLimit)
- if (buf[readPos + len - delta] != buf[readPos + len])
- break;
-
- // Use the match if and only if it is better than the longest
- // match found so far.
- if (len > lenBest) {
- lenBest = len;
- matches.len[matches.count] = len;
- matches.dist[matches.count] = delta - 1;
- ++matches.count;
-
- // Return if it is long enough (niceLen or reached the
- // end of the dictionary).
- if (len >= niceLenLimit)
- return matches;
- }
- }
- }
- }
-
- public void skip(int len) {
- assert len >= 0;
-
- while (len-- > 0) {
- if (movePos() != 0) {
- // Update the hash chain and hash tables.
- hash.calcHashes(buf, readPos);
- chain[cyclicPos] = hash.getHash4Pos();
- hash.updateTables(lzPos);
- }
- }
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/lz/Hash234.java b/app/src/main/java/org/tukaani/xz/lz/Hash234.java
deleted file mode 100644
index bfa51b0..0000000
--- a/app/src/main/java/org/tukaani/xz/lz/Hash234.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * 2-, 3-, and 4-byte hashing
- *
- * Authors: Lasse Collin niceLen
- * bytes is found, be happy with it and don't
- * stop looking for longer matches
- *
- * @param matchLenMax don't test for matches longer than
- * matchLenMax
bytes
- *
- * @param mf match finder ID
- *
- * @param depthLimit match finder search depth limit
- */
- public static LZEncoder getInstance(
- int dictSize, int extraSizeBefore, int extraSizeAfter,
- int niceLen, int matchLenMax, int mf, int depthLimit,
- ArrayCache arrayCache) {
- switch (mf) {
- case MF_HC4:
- return new HC4(dictSize, extraSizeBefore, extraSizeAfter,
- niceLen, matchLenMax, depthLimit, arrayCache);
-
- case MF_BT4:
- return new BT4(dictSize, extraSizeBefore, extraSizeAfter,
- niceLen, matchLenMax, depthLimit, arrayCache);
- }
-
- throw new IllegalArgumentException();
- }
-
- /**
- * Creates a new LZEncoder. See getInstance
.
- */
- LZEncoder(int dictSize, int extraSizeBefore, int extraSizeAfter,
- int niceLen, int matchLenMax, ArrayCache arrayCache) {
- bufSize = getBufSize(dictSize, extraSizeBefore, extraSizeAfter,
- matchLenMax);
- buf = arrayCache.getByteArray(bufSize, false);
-
- keepSizeBefore = extraSizeBefore + dictSize;
- keepSizeAfter = extraSizeAfter + matchLenMax;
-
- this.matchLenMax = matchLenMax;
- this.niceLen = niceLen;
- }
-
- public void putArraysToCache(ArrayCache arrayCache) {
- arrayCache.putArray(buf);
- }
-
- /**
- * Sets a preset dictionary. If a preset dictionary is wanted, this
- * function must be called immediately after creating the LZEncoder
- * before any data has been encoded.
- */
- public void setPresetDict(int dictSize, byte[] presetDict) {
- assert !isStarted();
- assert writePos == 0;
-
- if (presetDict != null) {
- // If the preset dictionary buffer is bigger than the dictionary
- // size, copy only the tail of the preset dictionary.
- int copySize = Math.min(presetDict.length, dictSize);
- int offset = presetDict.length - copySize;
- System.arraycopy(presetDict, offset, buf, 0, copySize);
- writePos += copySize;
- skip(copySize);
- }
- }
-
- /**
- * Moves data from the end of the buffer to the beginning, discarding
- * old data and making space for new input.
- */
- private void moveWindow() {
- // Align the move to a multiple of 16 bytes. LZMA2 needs this
- // because it uses the lowest bits from readPos to get the
- // alignment of the uncompressed data.
- int moveOffset = (readPos + 1 - keepSizeBefore) & ~15;
- int moveSize = writePos - moveOffset;
- System.arraycopy(buf, moveOffset, buf, 0, moveSize);
-
- readPos -= moveOffset;
- readLimit -= moveOffset;
- writePos -= moveOffset;
- }
-
- /**
- * Copies new data into the LZEncoder's buffer.
- */
- public int fillWindow(byte[] in, int off, int len) {
- assert !finishing;
-
- // Move the sliding window if needed.
- if (readPos >= bufSize - keepSizeAfter)
- moveWindow();
-
- // Try to fill the dictionary buffer. If it becomes full,
- // some of the input bytes may be left unused.
- if (len > bufSize - writePos)
- len = bufSize - writePos;
-
- System.arraycopy(in, off, buf, writePos, len);
- writePos += len;
-
- // Set the new readLimit but only if there's enough data to allow
- // encoding of at least one more byte.
- if (writePos >= keepSizeAfter)
- readLimit = writePos - keepSizeAfter;
-
- processPendingBytes();
-
- // Tell the caller how much input we actually copied into
- // the dictionary.
- return len;
- }
-
- /**
- * Process pending bytes remaining from preset dictionary initialization
- * or encoder flush operation.
- */
- private void processPendingBytes() {
- // After flushing or setting a preset dictionary there will be
- // pending data that hasn't been ran through the match finder yet.
- // Run it through the match finder now if there is enough new data
- // available (readPos < readLimit) that the encoder may encode at
- // least one more input byte. This way we don't waste any time
- // looping in the match finder (and marking the same bytes as
- // pending again) if the application provides very little new data
- // per write call.
- if (pendingSize > 0 && readPos < readLimit) {
- readPos -= pendingSize;
- int oldPendingSize = pendingSize;
- pendingSize = 0;
- skip(oldPendingSize);
- assert pendingSize < oldPendingSize;
- }
- }
-
- /**
- * Returns true if at least one byte has already been run through
- * the match finder.
- */
- public boolean isStarted() {
- return readPos != -1;
- }
-
- /**
- * Marks that all the input needs to be made available in
- * the encoded output.
- */
- public void setFlushing() {
- readLimit = writePos - 1;
- processPendingBytes();
- }
-
- /**
- * Marks that there is no more input remaining. The read position
- * can be advanced until the end of the data.
- */
- public void setFinishing() {
- readLimit = writePos - 1;
- finishing = true;
- processPendingBytes();
- }
-
- /**
- * Tests if there is enough input available to let the caller encode
- * at least one more byte.
- */
- public boolean hasEnoughData(int alreadyReadLen) {
- return readPos - alreadyReadLen < readLimit;
- }
-
- public void copyUncompressed(OutputStream out, int backward, int len)
- throws IOException {
- out.write(buf, readPos + 1 - backward, len);
- }
-
- /**
- * Get the number of bytes available, including the current byte.
- * getMatches
or
- * skip
hasn't been called yet and no preset dictionary
- * is being used.
- */
- public int getAvail() {
- assert isStarted();
- return writePos - readPos;
- }
-
- /**
- * Gets the lowest four bits of the absolute offset of the current byte.
- * Bits other than the lowest four are undefined.
- */
- public int getPos() {
- return readPos;
- }
-
- /**
- * Gets the byte from the given backward offset.
- * 0
, the previous byte
- * at 1
etc. To get a byte at zero-based distance,
- * use getByte(dist + 1)
.
- *
getByte(0, backward)
.
- */
- public int getByte(int backward) {
- return buf[readPos - backward] & 0xFF;
- }
-
- /**
- * Gets the byte from the given forward minus backward offset.
- * The forward offset is added to the current position. This lets
- * one read bytes ahead of the current byte.
- */
- public int getByte(int forward, int backward) {
- return buf[readPos + forward - backward] & 0xFF;
- }
-
- /**
- * Get the length of a match at the given distance.
- *
- * @param dist zero-based distance of the match to test
- * @param lenLimit don't test for a match longer than this
- *
- * @return length of the match; it is in the range [0, lenLimit]
- */
- public int getMatchLen(int dist, int lenLimit) {
- int backPos = readPos - dist - 1;
- int len = 0;
-
- while (len < lenLimit && buf[readPos + len] == buf[backPos + len])
- ++len;
-
- return len;
- }
-
- /**
- * Get the length of a match at the given distance and forward offset.
- *
- * @param forward forward offset
- * @param dist zero-based distance of the match to test
- * @param lenLimit don't test for a match longer than this
- *
- * @return length of the match; it is in the range [0, lenLimit]
- */
- public int getMatchLen(int forward, int dist, int lenLimit) {
- int curPos = readPos + forward;
- int backPos = curPos - dist - 1;
- int len = 0;
-
- while (len < lenLimit && buf[curPos + len] == buf[backPos + len])
- ++len;
-
- return len;
- }
-
- /**
- * Verifies that the matches returned by the match finder are valid.
- * This is meant to be used in an assert statement. This is totally
- * useless for actual encoding since match finder's results should
- * naturally always be valid if it isn't broken.
- *
- * @param matches return value from getMatches
- *
- * @return true if matches are valid, false if match finder is broken
- */
- public boolean verifyMatches(Matches matches) {
- int lenLimit = Math.min(getAvail(), matchLenMax);
-
- for (int i = 0; i < matches.count; ++i)
- if (getMatchLen(matches.dist[i], lenLimit) != matches.len[i])
- return false;
-
- return true;
- }
-
- /**
- * Moves to the next byte, checks if there is enough input available,
- * and returns the amount of input available.
- *
- * @param requiredForFlushing
- * minimum number of available bytes when
- * flushing; encoding may be continued with
- * new input after flushing
- * @param requiredForFinishing
- * minimum number of available bytes when
- * finishing; encoding must not be continued
- * after finishing or the match finder state
- * may be corrupt
- *
- * @return the number of bytes available or zero if there
- * is not enough input available
- */
- int movePos(int requiredForFlushing, int requiredForFinishing) {
- assert requiredForFlushing >= requiredForFinishing;
-
- ++readPos;
- int avail = writePos - readPos;
-
- if (avail < requiredForFlushing) {
- if (avail < requiredForFinishing || !finishing) {
- ++pendingSize;
- avail = 0;
- }
- }
-
- return avail;
- }
-
- /**
- * Runs match finder for the next byte and returns the matches found.
- */
- public abstract Matches getMatches();
-
- /**
- * Skips the given number of bytes in the match finder.
- */
- public abstract void skip(int len);
-}
diff --git a/app/src/main/java/org/tukaani/xz/lz/Matches.java b/app/src/main/java/org/tukaani/xz/lz/Matches.java
deleted file mode 100644
index 2fbee11..0000000
--- a/app/src/main/java/org/tukaani/xz/lz/Matches.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Matches
- *
- * Authors: Lasse Collin LZMA2_UNCOMPRESSED_LIMIT
.
- * LZMA2_COMPRESSED_LIMIT
.
- * back
.
- * back == -1
and return value is 1
.
- * The literal itself needs to be read from lz
separately.
- * back
is in the range [0, 3] and
- * the return value is the length of the repeated match.
- * back - REPS
(
back - 4
)
- * is the distance of the match and the return value is the length
- * of the match.
- */
- abstract int getNextSymbol();
-
- LZMAEncoder(RangeEncoder rc, LZEncoder lz,
- int lc, int lp, int pb, int dictSize, int niceLen) {
- super(pb);
- this.rc = rc;
- this.lz = lz;
- this.niceLen = niceLen;
-
- literalEncoder = new LiteralEncoder(lc, lp);
- matchLenEncoder = new LengthEncoder(pb, niceLen);
- repLenEncoder = new LengthEncoder(pb, niceLen);
-
- distSlotPricesSize = getDistSlot(dictSize - 1) + 1;
- distSlotPrices = new int[DIST_STATES][distSlotPricesSize];
-
- reset();
- }
-
- public LZEncoder getLZEncoder() {
- return lz;
- }
-
- public void reset() {
- super.reset();
- literalEncoder.reset();
- matchLenEncoder.reset();
- repLenEncoder.reset();
- distPriceCount = 0;
- alignPriceCount = 0;
-
- uncompressedSize += readAhead + 1;
- readAhead = -1;
- }
-
- public int getUncompressedSize() {
- return uncompressedSize;
- }
-
- public void resetUncompressedSize() {
- uncompressedSize = 0;
- }
-
- /**
- * Compress for LZMA1.
- */
- public void encodeForLZMA1() throws IOException {
- if (!lz.isStarted() && !encodeInit())
- return;
-
- while (encodeSymbol()) {}
- }
-
- public void encodeLZMA1EndMarker() throws IOException {
- // End of stream marker is encoded as a match with the maximum
- // possible distance. The length is ignored by the decoder,
- // but the minimum length has been used by the LZMA SDK.
- //
- // Distance is a 32-bit unsigned integer in LZMA.
- // With Java's signed int, UINT32_MAX becomes -1.
- int posState = (lz.getPos() - readAhead) & posMask;
- rc.encodeBit(isMatch[state.get()], posState, 1);
- rc.encodeBit(isRep, state.get(), 0);
- encodeMatch(-1, MATCH_LEN_MIN, posState);
- }
-
- /**
- * Compresses for LZMA2.
- *
- * @return true if the LZMA2 chunk became full, false otherwise
- */
- public boolean encodeForLZMA2() {
- // LZMA2 uses RangeEncoderToBuffer so IOExceptions aren't possible.
- try {
- if (!lz.isStarted() && !encodeInit())
- return false;
-
- while (uncompressedSize <= LZMA2_UNCOMPRESSED_LIMIT
- && rc.getPendingSize() <= LZMA2_COMPRESSED_LIMIT)
- if (!encodeSymbol())
- return false;
- } catch (IOException e) {
- throw new Error();
- }
-
- return true;
- }
-
- private boolean encodeInit() throws IOException {
- assert readAhead == -1;
- if (!lz.hasEnoughData(0))
- return false;
-
- // The first symbol must be a literal unless using
- // a preset dictionary. This code isn't run if using
- // a preset dictionary.
- skip(1);
- rc.encodeBit(isMatch[state.get()], 0, 0);
- literalEncoder.encodeInit();
-
- --readAhead;
- assert readAhead == -1;
-
- ++uncompressedSize;
- assert uncompressedSize == 1;
-
- return true;
- }
-
- private boolean encodeSymbol() throws IOException {
- if (!lz.hasEnoughData(readAhead + 1))
- return false;
-
- int len = getNextSymbol();
-
- assert readAhead >= 0;
- int posState = (lz.getPos() - readAhead) & posMask;
-
- if (back == -1) {
- // Literal i.e. eight-bit byte
- assert len == 1;
- rc.encodeBit(isMatch[state.get()], posState, 0);
- literalEncoder.encode();
- } else {
- // Some type of match
- rc.encodeBit(isMatch[state.get()], posState, 1);
- if (back < REPS) {
- // Repeated match i.e. the same distance
- // has been used earlier.
- assert lz.getMatchLen(-readAhead, reps[back], len) == len;
- rc.encodeBit(isRep, state.get(), 1);
- encodeRepMatch(back, len, posState);
- } else {
- // Normal match
- assert lz.getMatchLen(-readAhead, back - REPS, len) == len;
- rc.encodeBit(isRep, state.get(), 0);
- encodeMatch(back - REPS, len, posState);
- }
- }
-
- readAhead -= len;
- uncompressedSize += len;
-
- return true;
- }
-
- private void encodeMatch(int dist, int len, int posState)
- throws IOException {
- state.updateMatch();
- matchLenEncoder.encode(len, posState);
-
- int distSlot = getDistSlot(dist);
- rc.encodeBitTree(distSlots[getDistState(len)], distSlot);
-
- if (distSlot >= DIST_MODEL_START) {
- int footerBits = (distSlot >>> 1) - 1;
- int base = (2 | (distSlot & 1)) << footerBits;
- int distReduced = dist - base;
-
- if (distSlot < DIST_MODEL_END) {
- rc.encodeReverseBitTree(
- distSpecial[distSlot - DIST_MODEL_START],
- distReduced);
- } else {
- rc.encodeDirectBits(distReduced >>> ALIGN_BITS,
- footerBits - ALIGN_BITS);
- rc.encodeReverseBitTree(distAlign, distReduced & ALIGN_MASK);
- --alignPriceCount;
- }
- }
-
- reps[3] = reps[2];
- reps[2] = reps[1];
- reps[1] = reps[0];
- reps[0] = dist;
-
- --distPriceCount;
- }
-
- private void encodeRepMatch(int rep, int len, int posState)
- throws IOException {
- if (rep == 0) {
- rc.encodeBit(isRep0, state.get(), 0);
- rc.encodeBit(isRep0Long[state.get()], posState, len == 1 ? 0 : 1);
- } else {
- int dist = reps[rep];
- rc.encodeBit(isRep0, state.get(), 1);
-
- if (rep == 1) {
- rc.encodeBit(isRep1, state.get(), 0);
- } else {
- rc.encodeBit(isRep1, state.get(), 1);
- rc.encodeBit(isRep2, state.get(), rep - 2);
-
- if (rep == 3)
- reps[3] = reps[2];
-
- reps[2] = reps[1];
- }
-
- reps[1] = reps[0];
- reps[0] = dist;
- }
-
- if (len == 1) {
- state.updateShortRep();
- } else {
- repLenEncoder.encode(len, posState);
- state.updateLongRep();
- }
- }
-
- Matches getMatches() {
- ++readAhead;
- Matches matches = lz.getMatches();
- assert lz.verifyMatches(matches);
- return matches;
- }
-
- void skip(int len) {
- readAhead += len;
- lz.skip(len);
- }
-
- int getAnyMatchPrice(State state, int posState) {
- return RangeEncoder.getBitPrice(isMatch[state.get()][posState], 1);
- }
-
- int getNormalMatchPrice(int anyMatchPrice, State state) {
- return anyMatchPrice
- + RangeEncoder.getBitPrice(isRep[state.get()], 0);
- }
-
- int getAnyRepPrice(int anyMatchPrice, State state) {
- return anyMatchPrice
- + RangeEncoder.getBitPrice(isRep[state.get()], 1);
- }
-
- int getShortRepPrice(int anyRepPrice, State state, int posState) {
- return anyRepPrice
- + RangeEncoder.getBitPrice(isRep0[state.get()], 0)
- + RangeEncoder.getBitPrice(isRep0Long[state.get()][posState],
- 0);
- }
-
- int getLongRepPrice(int anyRepPrice, int rep, State state, int posState) {
- int price = anyRepPrice;
-
- if (rep == 0) {
- price += RangeEncoder.getBitPrice(isRep0[state.get()], 0)
- + RangeEncoder.getBitPrice(
- isRep0Long[state.get()][posState], 1);
- } else {
- price += RangeEncoder.getBitPrice(isRep0[state.get()], 1);
-
- if (rep == 1)
- price += RangeEncoder.getBitPrice(isRep1[state.get()], 0);
- else
- price += RangeEncoder.getBitPrice(isRep1[state.get()], 1)
- + RangeEncoder.getBitPrice(isRep2[state.get()],
- rep - 2);
- }
-
- return price;
- }
-
- int getLongRepAndLenPrice(int rep, int len, State state, int posState) {
- int anyMatchPrice = getAnyMatchPrice(state, posState);
- int anyRepPrice = getAnyRepPrice(anyMatchPrice, state);
- int longRepPrice = getLongRepPrice(anyRepPrice, rep, state, posState);
- return longRepPrice + repLenEncoder.getPrice(len, posState);
- }
-
- int getMatchAndLenPrice(int normalMatchPrice,
- int dist, int len, int posState) {
- int price = normalMatchPrice
- + matchLenEncoder.getPrice(len, posState);
- int distState = getDistState(len);
-
- if (dist < FULL_DISTANCES) {
- price += fullDistPrices[distState][dist];
- } else {
- // Note that distSlotPrices includes also
- // the price of direct bits.
- int distSlot = getDistSlot(dist);
- price += distSlotPrices[distState][distSlot]
- + alignPrices[dist & ALIGN_MASK];
- }
-
- return price;
- }
-
- private void updateDistPrices() {
- distPriceCount = DIST_PRICE_UPDATE_INTERVAL;
-
- for (int distState = 0; distState < DIST_STATES; ++distState) {
- for (int distSlot = 0; distSlot < distSlotPricesSize; ++distSlot)
- distSlotPrices[distState][distSlot]
- = RangeEncoder.getBitTreePrice(
- distSlots[distState], distSlot);
-
- for (int distSlot = DIST_MODEL_END; distSlot < distSlotPricesSize;
- ++distSlot) {
- int count = (distSlot >>> 1) - 1 - ALIGN_BITS;
- distSlotPrices[distState][distSlot]
- += RangeEncoder.getDirectBitsPrice(count);
- }
-
- for (int dist = 0; dist < DIST_MODEL_START; ++dist)
- fullDistPrices[distState][dist]
- = distSlotPrices[distState][dist];
- }
-
- int dist = DIST_MODEL_START;
- for (int distSlot = DIST_MODEL_START; distSlot < DIST_MODEL_END;
- ++distSlot) {
- int footerBits = (distSlot >>> 1) - 1;
- int base = (2 | (distSlot & 1)) << footerBits;
-
- int limit = distSpecial[distSlot - DIST_MODEL_START].length;
- for (int i = 0; i < limit; ++i) {
- int distReduced = dist - base;
- int price = RangeEncoder.getReverseBitTreePrice(
- distSpecial[distSlot - DIST_MODEL_START],
- distReduced);
-
- for (int distState = 0; distState < DIST_STATES; ++distState)
- fullDistPrices[distState][dist]
- = distSlotPrices[distState][distSlot] + price;
-
- ++dist;
- }
- }
-
- assert dist == FULL_DISTANCES;
- }
-
- private void updateAlignPrices() {
- alignPriceCount = ALIGN_PRICE_UPDATE_INTERVAL;
-
- for (int i = 0; i < ALIGN_SIZE; ++i)
- alignPrices[i] = RangeEncoder.getReverseBitTreePrice(distAlign,
- i);
- }
-
- /**
- * Updates the lookup tables used for calculating match distance
- * and length prices. The updating is skipped for performance reasons
- * if the tables haven't changed much since the previous update.
- */
- void updatePrices() {
- if (distPriceCount <= 0)
- updateDistPrices();
-
- if (alignPriceCount <= 0)
- updateAlignPrices();
-
- matchLenEncoder.updatePrices();
- repLenEncoder.updatePrices();
- }
-
-
- class LiteralEncoder extends LiteralCoder {
- private final LiteralSubencoder[] subencoders;
-
- LiteralEncoder(int lc, int lp) {
- super(lc, lp);
-
- subencoders = new LiteralSubencoder[1 << (lc + lp)];
- for (int i = 0; i < subencoders.length; ++i)
- subencoders[i] = new LiteralSubencoder();
- }
-
- void reset() {
- for (int i = 0; i < subencoders.length; ++i)
- subencoders[i].reset();
- }
-
- void encodeInit() throws IOException {
- // When encoding the first byte of the stream, there is
- // no previous byte in the dictionary so the encode function
- // wouldn't work.
- assert readAhead >= 0;
- subencoders[0].encode();
- }
-
- void encode() throws IOException {
- assert readAhead >= 0;
- int i = getSubcoderIndex(lz.getByte(1 + readAhead),
- lz.getPos() - readAhead);
- subencoders[i].encode();
- }
-
- int getPrice(int curByte, int matchByte,
- int prevByte, int pos, State state) {
- int price = RangeEncoder.getBitPrice(
- isMatch[state.get()][pos & posMask], 0);
-
- int i = getSubcoderIndex(prevByte, pos);
- price += state.isLiteral()
- ? subencoders[i].getNormalPrice(curByte)
- : subencoders[i].getMatchedPrice(curByte, matchByte);
-
- return price;
- }
-
- private class LiteralSubencoder extends LiteralSubcoder {
- void encode() throws IOException {
- int symbol = lz.getByte(readAhead) | 0x100;
-
- if (state.isLiteral()) {
- int subencoderIndex;
- int bit;
-
- do {
- subencoderIndex = symbol >>> 8;
- bit = (symbol >>> 7) & 1;
- rc.encodeBit(probs, subencoderIndex, bit);
- symbol <<= 1;
- } while (symbol < 0x10000);
-
- } else {
- int matchByte = lz.getByte(reps[0] + 1 + readAhead);
- int offset = 0x100;
- int subencoderIndex;
- int matchBit;
- int bit;
-
- do {
- matchByte <<= 1;
- matchBit = matchByte & offset;
- subencoderIndex = offset + matchBit + (symbol >>> 8);
- bit = (symbol >>> 7) & 1;
- rc.encodeBit(probs, subencoderIndex, bit);
- symbol <<= 1;
- offset &= ~(matchByte ^ symbol);
- } while (symbol < 0x10000);
- }
-
- state.updateLiteral();
- }
-
- int getNormalPrice(int symbol) {
- int price = 0;
- int subencoderIndex;
- int bit;
-
- symbol |= 0x100;
-
- do {
- subencoderIndex = symbol >>> 8;
- bit = (symbol >>> 7) & 1;
- price += RangeEncoder.getBitPrice(probs[subencoderIndex],
- bit);
- symbol <<= 1;
- } while (symbol < (0x100 << 8));
-
- return price;
- }
-
- int getMatchedPrice(int symbol, int matchByte) {
- int price = 0;
- int offset = 0x100;
- int subencoderIndex;
- int matchBit;
- int bit;
-
- symbol |= 0x100;
-
- do {
- matchByte <<= 1;
- matchBit = matchByte & offset;
- subencoderIndex = offset + matchBit + (symbol >>> 8);
- bit = (symbol >>> 7) & 1;
- price += RangeEncoder.getBitPrice(probs[subencoderIndex],
- bit);
- symbol <<= 1;
- offset &= ~(matchByte ^ symbol);
- } while (symbol < (0x100 << 8));
-
- return price;
- }
- }
- }
-
-
- class LengthEncoder extends LengthCoder {
- /**
- * The prices are updated after at least
- * PRICE_UPDATE_INTERVAL
many lengths
- * have been encoded with the same posState.
- */
- private static final int PRICE_UPDATE_INTERVAL = 32; // FIXME?
-
- private final int[] counters;
- private final int[][] prices;
-
- LengthEncoder(int pb, int niceLen) {
- int posStates = 1 << pb;
- counters = new int[posStates];
-
- // Always allocate at least LOW_SYMBOLS + MID_SYMBOLS because
- // it makes updatePrices slightly simpler. The prices aren't
- // usually needed anyway if niceLen < 18.
- int lenSymbols = Math.max(niceLen - MATCH_LEN_MIN + 1,
- LOW_SYMBOLS + MID_SYMBOLS);
- prices = new int[posStates][lenSymbols];
- }
-
- void reset() {
- super.reset();
-
- // Reset counters to zero to force price update before
- // the prices are needed.
- for (int i = 0; i < counters.length; ++i)
- counters[i] = 0;
- }
-
- void encode(int len, int posState) throws IOException {
- len -= MATCH_LEN_MIN;
-
- if (len < LOW_SYMBOLS) {
- rc.encodeBit(choice, 0, 0);
- rc.encodeBitTree(low[posState], len);
- } else {
- rc.encodeBit(choice, 0, 1);
- len -= LOW_SYMBOLS;
-
- if (len < MID_SYMBOLS) {
- rc.encodeBit(choice, 1, 0);
- rc.encodeBitTree(mid[posState], len);
- } else {
- rc.encodeBit(choice, 1, 1);
- rc.encodeBitTree(high, len - MID_SYMBOLS);
- }
- }
-
- --counters[posState];
- }
-
- int getPrice(int len, int posState) {
- return prices[posState][len - MATCH_LEN_MIN];
- }
-
- void updatePrices() {
- for (int posState = 0; posState < counters.length; ++posState) {
- if (counters[posState] <= 0) {
- counters[posState] = PRICE_UPDATE_INTERVAL;
- updatePrices(posState);
- }
- }
- }
-
- private void updatePrices(int posState) {
- int choice0Price = RangeEncoder.getBitPrice(choice[0], 0);
-
- int i = 0;
- for (; i < LOW_SYMBOLS; ++i)
- prices[posState][i] = choice0Price
- + RangeEncoder.getBitTreePrice(low[posState], i);
-
- choice0Price = RangeEncoder.getBitPrice(choice[0], 1);
- int choice1Price = RangeEncoder.getBitPrice(choice[1], 0);
-
- for (; i < LOW_SYMBOLS + MID_SYMBOLS; ++i)
- prices[posState][i] = choice0Price + choice1Price
- + RangeEncoder.getBitTreePrice(mid[posState],
- i - LOW_SYMBOLS);
-
- choice1Price = RangeEncoder.getBitPrice(choice[1], 1);
-
- for (; i < prices[posState].length; ++i)
- prices[posState][i] = choice0Price + choice1Price
- + RangeEncoder.getBitTreePrice(high, i - LOW_SYMBOLS
- - MID_SYMBOLS);
- }
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/lzma/LZMAEncoderFast.java b/app/src/main/java/org/tukaani/xz/lzma/LZMAEncoderFast.java
deleted file mode 100644
index f8230ee..0000000
--- a/app/src/main/java/org/tukaani/xz/lzma/LZMAEncoderFast.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * LZMAEncoderFast
- *
- * Authors: Lasse Collin getNextSymbol()
.
- */
- private int convertOpts() {
- optEnd = optCur;
-
- int optPrev = opts[optCur].optPrev;
-
- do {
- Optimum opt = opts[optCur];
-
- if (opt.prev1IsLiteral) {
- opts[optPrev].optPrev = optCur;
- opts[optPrev].backPrev = -1;
- optCur = optPrev--;
-
- if (opt.hasPrev2) {
- opts[optPrev].optPrev = optPrev + 1;
- opts[optPrev].backPrev = opt.backPrev2;
- optCur = optPrev;
- optPrev = opt.optPrev2;
- }
- }
-
- int temp = opts[optPrev].optPrev;
- opts[optPrev].optPrev = optCur;
- optCur = optPrev;
- optPrev = temp;
- } while (optCur > 0);
-
- optCur = opts[0].optPrev;
- back = opts[optCur].backPrev;
- return optCur;
- }
-
- int getNextSymbol() {
- // If there are pending symbols from an earlier call to this
- // function, return those symbols first.
- if (optCur < optEnd) {
- int len = opts[optCur].optPrev - optCur;
- optCur = opts[optCur].optPrev;
- back = opts[optCur].backPrev;
- return len;
- }
-
- assert optCur == optEnd;
- optCur = 0;
- optEnd = 0;
- back = -1;
-
- if (readAhead == -1)
- matches = getMatches();
-
- // Get the number of bytes available in the dictionary, but
- // not more than the maximum match length. If there aren't
- // enough bytes remaining to encode a match at all, return
- // immediately to encode this byte as a literal.
- int avail = Math.min(lz.getAvail(), MATCH_LEN_MAX);
- if (avail < MATCH_LEN_MIN)
- return 1;
-
- // Get the lengths of repeated matches.
- int repBest = 0;
- for (int rep = 0; rep < REPS; ++rep) {
- repLens[rep] = lz.getMatchLen(reps[rep], avail);
-
- if (repLens[rep] < MATCH_LEN_MIN) {
- repLens[rep] = 0;
- continue;
- }
-
- if (repLens[rep] > repLens[repBest])
- repBest = rep;
- }
-
- // Return if the best repeated match is at least niceLen bytes long.
- if (repLens[repBest] >= niceLen) {
- back = repBest;
- skip(repLens[repBest] - 1);
- return repLens[repBest];
- }
-
- // Initialize mainLen and mainDist to the longest match found
- // by the match finder.
- int mainLen = 0;
- int mainDist = 0;
- if (matches.count > 0) {
- mainLen = matches.len[matches.count - 1];
- mainDist = matches.dist[matches.count - 1];
-
- // Return if it is at least niceLen bytes long.
- if (mainLen >= niceLen) {
- back = mainDist + REPS;
- skip(mainLen - 1);
- return mainLen;
- }
- }
-
- int curByte = lz.getByte(0);
- int matchByte = lz.getByte(reps[0] + 1);
-
- // If the match finder found no matches and this byte cannot be
- // encoded as a repeated match (short or long), we must be return
- // to have the byte encoded as a literal.
- if (mainLen < MATCH_LEN_MIN && curByte != matchByte
- && repLens[repBest] < MATCH_LEN_MIN)
- return 1;
-
-
- int pos = lz.getPos();
- int posState = pos & posMask;
-
- // Calculate the price of encoding the current byte as a literal.
- {
- int prevByte = lz.getByte(1);
- int literalPrice = literalEncoder.getPrice(curByte, matchByte,
- prevByte, pos, state);
- opts[1].set1(literalPrice, 0, -1);
- }
-
- int anyMatchPrice = getAnyMatchPrice(state, posState);
- int anyRepPrice = getAnyRepPrice(anyMatchPrice, state);
-
- // If it is possible to encode this byte as a short rep, see if
- // it is cheaper than encoding it as a literal.
- if (matchByte == curByte) {
- int shortRepPrice = getShortRepPrice(anyRepPrice,
- state, posState);
- if (shortRepPrice < opts[1].price)
- opts[1].set1(shortRepPrice, 0, 0);
- }
-
- // Return if there is neither normal nor long repeated match. Use
- // a short match instead of a literal if is is possible and cheaper.
- optEnd = Math.max(mainLen, repLens[repBest]);
- if (optEnd < MATCH_LEN_MIN) {
- assert optEnd == 0 : optEnd;
- back = opts[1].backPrev;
- return 1;
- }
-
-
- // Update the lookup tables for distances and lengths before using
- // those price calculation functions. (The price function above
- // don't need these tables.)
- updatePrices();
-
- // Initialize the state and reps of this position in opts[].
- // updateOptStateAndReps() will need these to get the new
- // state and reps for the next byte.
- opts[0].state.set(state);
- System.arraycopy(reps, 0, opts[0].reps, 0, REPS);
-
- // Initialize the prices for latter opts that will be used below.
- for (int i = optEnd; i >= MATCH_LEN_MIN; --i)
- opts[i].reset();
-
- // Calculate the prices of repeated matches of all lengths.
- for (int rep = 0; rep < REPS; ++rep) {
- int repLen = repLens[rep];
- if (repLen < MATCH_LEN_MIN)
- continue;
-
- int longRepPrice = getLongRepPrice(anyRepPrice, rep,
- state, posState);
- do {
- int price = longRepPrice + repLenEncoder.getPrice(repLen,
- posState);
- if (price < opts[repLen].price)
- opts[repLen].set1(price, 0, rep);
- } while (--repLen >= MATCH_LEN_MIN);
- }
-
- // Calculate the prices of normal matches that are longer than rep0.
- {
- int len = Math.max(repLens[0] + 1, MATCH_LEN_MIN);
- if (len <= mainLen) {
- int normalMatchPrice = getNormalMatchPrice(anyMatchPrice,
- state);
-
- // Set i to the index of the shortest match that is
- // at least len bytes long.
- int i = 0;
- while (len > matches.len[i])
- ++i;
-
- while (true) {
- int dist = matches.dist[i];
- int price = getMatchAndLenPrice(normalMatchPrice,
- dist, len, posState);
- if (price < opts[len].price)
- opts[len].set1(price, 0, dist + REPS);
-
- if (len == matches.len[i])
- if (++i == matches.count)
- break;
-
- ++len;
- }
- }
- }
-
-
- avail = Math.min(lz.getAvail(), OPTS - 1);
-
- // Get matches for later bytes and optimize the use of LZMA symbols
- // by calculating the prices and picking the cheapest symbol
- // combinations.
- while (++optCur < optEnd) {
- matches = getMatches();
- if (matches.count > 0
- && matches.len[matches.count - 1] >= niceLen)
- break;
-
- --avail;
- ++pos;
- posState = pos & posMask;
-
- updateOptStateAndReps();
- anyMatchPrice = opts[optCur].price
- + getAnyMatchPrice(opts[optCur].state, posState);
- anyRepPrice = getAnyRepPrice(anyMatchPrice, opts[optCur].state);
-
- calc1BytePrices(pos, posState, avail, anyRepPrice);
-
- if (avail >= MATCH_LEN_MIN) {
- int startLen = calcLongRepPrices(pos, posState,
- avail, anyRepPrice);
- if (matches.count > 0)
- calcNormalMatchPrices(pos, posState, avail,
- anyMatchPrice, startLen);
- }
- }
-
- return convertOpts();
- }
-
- /**
- * Updates the state and reps for the current byte in the opts array.
- */
- private void updateOptStateAndReps() {
- int optPrev = opts[optCur].optPrev;
- assert optPrev < optCur;
-
- if (opts[optCur].prev1IsLiteral) {
- --optPrev;
-
- if (opts[optCur].hasPrev2) {
- opts[optCur].state.set(opts[opts[optCur].optPrev2].state);
- if (opts[optCur].backPrev2 < REPS)
- opts[optCur].state.updateLongRep();
- else
- opts[optCur].state.updateMatch();
- } else {
- opts[optCur].state.set(opts[optPrev].state);
- }
-
- opts[optCur].state.updateLiteral();
- } else {
- opts[optCur].state.set(opts[optPrev].state);
- }
-
- if (optPrev == optCur - 1) {
- // Must be either a short rep or a literal.
- assert opts[optCur].backPrev == 0 || opts[optCur].backPrev == -1;
-
- if (opts[optCur].backPrev == 0)
- opts[optCur].state.updateShortRep();
- else
- opts[optCur].state.updateLiteral();
-
- System.arraycopy(opts[optPrev].reps, 0,
- opts[optCur].reps, 0, REPS);
- } else {
- int back;
- if (opts[optCur].prev1IsLiteral && opts[optCur].hasPrev2) {
- optPrev = opts[optCur].optPrev2;
- back = opts[optCur].backPrev2;
- opts[optCur].state.updateLongRep();
- } else {
- back = opts[optCur].backPrev;
- if (back < REPS)
- opts[optCur].state.updateLongRep();
- else
- opts[optCur].state.updateMatch();
- }
-
- if (back < REPS) {
- opts[optCur].reps[0] = opts[optPrev].reps[back];
-
- int rep;
- for (rep = 1; rep <= back; ++rep)
- opts[optCur].reps[rep] = opts[optPrev].reps[rep - 1];
-
- for (; rep < REPS; ++rep)
- opts[optCur].reps[rep] = opts[optPrev].reps[rep];
- } else {
- opts[optCur].reps[0] = back - REPS;
- System.arraycopy(opts[optPrev].reps, 0,
- opts[optCur].reps, 1, REPS - 1);
- }
- }
- }
-
- /**
- * Calculates prices of a literal, a short rep, and literal + rep0.
- */
- private void calc1BytePrices(int pos, int posState,
- int avail, int anyRepPrice) {
- // This will be set to true if using a literal or a short rep.
- boolean nextIsByte = false;
-
- int curByte = lz.getByte(0);
- int matchByte = lz.getByte(opts[optCur].reps[0] + 1);
-
- // Try a literal.
- int literalPrice = opts[optCur].price
- + literalEncoder.getPrice(curByte, matchByte, lz.getByte(1),
- pos, opts[optCur].state);
- if (literalPrice < opts[optCur + 1].price) {
- opts[optCur + 1].set1(literalPrice, optCur, -1);
- nextIsByte = true;
- }
-
- // Try a short rep.
- if (matchByte == curByte && (opts[optCur + 1].optPrev == optCur
- || opts[optCur + 1].backPrev != 0)) {
- int shortRepPrice = getShortRepPrice(anyRepPrice,
- opts[optCur].state,
- posState);
- if (shortRepPrice <= opts[optCur + 1].price) {
- opts[optCur + 1].set1(shortRepPrice, optCur, 0);
- nextIsByte = true;
- }
- }
-
- // If neither a literal nor a short rep was the cheapest choice,
- // try literal + long rep0.
- if (!nextIsByte && matchByte != curByte && avail > MATCH_LEN_MIN) {
- int lenLimit = Math.min(niceLen, avail - 1);
- int len = lz.getMatchLen(1, opts[optCur].reps[0], lenLimit);
-
- if (len >= MATCH_LEN_MIN) {
- nextState.set(opts[optCur].state);
- nextState.updateLiteral();
- int nextPosState = (pos + 1) & posMask;
- int price = literalPrice
- + getLongRepAndLenPrice(0, len,
- nextState, nextPosState);
-
- int i = optCur + 1 + len;
- while (optEnd < i)
- opts[++optEnd].reset();
-
- if (price < opts[i].price)
- opts[i].set2(price, optCur, 0);
- }
- }
- }
-
- /**
- * Calculates prices of long rep and long rep + literal + rep0.
- */
- private int calcLongRepPrices(int pos, int posState,
- int avail, int anyRepPrice) {
- int startLen = MATCH_LEN_MIN;
- int lenLimit = Math.min(avail, niceLen);
-
- for (int rep = 0; rep < REPS; ++rep) {
- int len = lz.getMatchLen(opts[optCur].reps[rep], lenLimit);
- if (len < MATCH_LEN_MIN)
- continue;
-
- while (optEnd < optCur + len)
- opts[++optEnd].reset();
-
- int longRepPrice = getLongRepPrice(anyRepPrice, rep,
- opts[optCur].state, posState);
-
- for (int i = len; i >= MATCH_LEN_MIN; --i) {
- int price = longRepPrice
- + repLenEncoder.getPrice(i, posState);
- if (price < opts[optCur + i].price)
- opts[optCur + i].set1(price, optCur, rep);
- }
-
- if (rep == 0)
- startLen = len + 1;
-
- int len2Limit = Math.min(niceLen, avail - len - 1);
- int len2 = lz.getMatchLen(len + 1, opts[optCur].reps[rep],
- len2Limit);
-
- if (len2 >= MATCH_LEN_MIN) {
- // Rep
- int price = longRepPrice
- + repLenEncoder.getPrice(len, posState);
- nextState.set(opts[optCur].state);
- nextState.updateLongRep();
-
- // Literal
- int curByte = lz.getByte(len, 0);
- int matchByte = lz.getByte(0); // lz.getByte(len, len)
- int prevByte = lz.getByte(len, 1);
- price += literalEncoder.getPrice(curByte, matchByte, prevByte,
- pos + len, nextState);
- nextState.updateLiteral();
-
- // Rep0
- int nextPosState = (pos + len + 1) & posMask;
- price += getLongRepAndLenPrice(0, len2,
- nextState, nextPosState);
-
- int i = optCur + len + 1 + len2;
- while (optEnd < i)
- opts[++optEnd].reset();
-
- if (price < opts[i].price)
- opts[i].set3(price, optCur, rep, len, 0);
- }
- }
-
- return startLen;
- }
-
- /**
- * Calculates prices of a normal match and normal match + literal + rep0.
- */
- private void calcNormalMatchPrices(int pos, int posState, int avail,
- int anyMatchPrice, int startLen) {
- // If the longest match is so long that it would not fit into
- // the opts array, shorten the matches.
- if (matches.len[matches.count - 1] > avail) {
- matches.count = 0;
- while (matches.len[matches.count] < avail)
- ++matches.count;
-
- matches.len[matches.count++] = avail;
- }
-
- if (matches.len[matches.count - 1] < startLen)
- return;
-
- while (optEnd < optCur + matches.len[matches.count - 1])
- opts[++optEnd].reset();
-
- int normalMatchPrice = getNormalMatchPrice(anyMatchPrice,
- opts[optCur].state);
-
- int match = 0;
- while (startLen > matches.len[match])
- ++match;
-
- for (int len = startLen; ; ++len) {
- int dist = matches.dist[match];
-
- // Calculate the price of a match of len bytes from the nearest
- // possible distance.
- int matchAndLenPrice = getMatchAndLenPrice(normalMatchPrice,
- dist, len, posState);
- if (matchAndLenPrice < opts[optCur + len].price)
- opts[optCur + len].set1(matchAndLenPrice,
- optCur, dist + REPS);
-
- if (len != matches.len[match])
- continue;
-
- // Try match + literal + rep0. First get the length of the rep0.
- int len2Limit = Math.min(niceLen, avail - len - 1);
- int len2 = lz.getMatchLen(len + 1, dist, len2Limit);
-
- if (len2 >= MATCH_LEN_MIN) {
- nextState.set(opts[optCur].state);
- nextState.updateMatch();
-
- // Literal
- int curByte = lz.getByte(len, 0);
- int matchByte = lz.getByte(0); // lz.getByte(len, len)
- int prevByte = lz.getByte(len, 1);
- int price = matchAndLenPrice
- + literalEncoder.getPrice(curByte, matchByte,
- prevByte, pos + len,
- nextState);
- nextState.updateLiteral();
-
- // Rep0
- int nextPosState = (pos + len + 1) & posMask;
- price += getLongRepAndLenPrice(0, len2,
- nextState, nextPosState);
-
- int i = optCur + len + 1 + len2;
- while (optEnd < i)
- opts[++optEnd].reset();
-
- if (price < opts[i].price)
- opts[i].set3(price, optCur, dist + REPS, len, 0);
- }
-
- if (++match == matches.count)
- break;
- }
- }
-}
diff --git a/app/src/main/java/org/tukaani/xz/lzma/Optimum.java b/app/src/main/java/org/tukaani/xz/lzma/Optimum.java
deleted file mode 100644
index 845ac97..0000000
--- a/app/src/main/java/org/tukaani/xz/lzma/Optimum.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Optimum
- *
- * Authors: Lasse Collin Introduction
- *
- *
- * Getting started
- * Licensing
- *