TarArchiveInputStream.java

  1. /*
  2.  *  Licensed to the Apache Software Foundation (ASF) under one or more
  3.  *  contributor license agreements.  See the NOTICE file distributed with
  4.  *  this work for additional information regarding copyright ownership.
  5.  *  The ASF licenses this file to You under the Apache License, Version 2.0
  6.  *  (the "License"); you may not use this file except in compliance with
  7.  *  the License.  You may obtain a copy of the License at
  8.  *
  9.  *      http://www.apache.org/licenses/LICENSE-2.0
  10.  *
  11.  *  Unless required by applicable law or agreed to in writing, software
  12.  *  distributed under the License is distributed on an "AS IS" BASIS,
  13.  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14.  *  See the License for the specific language governing permissions and
  15.  *  limitations under the License.
  16.  */

  17. /*
  18.  * This package is based on the work done by Timothy Gerard Endres
  19.  * (time@ice.com) to whom the Ant project is very grateful for his great code.
  20.  */

  21. package org.apache.commons.compress.archivers.tar;

  22. import java.io.ByteArrayOutputStream;
  23. import java.io.FileInputStream;
  24. import java.io.IOException;
  25. import java.io.InputStream;
  26. import java.util.ArrayList;
  27. import java.util.Arrays;
  28. import java.util.HashMap;
  29. import java.util.List;
  30. import java.util.Map;

  31. import org.apache.commons.compress.archivers.ArchiveEntry;
  32. import org.apache.commons.compress.archivers.ArchiveInputStream;
  33. import org.apache.commons.compress.archivers.zip.ZipEncoding;
  34. import org.apache.commons.compress.archivers.zip.ZipEncodingHelper;
  35. import org.apache.commons.compress.utils.ArchiveUtils;
  36. import org.apache.commons.compress.utils.BoundedInputStream;
  37. import org.apache.commons.compress.utils.IOUtils;

/**
 * The TarInputStream reads a UNIX tar archive as an InputStream. Methods are provided to position at each successive entry in the archive, and then read each
 * entry as a normal input stream using read().
 *
 * @NotThreadSafe
 */
  44. public class TarArchiveInputStream extends ArchiveInputStream<TarArchiveEntry> {

  45.     private static final int SMALL_BUFFER_SIZE = 256;

  46.     /**
  47.      * Checks if the signature matches what is expected for a tar file.
  48.      *
  49.      * @param signature the bytes to check
  50.      * @param length    the number of bytes to check
  51.      * @return true, if this stream is a tar archive stream, false otherwise
  52.      */
  53.     public static boolean matches(final byte[] signature, final int length) {
  54.         final int versionOffset = TarConstants.VERSION_OFFSET;
  55.         final int versionLen = TarConstants.VERSIONLEN;
  56.         if (length < versionOffset + versionLen) {
  57.             return false;
  58.         }

  59.         final int magicOffset = TarConstants.MAGIC_OFFSET;
  60.         final int magicLen = TarConstants.MAGICLEN;
  61.         if (ArchiveUtils.matchAsciiBuffer(TarConstants.MAGIC_POSIX, signature, magicOffset, magicLen)
  62.                 && ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_POSIX, signature, versionOffset, versionLen)) {
  63.             return true;
  64.         }
  65.         if (ArchiveUtils.matchAsciiBuffer(TarConstants.MAGIC_GNU, signature, magicOffset, magicLen)
  66.                 && (ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_GNU_SPACE, signature, versionOffset, versionLen)
  67.                         || ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_GNU_ZERO, signature, versionOffset, versionLen))) {
  68.             return true;
  69.         }
  70.         // COMPRESS-107 - recognize Ant tar files
  71.         return ArchiveUtils.matchAsciiBuffer(TarConstants.MAGIC_ANT, signature, magicOffset, magicLen)
  72.                 && ArchiveUtils.matchAsciiBuffer(TarConstants.VERSION_ANT, signature, versionOffset, versionLen);
  73.     }

    /** Scratch buffer used to drain small meta entries, e.g. GNU long names. */
    private final byte[] smallBuf = new byte[SMALL_BUFFER_SIZE];

    /** The buffer to store the TAR header. **/
    private final byte[] recordBuffer;

    /** The size of a block. */
    private final int blockSize;

    /** True if stream is at EOF. */
    private boolean atEof;

    /** Size of the current entry. */
    private long entrySize;

    /** How far into the entry the stream is at. */
    private long entryOffset;

    /** Input streams for reading sparse entries. **/
    private List<InputStream> sparseInputStreams;

    /** The index of current input stream being read when reading sparse entries. */
    private int currentSparseInputStreamIndex;

    /** The meta-data about the current entry. */
    private TarArchiveEntry currEntry;

    /** The encoding of the file. */
    private final ZipEncoding zipEncoding;

    /** The global PAX header. */
    private Map<String, String> globalPaxHeaders = new HashMap<>();

    /** The global sparse headers, this is only used in PAX Format 0.X. */
    private final List<TarArchiveStructSparse> globalSparseHeaders = new ArrayList<>();

    /** When true, illegal header values are tolerated instead of raising an exception. */
    private final boolean lenient;

    /**
     * Constructs a new instance using the default block size and record size.
     *
     * @param inputStream the input stream to use
     */
    public TarArchiveInputStream(final InputStream inputStream) {
        this(inputStream, TarConstants.DEFAULT_BLKSIZE, TarConstants.DEFAULT_RCDSIZE);
    }

    /**
     * Constructs a new instance using the default block size, record size and encoding.
     *
     * @param inputStream the input stream to use
     * @param lenient     when set to true illegal values for group/userid, mode, device numbers and timestamp will be ignored and the fields set to
     *                    {@link TarArchiveEntry#UNKNOWN}. When set to false such illegal fields cause an exception instead.
     * @since 1.19
     */
    public TarArchiveInputStream(final InputStream inputStream, final boolean lenient) {
        this(inputStream, TarConstants.DEFAULT_BLKSIZE, TarConstants.DEFAULT_RCDSIZE, null, lenient);
    }

    /**
     * Constructs a new instance using the default record size.
     *
     * @param inputStream the input stream to use
     * @param blockSize   the block size to use
     */
    public TarArchiveInputStream(final InputStream inputStream, final int blockSize) {
        this(inputStream, blockSize, TarConstants.DEFAULT_RCDSIZE);
    }

    /**
     * Constructs a new instance using the default encoding.
     *
     * @param inputStream the input stream to use
     * @param blockSize   the block size to use
     * @param recordSize  the record size to use
     */
    public TarArchiveInputStream(final InputStream inputStream, final int blockSize, final int recordSize) {
        this(inputStream, blockSize, recordSize, null);
    }

    /**
     * Constructs a new instance in non-lenient mode.
     *
     * @param inputStream the input stream to use
     * @param blockSize   the block size to use
     * @param recordSize  the record size to use
     * @param encoding    name of the encoding to use for file names
     * @since 1.4
     */
    public TarArchiveInputStream(final InputStream inputStream, final int blockSize, final int recordSize, final String encoding) {
        this(inputStream, blockSize, recordSize, encoding, false);
    }

    /**
     * Constructs a new instance; all other constructors delegate here.
     *
     * @param inputStream the input stream to use
     * @param blockSize   the block size to use
     * @param recordSize  the record size to use
     * @param encoding    name of the encoding to use for file names
     * @param lenient     when set to true illegal values for group/userid, mode, device numbers and timestamp will be ignored and the fields set to
     *                    {@link TarArchiveEntry#UNKNOWN}. When set to false such illegal fields cause an exception instead.
     * @since 1.19
     */
    public TarArchiveInputStream(final InputStream inputStream, final int blockSize, final int recordSize, final String encoding, final boolean lenient) {
        super(inputStream, encoding);
        // encoding may be null (several sibling constructors pass null to select the default)
        this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
        this.recordBuffer = new byte[recordSize];
        this.blockSize = blockSize;
        this.lenient = lenient;
    }

    /**
     * Constructs a new instance using the default record size.
     *
     * @param inputStream the input stream to use
     * @param blockSize   the block size to use
     * @param encoding    name of the encoding to use for file names
     * @since 1.4
     */
    public TarArchiveInputStream(final InputStream inputStream, final int blockSize, final String encoding) {
        this(inputStream, blockSize, TarConstants.DEFAULT_RCDSIZE, encoding);
    }

    /**
     * Constructs a new instance using the default block size and record size.
     *
     * @param inputStream the input stream to use
     * @param encoding    name of the encoding to use for file names
     * @since 1.4
     */
    public TarArchiveInputStream(final InputStream inputStream, final String encoding) {
        this(inputStream, TarConstants.DEFAULT_BLKSIZE, TarConstants.DEFAULT_RCDSIZE, encoding);
    }

    /**
     * Applies the given PAX headers and sparse headers to {@link #currEntry}.
     *
     * @param headers       the PAX key/value headers to merge into the current entry
     * @param sparseHeaders the sparse headers to set on the current entry
     * @throws IOException propagated from updating the entry from the headers
     */
    private void applyPaxHeadersToCurrentEntry(final Map<String, String> headers, final List<TarArchiveStructSparse> sparseHeaders) throws IOException {
        currEntry.updateEntryFromPaxHeaders(headers);
        currEntry.setSparseHeaders(sparseHeaders);
    }

  191.     /**
  192.      * Gets the available data that can be read from the current entry in the archive. This does not indicate how much data is left in the entire archive, only
  193.      * in the current entry. This value is determined from the entry's size header field and the amount of data already read from the current entry.
  194.      * Integer.MAX_VALUE is returned in case more than Integer.MAX_VALUE bytes are left in the current entry in the archive.
  195.      *
  196.      * @return The number of available bytes for the current entry.
  197.      * @throws IOException for signature
  198.      */
  199.     @Override
  200.     public int available() throws IOException {
  201.         if (isDirectory()) {
  202.             return 0;
  203.         }
  204.         final long available = currEntry.getRealSize() - entryOffset;
  205.         if (available > Integer.MAX_VALUE) {
  206.             return Integer.MAX_VALUE;
  207.         }
  208.         return (int) available;
  209.     }

  210.     /**
  211.      * Build the input streams consisting of all-zero input streams and non-zero input streams. When reading from the non-zero input streams, the data is
  212.      * actually read from the original input stream. The size of each input stream is introduced by the sparse headers.
  213.      * <p>
  214.      * NOTE : Some all-zero input streams and non-zero input streams have the size of 0. We DO NOT store the 0 size input streams because they are meaningless.
  215.      * </p>
  216.      */
  217.     private void buildSparseInputStreams() throws IOException {
  218.         currentSparseInputStreamIndex = -1;
  219.         sparseInputStreams = new ArrayList<>();

  220.         final List<TarArchiveStructSparse> sparseHeaders = currEntry.getOrderedSparseHeaders();

  221.         // Stream doesn't need to be closed at all as it doesn't use any resources
  222.         final InputStream zeroInputStream = new TarArchiveSparseZeroInputStream(); // NOSONAR
  223.         // logical offset into the extracted entry
  224.         long offset = 0;
  225.         for (final TarArchiveStructSparse sparseHeader : sparseHeaders) {
  226.             final long zeroBlockSize = sparseHeader.getOffset() - offset;
  227.             if (zeroBlockSize < 0) {
  228.                 // sparse header says to move backwards inside the extracted entry
  229.                 throw new IOException("Corrupted struct sparse detected");
  230.             }

  231.             // only store the zero block if it is not empty
  232.             if (zeroBlockSize > 0) {
  233.                 sparseInputStreams.add(new BoundedInputStream(zeroInputStream, sparseHeader.getOffset() - offset));
  234.             }

  235.             // only store the input streams with non-zero size
  236.             if (sparseHeader.getNumbytes() > 0) {
  237.                 sparseInputStreams.add(new BoundedInputStream(in, sparseHeader.getNumbytes()));
  238.             }

  239.             offset = sparseHeader.getOffset() + sparseHeader.getNumbytes();
  240.         }

  241.         if (!sparseInputStreams.isEmpty()) {
  242.             currentSparseInputStreamIndex = 0;
  243.         }
  244.     }

    /**
     * Whether this class is able to read the given entry.
     *
     * @param archiveEntry the entry to test
     * @return The implementation will return true if the {@link ArchiveEntry} is an instance of {@link TarArchiveEntry}
     */
    @Override
    public boolean canReadEntryData(final ArchiveEntry archiveEntry) {
        return archiveEntry instanceof TarArchiveEntry;
    }

  254.     /**
  255.      * Closes this stream. Calls the TarBuffer's close() method.
  256.      *
  257.      * @throws IOException on error
  258.      */
  259.     @Override
  260.     public void close() throws IOException {
  261.         // Close all the input streams in sparseInputStreams
  262.         if (sparseInputStreams != null) {
  263.             for (final InputStream inputStream : sparseInputStreams) {
  264.                 inputStream.close();
  265.             }
  266.         }
  267.         in.close();
  268.     }

  269.     /**
  270.      * This method is invoked once the end of the archive is hit, it tries to consume the remaining bytes under the assumption that the tool creating this
  271.      * archive has padded the last block.
  272.      */
  273.     private void consumeRemainderOfLastBlock() throws IOException {
  274.         final long bytesReadOfLastBlock = getBytesRead() % blockSize;
  275.         if (bytesReadOfLastBlock > 0) {
  276.             count(IOUtils.skip(in, blockSize - bytesReadOfLastBlock));
  277.         }
  278.     }

  279.     /**
  280.      * For FileInputStream, the skip always return the number you input, so we need the available bytes to determine how many bytes are actually skipped
  281.      *
  282.      * @param available available bytes returned by inputStream.available()
  283.      * @param skipped   skipped bytes returned by inputStream.skip()
  284.      * @param expected  bytes expected to skip
  285.      * @return number of bytes actually skipped
  286.      * @throws IOException if a truncated tar archive is detected
  287.      */
  288.     private long getActuallySkipped(final long available, final long skipped, final long expected) throws IOException {
  289.         long actuallySkipped = skipped;
  290.         if (in instanceof FileInputStream) {
  291.             actuallySkipped = Math.min(skipped, available);
  292.         }
  293.         if (actuallySkipped != expected) {
  294.             throw new IOException("Truncated TAR archive");
  295.         }
  296.         return actuallySkipped;
  297.     }

    /**
     * Gets the current TAR Archive Entry that this input stream is processing.
     *
     * @return The current Archive Entry, or {@code null} if none has been read yet or EOF was reached
     */
    public TarArchiveEntry getCurrentEntry() {
        return currEntry;
    }

  306.     /**
  307.      * Gets the next entry in this tar archive as long name data.
  308.      *
  309.      * @return The next entry in the archive as long name data, or null.
  310.      * @throws IOException on error
  311.      */
  312.     protected byte[] getLongNameData() throws IOException {
  313.         // read in the name
  314.         final ByteArrayOutputStream longName = new ByteArrayOutputStream();
  315.         int length = 0;
  316.         while ((length = read(smallBuf)) >= 0) {
  317.             longName.write(smallBuf, 0, length);
  318.         }
  319.         getNextEntry();
  320.         if (currEntry == null) {
  321.             // Bugzilla: 40334
  322.             // Malformed tar file - long entry name not followed by entry
  323.             return null;
  324.         }
  325.         byte[] longNameData = longName.toByteArray();
  326.         // remove trailing null terminator(s)
  327.         length = longNameData.length;
  328.         while (length > 0 && longNameData[length - 1] == 0) {
  329.             --length;
  330.         }
  331.         if (length != longNameData.length) {
  332.             longNameData = Arrays.copyOf(longNameData, length);
  333.         }
  334.         return longNameData;
  335.     }

    /**
     * Gets the next TarArchiveEntry in this stream.
     *
     * @return the next entry, or {@code null} if there are no more entries
     * @throws IOException if the next entry could not be read
     */
    @Override
    public TarArchiveEntry getNextEntry() throws IOException {
        return getNextTarEntry();
    }

    /**
     * Gets the next entry in this tar archive. This will skip over any remaining data in the current entry, if there is one, and place the input stream at the
     * header of the next entry, and read the header and instantiate a new TarEntry from the header bytes and return that entry. If there are no more entries in
     * the archive, null will be returned to indicate that the end of the archive has been reached.
     *
     * @return The next TarEntry in the archive, or null.
     * @throws IOException on error
     * @deprecated Use {@link #getNextEntry()}.
     */
    @Deprecated
    public TarArchiveEntry getNextTarEntry() throws IOException {
        if (isAtEOF()) {
            return null;
        }

        if (currEntry != null) {
            /* Skip will only go to the end of the current entry */
            IOUtils.skip(this, Long.MAX_VALUE);

            /* skip to the end of the last record */
            skipRecordPadding();
        }

        // Read the next header record; null means end of archive.
        final byte[] headerBuf = getRecord();

        if (headerBuf == null) {
            /* hit EOF */
            currEntry = null;
            return null;
        }

        try {
            currEntry = new TarArchiveEntry(globalPaxHeaders, headerBuf, zipEncoding, lenient);
        } catch (final IllegalArgumentException e) {
            throw new IOException("Error detected parsing the header", e);
        }

        entryOffset = 0;
        entrySize = currEntry.getSize();

        // GNU long link: entry data holds the real link name; the real entry follows.
        if (currEntry.isGNULongLinkEntry()) {
            final byte[] longLinkData = getLongNameData();
            if (longLinkData == null) {
                // Bugzilla: 40334
                // Malformed tar file - long link entry name not followed by entry
                return null;
            }
            currEntry.setLinkName(zipEncoding.decode(longLinkData));
        }

        // GNU long name: entry data holds the real file name; the real entry follows.
        if (currEntry.isGNULongNameEntry()) {
            final byte[] longNameData = getLongNameData();
            if (longNameData == null) {
                // Bugzilla: 40334
                // Malformed tar file - long entry name not followed by entry
                return null;
            }

            // COMPRESS-509 : the name of directories should end with '/'
            final String name = zipEncoding.decode(longNameData);
            currEntry.setName(name);
            if (currEntry.isDirectory() && !name.endsWith("/")) {
                currEntry.setName(name + "/");
            }
        }

        if (currEntry.isGlobalPaxHeader()) { // Process Global Pax headers
            readGlobalPaxHeaders();
        }

        try {
            if (currEntry.isPaxHeader()) { // Process Pax headers
                paxHeaders();
            } else if (!globalPaxHeaders.isEmpty()) {
                applyPaxHeadersToCurrentEntry(globalPaxHeaders, globalSparseHeaders);
            }
        } catch (final NumberFormatException e) {
            throw new IOException("Error detected parsing the pax header", e);
        }

        if (currEntry.isOldGNUSparse()) { // Process sparse files
            readOldGNUSparse();
        }

        // If the size of the next element in the archive has changed
        // due to a new size being reported in the POSIX header
        // information, we update entrySize here so that it contains
        // the correct value.
        entrySize = currEntry.getSize();

        return currEntry;
    }

  424.     /**
  425.      * Gets the next record in this tar archive. This will skip over any remaining data in the current entry, if there is one, and place the input stream at the
  426.      * header of the next entry.
  427.      * <p>
  428.      * If there are no more entries in the archive, null will be returned to indicate that the end of the archive has been reached. At the same time the
  429.      * {@code hasHitEOF} marker will be set to true.
  430.      * </p>
  431.      *
  432.      * @return The next header in the archive, or null.
  433.      * @throws IOException on error
  434.      */
  435.     private byte[] getRecord() throws IOException {
  436.         byte[] headerBuf = readRecord();
  437.         setAtEOF(isEOFRecord(headerBuf));
  438.         if (isAtEOF() && headerBuf != null) {
  439.             tryToConsumeSecondEOFRecord();
  440.             consumeRemainderOfLastBlock();
  441.             headerBuf = null;
  442.         }
  443.         return headerBuf;
  444.     }

    /**
     * Gets the record size being used by this stream's buffer.
     *
     * @return The TarBuffer record size, i.e. the length of {@link #recordBuffer}.
     */
    public int getRecordSize() {
        return recordBuffer.length;
    }

    /**
     * Tests whether the end of the archive has been reached.
     *
     * @return true if this stream is at EOF
     */
    protected final boolean isAtEOF() {
        return atEof;
    }

    /**
     * Tests whether the current entry exists and is a directory.
     *
     * @return true if there is a current entry and it is a directory
     */
    private boolean isDirectory() {
        return currEntry != null && currEntry.isDirectory();
    }

  459.     /**
  460.      * Tests if an archive record indicate End of Archive. End of archive is indicated by a record that consists entirely of null bytes.
  461.      *
  462.      * @param record The record data to check.
  463.      * @return true if the record data is an End of Archive
  464.      */
  465.     protected boolean isEOFRecord(final byte[] record) {
  466.         return record == null || ArchiveUtils.isArrayZero(record, getRecordSize());
  467.     }

    /**
     * Since we do not support marking just yet, we do nothing.
     *
     * @param markLimit The limit to mark (ignored).
     */
    @Override
    public synchronized void mark(final int markLimit) {
        // intentionally a no-op: mark/reset is not supported
    }

    /**
     * Since we do not support marking just yet, we return false.
     *
     * @return always false.
     */
    @Override
    public boolean markSupported() {
        return false;
    }

    /**
     * Reads and applies the PAX headers for the current PAX-header entry, then advances to the entry they describe.
     * <p>
     * For PAX Format 0.0, the sparse headers(GNU.sparse.offset and GNU.sparse.numbytes) may appear multi times, and they look like:
     * </p>
     * <p>
     * GNU.sparse.size=size GNU.sparse.numblocks=numblocks repeat numblocks times GNU.sparse.offset=offset GNU.sparse.numbytes=numbytes end repeat
     * </p>
     * <p>
     * For PAX Format 0.1, the sparse headers are stored in a single variable : GNU.sparse.map
     * </p>
     * <p>
     * GNU.sparse.map Map of non-null data chunks. It is a string consisting of comma-separated values "offset,size[,offset-1,size-1...]"
     * </p>
     * <p>
     * For PAX Format 1.X: The sparse map itself is stored in the file data block, preceding the actual file data. It consists of a series of decimal numbers
     * delimited by newlines. The map is padded with nulls to the nearest block boundary. The first number gives the number of entries in the map. Following are
     * map entries, each one consisting of two numbers giving the offset and size of the data block it describes.
     * </p>
     *
     * @throws IOException on error reading or parsing the headers, or if no entry follows the PAX header
     */
    private void paxHeaders() throws IOException {
        List<TarArchiveStructSparse> sparseHeaders = new ArrayList<>();
        final Map<String, String> headers = TarUtils.parsePaxHeaders(this, sparseHeaders, globalPaxHeaders, entrySize);

        // for 0.1 PAX Headers
        if (headers.containsKey(TarGnuSparseKeys.MAP)) {
            sparseHeaders = new ArrayList<>(TarUtils.parseFromPAX01SparseHeaders(headers.get(TarGnuSparseKeys.MAP)));
        }
        getNextEntry(); // Get the actual file entry
        if (currEntry == null) {
            throw new IOException("premature end of tar archive. Didn't find any entry after PAX header.");
        }
        applyPaxHeadersToCurrentEntry(headers, sparseHeaders);

        // for 1.0 PAX Format, the sparse map is stored in the file data block
        if (currEntry.isPaxGNU1XSparse()) {
            sparseHeaders = TarUtils.parsePAX1XSparseHeaders(in, getRecordSize());
            currEntry.setSparseHeaders(sparseHeaders);
        }

        // sparse headers are all done reading, we need to build
        // sparse input streams using these sparse headers
        buildSparseInputStreams();
    }

    /**
     * Reads bytes from the current tar archive entry.
     * <p>
     * This method is aware of the boundaries of the current entry in the archive and will deal with them as if they were this stream's start and EOF.
     * </p>
     *
     * @param buf       The buffer into which to place bytes read.
     * @param offset    The offset at which to place bytes read.
     * @param numToRead The number of bytes to read.
     * @return The number of bytes read, or -1 at EOF.
     * @throws IOException on error, including a truncated archive
     * @throws IllegalStateException if no current entry has been established via {@link #getNextEntry()}
     */
    @Override
    public int read(final byte[] buf, final int offset, int numToRead) throws IOException {
        if (numToRead == 0) {
            return 0;
        }
        int totalRead = 0;

        if (isAtEOF() || isDirectory()) {
            return -1;
        }

        if (currEntry == null) {
            throw new IllegalStateException("No current tar entry");
        }

        // Past the logical end of the current entry: per-entry EOF.
        if (entryOffset >= currEntry.getRealSize()) {
            return -1;
        }

        // Never read beyond the end of the current entry.
        numToRead = Math.min(numToRead, available());

        if (currEntry.isSparse()) {
            // for sparse entries, we need to read them in another way
            totalRead = readSparse(buf, offset, numToRead);
        } else {
            totalRead = in.read(buf, offset, numToRead);
        }

        if (totalRead == -1) {
            // Underlying stream ended while entry data was still expected.
            if (numToRead > 0) {
                throw new IOException("Truncated TAR archive");
            }
            setAtEOF(true);
        } else {
            count(totalRead);
            entryOffset += totalRead;
        }

        return totalRead;
    }

    /**
     * Reads the global PAX headers into {@link #globalPaxHeaders} and advances to the entry that follows them.
     *
     * @throws IOException on error reading or parsing the headers, or if no entry follows the global PAX header
     */
    private void readGlobalPaxHeaders() throws IOException {
        globalPaxHeaders = TarUtils.parsePaxHeaders(this, globalSparseHeaders, globalPaxHeaders, entrySize);
        getNextEntry(); // Get the actual file entry

        if (currEntry == null) {
            throw new IOException("Error detected parsing the pax header");
        }
    }

  577.     /**
  578.      * Adds the sparse chunks from the current entry to the sparse chunks, including any additional sparse entries following the current entry.
  579.      *
  580.      * @throws IOException on error
  581.      */
  582.     private void readOldGNUSparse() throws IOException {
  583.         if (currEntry.isExtended()) {
  584.             TarArchiveSparseEntry entry;
  585.             do {
  586.                 final byte[] headerBuf = getRecord();
  587.                 if (headerBuf == null) {
  588.                     throw new IOException("premature end of tar archive. Didn't find extended_header after header with extended flag.");
  589.                 }
  590.                 entry = new TarArchiveSparseEntry(headerBuf);
  591.                 currEntry.getSparseHeaders().addAll(entry.getSparseHeaders());
  592.             } while (entry.isExtended());
  593.         }

  594.         // sparse headers are all done reading, we need to build
  595.         // sparse input streams using these sparse headers
  596.         buildSparseInputStreams();
  597.     }

  598.     /**
  599.      * Read a record from the input stream and return the data.
  600.      *
  601.      * @return The record data or null if EOF has been hit.
  602.      * @throws IOException on error
  603.      */
  604.     protected byte[] readRecord() throws IOException {
  605.         final int readCount = IOUtils.readFully(in, recordBuffer);
  606.         count(readCount);
  607.         if (readCount != getRecordSize()) {
  608.             return null;
  609.         }

  610.         return recordBuffer;
  611.     }

    /**
     * For sparse tar entries, there are many "holes"(consisting of all 0) in the file. Only the non-zero data is stored in tar files, and they are stored
     * separately. The structure of non-zero data is introduced by the sparse headers using the offset, where a block of non-zero data starts, and numbytes, the
     * length of the non-zero data block. When reading sparse entries, the actual data is read out with "holes" and non-zero data combined together according to
     * the sparse headers.
     *
     * @param buf       The buffer into which to place bytes read.
     * @param offset    The offset at which to place bytes read.
     * @param numToRead The number of bytes to read.
     * @return The number of bytes read, or -1 at EOF.
     * @throws IOException on error
     */
    private int readSparse(final byte[] buf, final int offset, final int numToRead) throws IOException {
        // if there are no actual input streams, just read from the original input stream
        if (sparseInputStreams == null || sparseInputStreams.isEmpty()) {
            return in.read(buf, offset, numToRead);
        }
        if (currentSparseInputStreamIndex >= sparseInputStreams.size()) {
            return -1;
        }
        final InputStream currentInputStream = sparseInputStreams.get(currentSparseInputStreamIndex);
        final int readLen = currentInputStream.read(buf, offset, numToRead);
        // if the current input stream is the last input stream,
        // just return the number of bytes read from current input stream
        if (currentSparseInputStreamIndex == sparseInputStreams.size() - 1) {
            return readLen;
        }
        // if EOF of current input stream is meet, open a new input stream and recursively call read
        // (recursion depth is bounded: the index advances before every recursive call)
        if (readLen == -1) {
            currentSparseInputStreamIndex++;
            return readSparse(buf, offset, numToRead);
        }
        // if the rest data of current input stream is not long enough, open a new input stream
        // and recursively call read
        if (readLen < numToRead) {
            currentSparseInputStreamIndex++;
            final int readLenOfNext = readSparse(buf, offset + readLen, numToRead - readLen);
            if (readLenOfNext == -1) {
                return readLen;
            }
            return readLen + readLenOfNext;
        }
        // if the rest data of current input stream is enough(which means readLen == len), just return readLen
        return readLen;
    }

  657.     /**
  658.      * Since we do not support marking just yet, we do nothing.
  659.      */
  660.     @Override
  661.     public synchronized void reset() {
  662.     }

    /**
     * Sets whether the end of the archive has been reached.
     *
     * @param atEof {@code true} if the end of the archive has been hit.
     */
    protected final void setAtEOF(final boolean atEof) {
        this.atEof = atEof;
    }

    /**
     * Sets the entry this stream is currently positioned at.
     *
     * @param currEntry the new current entry.
     */
    protected final void setCurrentEntry(final TarArchiveEntry currEntry) {
        this.currEntry = currEntry;
    }

  669.     /**
  670.      * Skips over and discards {@code n} bytes of data from this input stream. The {@code skip} method may, for a variety of reasons, end up skipping over some
  671.      * smaller number of bytes, possibly {@code 0}. This may result from any of a number of conditions; reaching end of file or end of entry before {@code n}
  672.      * bytes have been skipped; are only two possibilities. The actual number of bytes skipped is returned. If {@code n} is negative, no bytes are skipped.
  673.      *
  674.      * @param n the number of bytes to be skipped.
  675.      * @return the actual number of bytes skipped.
  676.      * @throws IOException if a truncated tar archive is detected or some other I/O error occurs
  677.      */
  678.     @Override
  679.     public long skip(final long n) throws IOException {
  680.         if (n <= 0 || isDirectory()) {
  681.             return 0;
  682.         }

  683.         final long availableOfInputStream = in.available();
  684.         final long available = currEntry.getRealSize() - entryOffset;
  685.         final long numToSkip = Math.min(n, available);
  686.         long skipped;

  687.         if (!currEntry.isSparse()) {
  688.             skipped = IOUtils.skip(in, numToSkip);
  689.             // for non-sparse entry, we should get the bytes actually skipped bytes along with
  690.             // inputStream.available() if inputStream is instance of FileInputStream
  691.             skipped = getActuallySkipped(availableOfInputStream, skipped, numToSkip);
  692.         } else {
  693.             skipped = skipSparse(numToSkip);
  694.         }

  695.         count(skipped);
  696.         entryOffset += skipped;
  697.         return skipped;
  698.     }

  699.     /**
  700.      * The last record block should be written at the full size, so skip any additional space used to fill a record after an entry.
  701.      *
  702.      * @throws IOException if a truncated tar archive is detected
  703.      */
  704.     private void skipRecordPadding() throws IOException {
  705.         if (!isDirectory() && this.entrySize > 0 && this.entrySize % getRecordSize() != 0) {
  706.             final long available = in.available();
  707.             final long numRecords = this.entrySize / getRecordSize() + 1;
  708.             final long padding = numRecords * getRecordSize() - this.entrySize;
  709.             long skipped = IOUtils.skip(in, padding);

  710.             skipped = getActuallySkipped(available, skipped, padding);

  711.             count(skipped);
  712.         }
  713.     }

  714.     /**
  715.      * Skip n bytes from current input stream, if the current input stream doesn't have enough data to skip, jump to the next input stream and skip the rest
  716.      * bytes, keep doing this until total n bytes are skipped or the input streams are all skipped
  717.      *
  718.      * @param n bytes of data to skip
  719.      * @return actual bytes of data skipped
  720.      * @throws IOException
  721.      */
  722.     private long skipSparse(final long n) throws IOException {
  723.         if (sparseInputStreams == null || sparseInputStreams.isEmpty()) {
  724.             return in.skip(n);
  725.         }
  726.         long bytesSkipped = 0;
  727.         while (bytesSkipped < n && currentSparseInputStreamIndex < sparseInputStreams.size()) {
  728.             final InputStream currentInputStream = sparseInputStreams.get(currentSparseInputStreamIndex);
  729.             bytesSkipped += currentInputStream.skip(n - bytesSkipped);
  730.             if (bytesSkipped < n) {
  731.                 currentSparseInputStreamIndex++;
  732.             }
  733.         }
  734.         return bytesSkipped;
  735.     }

  736.     /**
  737.      * Tries to read the next record rewinding the stream if it is not an EOF record.
  738.      * <p>
  739.      * This is meant to protect against cases where a tar implementation has written only one EOF record when two are expected. Actually this won't help since a
  740.      * non-conforming implementation likely won't fill full blocks consisting of - by default - ten records either so we probably have already read beyond the
  741.      * archive anyway.
  742.      * </p>
  743.      */
  744.     private void tryToConsumeSecondEOFRecord() throws IOException {
  745.         boolean shouldReset = true;
  746.         final boolean marked = in.markSupported();
  747.         if (marked) {
  748.             in.mark(getRecordSize());
  749.         }
  750.         try {
  751.             shouldReset = !isEOFRecord(readRecord());
  752.         } finally {
  753.             if (shouldReset && marked) {
  754.                 pushedBackBytes(getRecordSize());
  755.                 in.reset();
  756.             }
  757.         }
  758.     }
  759. }