001/* 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * https://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, 013 * software distributed under the License is distributed on an 014 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 015 * KIND, either express or implied. See the License for the 016 * specific language governing permissions and limitations 017 * under the License. 018 */ 019package org.apache.commons.compress.archivers.zip; 020 021import static org.apache.commons.compress.archivers.zip.ZipConstants.DWORD; 022import static org.apache.commons.compress.archivers.zip.ZipConstants.SHORT; 023import static org.apache.commons.compress.archivers.zip.ZipConstants.WORD; 024import static org.apache.commons.compress.archivers.zip.ZipConstants.ZIP64_MAGIC; 025 026import java.io.ByteArrayInputStream; 027import java.io.ByteArrayOutputStream; 028import java.io.EOFException; 029import java.io.IOException; 030import java.io.InputStream; 031import java.io.PushbackInputStream; 032import java.math.BigInteger; 033import java.nio.ByteBuffer; 034import java.nio.charset.StandardCharsets; 035import java.util.Arrays; 036import java.util.Objects; 037import java.util.function.Function; 038import java.util.zip.CRC32; 039import java.util.zip.DataFormatException; 040import java.util.zip.Inflater; 041import java.util.zip.ZipEntry; 042import java.util.zip.ZipException; 043 044import org.apache.commons.compress.archivers.ArchiveEntry; 045import org.apache.commons.compress.archivers.ArchiveInputStream; 046import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream; 047import org.apache.commons.compress.compressors.deflate64.Deflate64CompressorInputStream; 048import org.apache.commons.compress.compressors.zstandard.ZstdCompressorInputStream; 049import org.apache.commons.compress.utils.ArchiveUtils; 050import org.apache.commons.compress.utils.IOUtils; 051import org.apache.commons.compress.utils.InputStreamStatistics; 052import org.apache.commons.io.input.BoundedInputStream; 053 054/** 055 * Implements an input stream that can read Zip archives. 056 * <p> 057 * As of Apache Commons Compress it transparently supports Zip64 extensions and thus individual entries and archives larger than 4 GB or with more than 65,536 058 * entries. 059 * </p> 060 * <p> 061 * The {@link ZipFile} class is preferred when reading from files as {@link ZipArchiveInputStream} is limited by not being able to read the central directory 062 * header before returning entries. 
In particular {@link ZipArchiveInputStream} 063 * </p> 064 * <ul> 065 * <li>may return entries that are not part of the central directory at all and shouldn't be considered part of the archive.</li> 066 * <li>may return several entries with the same name.</li> 067 * <li>will not return internal or external attributes.</li> 068 * <li>may return incomplete extra field data.</li> 069 * <li>may return unknown sizes and CRC values for entries until the next entry has been reached if the archive uses the data descriptor feature.</li> 070 * </ul> 071 * 072 * @see ZipFile 073 * @NotThreadSafe 074 */ 075public class ZipArchiveInputStream extends ArchiveInputStream<ZipArchiveEntry> implements InputStreamStatistics { 076 077 /** 078 * Input stream adapted from commons-io. 079 */ 080 private final class BoundCountInputStream extends BoundedInputStream { 081 082 // TODO Consider how to do this from a final class, an IO class, or basically without the current side-effect implementation. 083 084 /** 085 * Creates a new {@code BoundedInputStream} that wraps the given input stream and limits it to a certain size. 086 * 087 * @param in The wrapped input stream 088 * @param max The maximum number of bytes to return 089 */ 090 BoundCountInputStream(final InputStream in, final long max) { 091 super(in, max); 092 } 093 094 private boolean atMaxLength() { 095 return getMaxCount() >= 0 && getCount() >= getMaxCount(); 096 } 097 098 @Override 099 public int read() throws IOException { 100 if (atMaxLength()) { 101 return -1; 102 } 103 final int result = super.read(); 104 if (result != -1) { 105 readCount(1); 106 } 107 return result; 108 } 109 110 @Override 111 public int read(final byte[] b, final int off, final int len) throws IOException { 112 if (len == 0) { 113 return 0; 114 } 115 if (atMaxLength()) { 116 return -1; 117 } 118 final long maxRead = getMaxCount() >= 0 ? Math.min(len, getMaxCount() - getCount()) : len; 119 return readCount(super.read(b, off, (int) maxRead)); 120 } 121 122 private int readCount(final int bytesRead) { 123 if (bytesRead != -1) { 124 count(bytesRead); 125 current.bytesReadFromStream += bytesRead; 126 } 127 return bytesRead; 128 } 129 130 } 131 132 /** 133 * Structure collecting information for the entry that is currently being read. 134 */ 135 private static final class CurrentEntry { 136 137 /** 138 * Current ZIP entry. 139 */ 140 private final ZipArchiveEntry entry = new ZipArchiveEntry(); 141 142 /** 143 * Does the entry use a data descriptor? 144 */ 145 private boolean hasDataDescriptor; 146 147 /** 148 * Does the entry have a ZIP64 extended information extra field. 149 */ 150 private boolean usesZip64; 151 152 /** 153 * Number of bytes of entry content read by the client if the entry is STORED. 154 */ 155 private long bytesRead; 156 157 /** 158 * Number of bytes of entry content read from the stream. 159 * <p> 160 * This may be more than the actual entry's length as some stuff gets buffered up and needs to be pushed back when the end of the entry has been 161 * reached. 162 * </p> 163 */ 164 private long bytesReadFromStream; 165 166 /** 167 * The checksum calculated as the current entry is read. 168 */ 169 private final CRC32 crc = new CRC32(); 170 171 /** 172 * The input stream decompressing the data for shrunk and imploded entries. 
173 */ 174 private InputStream inputStream; 175 176 @SuppressWarnings("unchecked") // Caller beware 177 private <T extends InputStream> T checkInputStream() { 178 return (T) Objects.requireNonNull(inputStream, "inputStream"); 179 } 180 } 181 182 /** 183 * Maximum size of data in the first local file header. 184 */ 185 public static final int PREAMBLE_GARBAGE_MAX_SIZE = 4096; 186 187 private static final int LFH_LEN = 30; 188 189 /* 190 * local file header signature WORD version needed to extract SHORT general purpose bit flag SHORT compression method SHORT last mod file time SHORT last 191 * mod file date SHORT CRC-32 WORD compressed size WORD uncompressed size WORD file name length SHORT extra field length SHORT 192 */ 193 private static final int CFH_LEN = 46; 194 195 /* 196 * central file header signature WORD version made by SHORT version needed to extract SHORT general purpose bit flag SHORT compression method SHORT last mod 197 * file time SHORT last mod file date SHORT CRC-32 WORD compressed size WORD uncompressed size WORD file name length SHORT extra field length SHORT file 198 * comment length SHORT disk number start SHORT internal file attributes SHORT external file attributes WORD relative offset of local header WORD 199 */ 200 private static final long TWO_EXP_32 = ZIP64_MAGIC + 1; 201 202 private static final String USE_ZIPFILE_INSTEAD_OF_STREAM_DISCLAIMER = " while reading a stored entry using data descriptor. Either the archive is broken" 203 + " or it cannot be read using ZipArchiveInputStream and you must use ZipFile." 204 + " A common cause for this is a ZIP archive containing a ZIP archive." 205 + " See https://commons.apache.org/proper/commons-compress/zip.html#ZipArchiveInputStream_vs_ZipFile"; 206 207 private static final byte[] LFH = ZipLong.LFH_SIG.getBytes(); 208 209 private static final byte[] CFH = ZipLong.CFH_SIG.getBytes(); 210 211 private static final byte[] DD = ZipLong.DD_SIG.getBytes(); 212 213 private static final byte[] APK_SIGNING_BLOCK_MAGIC = { 'A', 'P', 'K', ' ', 'S', 'i', 'g', ' ', 'B', 'l', 'o', 'c', 'k', ' ', '4', '2', }; 214 215 private static final BigInteger LONG_MAX = BigInteger.valueOf(Long.MAX_VALUE); 216 217 private static boolean checkSig(final byte[] expected, final byte[] signature) { 218 for (int i = 0; i < expected.length; i++) { 219 if (signature[i] != expected[i]) { 220 return false; 221 } 222 } 223 return true; 224 } 225 226 /** 227 * Checks if the signature matches what is expected for a ZIP file. Does not currently handle self-extracting ZIPs which may have arbitrary leading content. 228 * 229 * @param signature the bytes to check 230 * @param length the number of bytes to check 231 * @return true, if this stream is a ZIP archive stream, false otherwise 232 */ 233 public static boolean matches(final byte[] signature, final int length) { 234 if (length < ZipArchiveOutputStream.LFH_SIG.length) { 235 return false; 236 } 237 238 return checkSig(ZipArchiveOutputStream.LFH_SIG, signature) // normal file 239 || checkSig(ZipArchiveOutputStream.EOCD_SIG, signature) // empty zip 240 || checkSig(ZipArchiveOutputStream.DD_SIG, signature) // split zip 241 || checkSig(ZipLong.SINGLE_SEGMENT_SPLIT_MARKER.getBytes(), signature); 242 } 243 244 /** The ZIP encoding to use for file names and the file comment. */ 245 private final ZipEncoding zipEncoding; 246 247 /** Whether to look for and use Unicode extra fields. */ 248 private final boolean useUnicodeExtraFields; 249 250 /** Inflater used for all deflated entries. 
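 * Created with {@code nowrap} enabled ({@code new Inflater(true)}) because ZIP entries store raw DEFLATE data without a zlib header or checksum.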
*/ 251 private final Inflater inf = new Inflater(true); 252 253 /** Buffer used to read from the wrapped stream. */ 254 private final ByteBuffer buf = ByteBuffer.allocate(ZipArchiveOutputStream.BUFFER_SIZE); 255 256 /** The entry that is currently being read. */ 257 private CurrentEntry current; 258 259 /** Whether the stream has been closed. */ 260 private boolean closed; 261 262 /** Whether the stream has reached the central directory - and thus found all entries. */ 263 private boolean hitCentralDirectory; 264 265 /** 266 * When reading a stored entry that uses the data descriptor this stream has to read the full entry and caches it. This is the cache. 267 */ 268 private ByteArrayInputStream lastStoredEntry; 269 270 /** 271 * Whether the stream will try to read STORED entries that use a data descriptor. Setting it to true means we will not stop reading an entry with the 272 * compressed size, instead we will stop reading an entry when a data descriptor is met (by finding the Data Descriptor Signature). This will completely 273 * break down in some cases - like JARs in WARs. 274 * <p> 275 * See also : https://issues.apache.org/jira/projects/COMPRESS/issues/COMPRESS-555 276 * https://github.com/apache/commons-compress/pull/137#issuecomment-690835644 277 * </p> 278 */ 279 private final boolean allowStoredEntriesWithDataDescriptor; 280 281 /** Count decompressed bytes for current entry */ 282 private long uncompressedCount; 283 284 /** Whether the stream will try to skip the ZIP split signature(08074B50) at the beginning **/ 285 private final boolean skipSplitSig; 286 287 /** Cached buffers - must only be used locally in the class (COMPRESS-172 - reduce garbage collection). */ 288 private final byte[] lfhBuf = new byte[LFH_LEN]; 289 290 private final byte[] skipBuf = new byte[1024]; 291 292 private final byte[] shortBuf = new byte[SHORT]; 293 294 private final byte[] wordBuf = new byte[WORD]; 295 296 private final byte[] twoDwordBuf = new byte[2 * DWORD]; 297 298 private int entriesRead; 299 300 /** 301 * The factory for extra fields or null. 302 */ 303 // private Function<ZipShort, ZipExtraField> extraFieldSupport; 304 305 /** 306 * Constructs an instance using UTF-8 encoding 307 * 308 * @param inputStream the stream to wrap 309 */ 310 public ZipArchiveInputStream(final InputStream inputStream) { 311 this(inputStream, StandardCharsets.UTF_8.name()); 312 } 313 314 /** 315 * Constructs an instance using the specified encoding 316 * 317 * @param inputStream the stream to wrap 318 * @param encoding the encoding to use for file names, use null for the platform's default encoding 319 * @since 1.5 320 */ 321 public ZipArchiveInputStream(final InputStream inputStream, final String encoding) { 322 this(inputStream, encoding, true); 323 } 324 325 /** 326 * Constructs an instance using the specified encoding 327 * 328 * @param inputStream the stream to wrap 329 * @param encoding the encoding to use for file names, use null for the platform's default encoding 330 * @param useUnicodeExtraFields whether to use InfoZIP Unicode Extra Fields (if present) to set the file names. 
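 *            For example, reading an archive whose names are stored in a legacy DOS code page while honoring Unicode extra fields when they
 *            are present might look like this (the file name and charset below are purely illustrative):
 *            <pre>{@code
 * try (ZipArchiveInputStream zipIn = new ZipArchiveInputStream(Files.newInputStream(Paths.get("archive.zip")), "CP437", true)) {
 *     ZipArchiveEntry entry;
 *     while ((entry = zipIn.getNextEntry()) != null) {
 *         if (!zipIn.canReadEntryData(entry)) {
 *             continue; // encrypted or compressed with an unsupported method
 *         }
 *         // consume the entry's data from zipIn before asking for the next entry
 *     }
 * }
 * }</pre>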
331 */ 332 public ZipArchiveInputStream(final InputStream inputStream, final String encoding, final boolean useUnicodeExtraFields) { 333 this(inputStream, encoding, useUnicodeExtraFields, false); 334 } 335 336 /** 337 * Constructs an instance using the specified encoding 338 * 339 * @param inputStream the stream to wrap 340 * @param encoding the encoding to use for file names, use null for the platform's default encoding 341 * @param useUnicodeExtraFields whether to use InfoZIP Unicode Extra Fields (if present) to set the file names. 342 * @param allowStoredEntriesWithDataDescriptor whether the stream will try to read STORED entries that use a data descriptor 343 * @since 1.1 344 */ 345 public ZipArchiveInputStream(final InputStream inputStream, final String encoding, final boolean useUnicodeExtraFields, 346 final boolean allowStoredEntriesWithDataDescriptor) { 347 this(inputStream, encoding, useUnicodeExtraFields, allowStoredEntriesWithDataDescriptor, false); 348 } 349 350 /** 351 * Constructs an instance using the specified encoding 352 * 353 * @param inputStream the stream to wrap 354 * @param encoding the encoding to use for file names, use null for the platform's default encoding 355 * @param useUnicodeExtraFields whether to use InfoZIP Unicode Extra Fields (if present) to set the file names. 356 * @param allowStoredEntriesWithDataDescriptor whether the stream will try to read STORED entries that use a data descriptor 357 * @param skipSplitSig Whether the stream will try to skip the zip split signature(08074B50) at the beginning. You will need to set 358 * this to true if you want to read a split archive. 359 * @since 1.20 360 */ 361 public ZipArchiveInputStream(final InputStream inputStream, final String encoding, final boolean useUnicodeExtraFields, 362 final boolean allowStoredEntriesWithDataDescriptor, final boolean skipSplitSig) { 363 super(inputStream, encoding); 364 this.in = new PushbackInputStream(inputStream, buf.capacity()); 365 this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding); 366 this.useUnicodeExtraFields = useUnicodeExtraFields; 367 this.allowStoredEntriesWithDataDescriptor = allowStoredEntriesWithDataDescriptor; 368 this.skipSplitSig = skipSplitSig; 369 // haven't read anything so far 370 buf.limit(0); 371 } 372 373 /** 374 * Checks whether the current buffer contains the signature of a "data descriptor", "local file header" or "central directory 375 * entry". 376 * <p> 377 * If it contains such a signature, reads the data descriptor and positions the stream right after the data descriptor. 
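 * If none of the buffered bytes form such a signature, returns {@code false} so that the caller can cache what has been read so far and continue scanning.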
378 * </p> 379 */ 380 private boolean bufferContainsSignature(final ByteArrayOutputStream bos, final int offset, final int lastRead, final int expectedDDLen) throws IOException { 381 382 boolean done = false; 383 for (int i = 0; !done && i < offset + lastRead - 4; i++) { 384 if (buf.array()[i] == LFH[0] && buf.array()[i + 1] == LFH[1]) { 385 int expectDDPos = i; 386 if (i >= expectedDDLen && buf.array()[i + 2] == LFH[2] && buf.array()[i + 3] == LFH[3] 387 || buf.array()[i + 2] == CFH[2] && buf.array()[i + 3] == CFH[3]) { 388 // found an LFH or CFH: 389 expectDDPos = i - expectedDDLen; 390 done = true; 391 } else if (buf.array()[i + 2] == DD[2] && buf.array()[i + 3] == DD[3]) { 392 // found DD: 393 done = true; 394 } 395 if (done) { 396 // * push back bytes read in excess as well as the data 397 // descriptor 398 // * copy the remaining bytes to cache 399 // * read data descriptor 400 pushback(buf.array(), expectDDPos, offset + lastRead - expectDDPos); 401 bos.write(buf.array(), 0, expectDDPos); 402 readDataDescriptor(); 403 } 404 } 405 } 406 return done; 407 } 408 409 /** 410 * If the last read bytes could hold a data descriptor and an incomplete signature then save the last bytes to the front of the buffer and cache everything 411 * in front of the potential data descriptor into the given ByteArrayOutputStream. 412 * <p> 413 * Data descriptor plus incomplete signature (3 bytes in the worst case) can be 20 bytes max. 414 * </p> 415 */ 416 private int cacheBytesRead(final ByteArrayOutputStream bos, int offset, final int lastRead, final int expectedDDLen) { 417 final int cacheable = offset + lastRead - expectedDDLen - 3; 418 if (cacheable > 0) { 419 bos.write(buf.array(), 0, cacheable); 420 System.arraycopy(buf.array(), cacheable, buf.array(), 0, expectedDDLen + 3); 421 offset = expectedDDLen + 3; 422 } else { 423 offset += lastRead; 424 } 425 return offset; 426 } 427 428 /** 429 * Tests whether this class is able to read the given entry. 430 * <p> 431 * May return false if it is set up to use encryption or a compression method that hasn't been implemented yet. 432 * </p> 433 * 434 * @since 1.1 435 */ 436 @Override 437 public boolean canReadEntryData(final ArchiveEntry ae) { 438 if (ae instanceof ZipArchiveEntry) { 439 final ZipArchiveEntry ze = (ZipArchiveEntry) ae; 440 return ZipUtil.canHandleEntryData(ze) && supportsDataDescriptorFor(ze) && supportsCompressedSizeFor(ze); 441 } 442 return false; 443 } 444 445 @Override 446 public void close() throws IOException { 447 if (!closed) { 448 closed = true; 449 try { 450 in.close(); 451 } finally { 452 inf.end(); 453 } 454 } 455 } 456 457 /** 458 * Closes the current ZIP archive entry and positions the underlying stream to the beginning of the next entry. All per-entry variables and data structures 459 * are cleared. 460 * <p> 461 * If the compressed size of this entry is included in the entry header, then any outstanding bytes are simply skipped from the underlying stream without 462 * uncompressing them. This allows an entry to be safely closed even if the compression method is unsupported. 463 * </p> 464 * <p> 465 * In case we don't know the compressed size of this entry or have already buffered too much data from the underlying stream to support uncompression, then 466 * the uncompression process is completed and the end position of the stream is adjusted based on the result of that process. 
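 * Either way the stream is left positioned behind the entry's data (and behind its data descriptor, if it uses one), ready for the next local file header or the central directory.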
467 * </p> 468 * 469 * @throws IOException if an error occurs 470 */ 471 private void closeEntry() throws IOException { 472 if (closed) { 473 throw new IOException("The stream is closed"); 474 } 475 if (current == null) { 476 return; 477 } 478 479 // Ensure all entry bytes are read 480 if (currentEntryHasOutstandingBytes()) { 481 drainCurrentEntryData(); 482 } else { 483 // this is guaranteed to exhaust the stream 484 if (skip(Long.MAX_VALUE) < 0) { 485 throw new IllegalStateException("Can't read the remainder of the stream"); 486 } 487 488 final long inB = current.entry.getMethod() == ZipArchiveOutputStream.DEFLATED ? getBytesInflated() : current.bytesRead; 489 490 // this is at most a single read() operation and can't 491 // exceed the range of int 492 final int diff = (int) (current.bytesReadFromStream - inB); 493 494 // Pushback any required bytes 495 if (diff > 0) { 496 pushback(buf.array(), buf.limit() - diff, diff); 497 current.bytesReadFromStream -= diff; 498 } 499 500 // Drain remainder of entry if not all data bytes were required 501 if (currentEntryHasOutstandingBytes()) { 502 drainCurrentEntryData(); 503 } 504 } 505 506 if (lastStoredEntry == null && current.hasDataDescriptor) { 507 readDataDescriptor(); 508 } 509 510 inf.reset(); 511 buf.clear().flip(); 512 current = null; 513 lastStoredEntry = null; 514 } 515 516 /** 517 * Creates the appropriate InputStream for the Zstd compression method. 518 * 519 * @param in the input stream which should be used for compression. 520 * @return the {@link InputStream} for handling the Zstd compression. 521 * @throws IOException if an I/O error occurs. 522 * @since 1.28.0 523 */ 524 protected InputStream createZstdInputStream(final InputStream in) throws IOException { 525 return new ZstdCompressorInputStream(in); 526 } 527 528 /** 529 * If the compressed size of the current entry is included in the entry header and there are any outstanding bytes in the underlying stream, then this 530 * returns true. 531 * 532 * @return true, if current entry is determined to have outstanding bytes, false otherwise 533 */ 534 private boolean currentEntryHasOutstandingBytes() { 535 return current.bytesReadFromStream <= current.entry.getCompressedSize() && !current.hasDataDescriptor; 536 } 537 538 /** 539 * Reads all data of the current entry from the underlying stream that hasn't been read, yet. 540 */ 541 private void drainCurrentEntryData() throws IOException { 542 long remaining = current.entry.getCompressedSize() - current.bytesReadFromStream; 543 while (remaining > 0) { 544 final long n = in.read(buf.array(), 0, (int) Math.min(buf.capacity(), remaining)); 545 if (n < 0) { 546 throw new EOFException("Truncated ZIP entry: " + ArchiveUtils.sanitize(current.entry.getName())); 547 } 548 count(n); 549 remaining -= n; 550 } 551 } 552 553 private int fill() throws IOException { 554 if (closed) { 555 throw new IOException("The stream is closed"); 556 } 557 final int length = in.read(buf.array()); 558 if (length > 0) { 559 buf.limit(length); 560 count(buf.limit()); 561 inf.setInput(buf.array(), 0, buf.limit()); 562 } 563 return length; 564 } 565 566 /** 567 * Reads forward until the signature of the "End of central directory" record is found. 
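 * Returns {@code false} if the end of the underlying stream is reached before the signature turns up.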
568 */ 569 private boolean findEocdRecord() throws IOException { 570 int currentByte = -1; 571 boolean skipReadCall = false; 572 while (skipReadCall || (currentByte = readOneByte()) > -1) { 573 skipReadCall = false; 574 if (!isFirstByteOfEocdSig(currentByte)) { 575 continue; 576 } 577 currentByte = readOneByte(); 578 if (currentByte != ZipArchiveOutputStream.EOCD_SIG[1]) { 579 if (currentByte == -1) { 580 break; 581 } 582 skipReadCall = isFirstByteOfEocdSig(currentByte); 583 continue; 584 } 585 currentByte = readOneByte(); 586 if (currentByte != ZipArchiveOutputStream.EOCD_SIG[2]) { 587 if (currentByte == -1) { 588 break; 589 } 590 skipReadCall = isFirstByteOfEocdSig(currentByte); 591 continue; 592 } 593 currentByte = readOneByte(); 594 if (currentByte == -1) { 595 break; 596 } 597 if (currentByte == ZipArchiveOutputStream.EOCD_SIG[3]) { 598 return true; 599 } 600 skipReadCall = isFirstByteOfEocdSig(currentByte); 601 } 602 return false; 603 } 604 605 /** 606 * Gets the number of bytes Inflater has actually processed. 607 * <p> 608 * for Java < Java7 the getBytes* methods in Inflater/Deflater seem to return unsigned ints rather than longs that start over with 0 at 2^32. 609 * </p> 610 * <p> 611 * The stream knows how many bytes it has read, but not how many the Inflater actually consumed - it should be between the total number of bytes read for 612 * the entry and the total number minus the last read operation. Here we just try to make the value close enough to the bytes we've read by assuming the 613 * number of bytes consumed must be smaller than (or equal to) the number of bytes read but not smaller by more than 2^32. 614 * </p> 615 */ 616 private long getBytesInflated() { 617 long inB = inf.getBytesRead(); 618 if (current.bytesReadFromStream >= TWO_EXP_32) { 619 while (inB + TWO_EXP_32 <= current.bytesReadFromStream) { 620 inB += TWO_EXP_32; 621 } 622 } 623 return inB; 624 } 625 626 /** 627 * @since 1.17 628 */ 629 @SuppressWarnings("resource") // checkInputStream() does not allocate. 630 @Override 631 public long getCompressedCount() { 632 if (current == null) { 633 return -1; 634 } 635 final int method = current.entry.getMethod(); 636 if (method == ZipArchiveOutputStream.STORED) { 637 return current.bytesRead; 638 } 639 if (method == ZipArchiveOutputStream.DEFLATED) { 640 return getBytesInflated(); 641 } 642 if (method == ZipMethod.UNSHRINKING.getCode() || method == ZipMethod.IMPLODING.getCode() || method == ZipMethod.ENHANCED_DEFLATED.getCode() 643 || method == ZipMethod.BZIP2.getCode()) { 644 return ((InputStreamStatistics) current.checkInputStream()).getCompressedCount(); 645 } 646 return -1; 647 } 648 649 @Override 650 public ZipArchiveEntry getNextEntry() throws IOException { 651 return getNextZipEntry(); 652 } 653 654 /** 655 * Gets the next entry. 656 * 657 * @return the next entry. 658 * @throws IOException if an I/O error occurs. 659 * @deprecated Use {@link #getNextEntry()}. 660 */ 661 @Deprecated 662 public ZipArchiveEntry getNextZipEntry() throws IOException { 663 uncompressedCount = 0; 664 665 boolean firstEntry = true; 666 if (closed || hitCentralDirectory) { 667 return null; 668 } 669 if (current != null) { 670 closeEntry(); 671 firstEntry = false; 672 } 673 674 final long currentHeaderOffset = getBytesRead(); 675 try { 676 if (firstEntry) { 677 // split archives have a special signature before the 678 // first local file header - look for it and fail with 679 // the appropriate error message if this is a split 680 // archive. 
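                // readFirstLocalFileHeader() only returns false when the first record already is the
                // "end of central directory" record, i.e. the archive contains no entries at all.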
681 if (!readFirstLocalFileHeader()) { 682 hitCentralDirectory = true; 683 skipRemainderOfArchive(); 684 return null; 685 } 686 } else { 687 readFully(lfhBuf); 688 } 689 } catch (final EOFException e) { // NOSONAR 690 return null; 691 } 692 693 final ZipLong sig = new ZipLong(lfhBuf); 694 if (!sig.equals(ZipLong.LFH_SIG)) { 695 if (sig.equals(ZipLong.CFH_SIG) || sig.equals(ZipLong.AED_SIG) || isApkSigningBlock(lfhBuf)) { 696 hitCentralDirectory = true; 697 skipRemainderOfArchive(); 698 return null; 699 } 700 throw new ZipException(String.format("Unexpected record signature: 0x%x", sig.getValue())); 701 } 702 // off: go past the signature 703 int off = WORD; 704 current = new CurrentEntry(); 705 // get version 706 final int versionMadeBy = ZipShort.getValue(lfhBuf, off); 707 off += SHORT; 708 current.entry.setPlatform(ZipFile.toPlatform(versionMadeBy)); 709 710 final GeneralPurposeBit gpFlag = GeneralPurposeBit.parse(lfhBuf, off); 711 final boolean hasUTF8Flag = gpFlag.usesUTF8ForNames(); 712 final ZipEncoding entryEncoding = hasUTF8Flag ? ZipEncodingHelper.ZIP_ENCODING_UTF_8 : zipEncoding; 713 current.hasDataDescriptor = gpFlag.usesDataDescriptor(); 714 current.entry.setGeneralPurposeBit(gpFlag); 715 716 off += SHORT; 717 718 current.entry.setMethod(ZipShort.getValue(lfhBuf, off)); 719 off += SHORT; 720 721 final long time = ZipUtil.dosToJavaTime(ZipLong.getValue(lfhBuf, off)); 722 current.entry.setTime(time); 723 off += WORD; 724 725 ZipLong size = null; 726 ZipLong cSize = null; 727 if (!current.hasDataDescriptor) { 728 current.entry.setCrc(ZipLong.getValue(lfhBuf, off)); 729 off += WORD; 730 731 cSize = new ZipLong(lfhBuf, off); 732 off += WORD; 733 734 size = new ZipLong(lfhBuf, off); 735 off += WORD; 736 } else { 737 off += 3 * WORD; 738 } 739 740 final int fileNameLen = ZipShort.getValue(lfhBuf, off); 741 742 off += SHORT; 743 744 final int extraLen = ZipShort.getValue(lfhBuf, off); 745 off += SHORT; // NOSONAR - assignment as documentation 746 747 final byte[] fileName = readRange(fileNameLen); 748 current.entry.setName(entryEncoding.decode(fileName), fileName); 749 if (hasUTF8Flag) { 750 current.entry.setNameSource(ZipArchiveEntry.NameSource.NAME_WITH_EFS_FLAG); 751 } 752 753 final byte[] extraData = readRange(extraLen); 754 try { 755 current.entry.setExtra(extraData); 756 } catch (final RuntimeException ex) { 757 throw ZipUtil.newZipException("Invalid extra data in entry " + current.entry.getName(), ex); 758 } 759 760 if (!hasUTF8Flag && useUnicodeExtraFields) { 761 ZipUtil.setNameAndCommentFromExtraFields(current.entry, fileName, null); 762 } 763 764 processZip64Extra(size, cSize); 765 766 current.entry.setLocalHeaderOffset(currentHeaderOffset); 767 current.entry.setDataOffset(getBytesRead()); 768 current.entry.setStreamContiguous(true); 769 770 final ZipMethod m = ZipMethod.getMethodByCode(current.entry.getMethod()); 771 if (current.entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN) { 772 if (ZipUtil.canHandleEntryData(current.entry) && m != ZipMethod.STORED && m != ZipMethod.DEFLATED) { 773 final InputStream bis = new BoundCountInputStream(in, current.entry.getCompressedSize()); 774 switch (m) { 775 case UNSHRINKING: 776 current.inputStream = new UnshrinkingInputStream(bis); 777 break; 778 case IMPLODING: 779 try { 780 current.inputStream = new ExplodingInputStream(current.entry.getGeneralPurposeBit().getSlidingDictionarySize(), 781 current.entry.getGeneralPurposeBit().getNumberOfShannonFanoTrees(), bis); 782 } catch (final IllegalArgumentException ex) { 783 throw new 
IOException("bad IMPLODE data", ex); 784 } 785 break; 786 case BZIP2: 787 current.inputStream = new BZip2CompressorInputStream(bis); 788 break; 789 case ENHANCED_DEFLATED: 790 current.inputStream = new Deflate64CompressorInputStream(bis); 791 break; 792 case ZSTD: 793 case ZSTD_DEPRECATED: 794 current.inputStream = createZstdInputStream(bis); 795 break; 796 default: 797 // we should never get here as all supported methods have been covered 798 // will cause an error when read is invoked, don't throw an exception here so people can 799 // skip unsupported entries 800 break; 801 } 802 } 803 } else if (m == ZipMethod.ENHANCED_DEFLATED) { 804 current.inputStream = new Deflate64CompressorInputStream(in); 805 } 806 807 entriesRead++; 808 return current.entry; 809 } 810 811 /** 812 * Gets the uncompressed count. 813 * 814 * @since 1.17 815 */ 816 @Override 817 public long getUncompressedCount() { 818 return uncompressedCount; 819 } 820 821 /** 822 * Checks whether this might be an APK Signing Block. 823 * <p> 824 * Unfortunately the APK signing block does not start with some kind of signature, it rather ends with one. It starts with a length, so what we do is parse 825 * the suspect length, skip ahead far enough, look for the signature and if we've found it, return true. 826 * </p> 827 * 828 * @param suspectLocalFileHeader the bytes read from the underlying stream in the expectation that they would hold the local file header of the next entry. 829 * @return true if this looks like an APK signing block 830 * @see <a href="https://source.android.com/security/apksigning/v2">https://source.android.com/security/apksigning/v2</a> 831 */ 832 private boolean isApkSigningBlock(final byte[] suspectLocalFileHeader) throws IOException { 833 // length of block excluding the size field itself 834 final BigInteger len = ZipEightByteInteger.getValue(suspectLocalFileHeader); 835 // LFH has already been read and all but the first eight bytes contain (part of) the APK signing block, 836 // also subtract 16 bytes in order to position us at the magic string 837 BigInteger toSkip = len.add(BigInteger.valueOf(DWORD - suspectLocalFileHeader.length - (long) APK_SIGNING_BLOCK_MAGIC.length)); 838 final byte[] magic = new byte[APK_SIGNING_BLOCK_MAGIC.length]; 839 840 try { 841 if (toSkip.signum() < 0) { 842 // suspectLocalFileHeader contains the start of suspect magic string 843 final int off = suspectLocalFileHeader.length + toSkip.intValue(); 844 // length was shorter than magic length 845 if (off < DWORD) { 846 return false; 847 } 848 final int bytesInBuffer = Math.abs(toSkip.intValue()); 849 System.arraycopy(suspectLocalFileHeader, off, magic, 0, Math.min(bytesInBuffer, magic.length)); 850 if (bytesInBuffer < magic.length) { 851 readFully(magic, bytesInBuffer); 852 } 853 } else { 854 while (toSkip.compareTo(LONG_MAX) > 0) { 855 realSkip(Long.MAX_VALUE); 856 toSkip = toSkip.add(LONG_MAX.negate()); 857 } 858 realSkip(toSkip.longValue()); 859 readFully(magic); 860 } 861 } catch (final EOFException ex) { // NOSONAR 862 // length was invalid 863 return false; 864 } 865 return Arrays.equals(magic, APK_SIGNING_BLOCK_MAGIC); 866 } 867 868 private boolean isFirstByteOfEocdSig(final int b) { 869 return b == ZipArchiveOutputStream.EOCD_SIG[0]; 870 } 871 872 /** 873 * Records whether a Zip64 extra is present and sets the size information from it if sizes are 0xFFFFFFFF and the entry doesn't use a data descriptor. 
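 * If the entry uses a data descriptor, the sizes in the local file header are not meaningful, so nothing is copied here; the real values are picked up later from the data descriptor.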
874 */ 875 private void processZip64Extra(final ZipLong size, final ZipLong cSize) throws ZipException { 876 final ZipExtraField extra = current.entry.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID); 877 if (extra != null && !(extra instanceof Zip64ExtendedInformationExtraField)) { 878 throw new ZipException("archive contains unparseable zip64 extra field"); 879 } 880 final Zip64ExtendedInformationExtraField z64 = (Zip64ExtendedInformationExtraField) extra; 881 current.usesZip64 = z64 != null; 882 if (!current.hasDataDescriptor) { 883 if (z64 != null // same as current.usesZip64 but avoids NPE warning 884 && (ZipLong.ZIP64_MAGIC.equals(cSize) || ZipLong.ZIP64_MAGIC.equals(size))) { 885 if (z64.getCompressedSize() == null || z64.getSize() == null) { 886 // avoid NPE if it's a corrupted ZIP archive 887 throw new ZipException("archive contains corrupted zip64 extra field"); 888 } 889 long s = z64.getCompressedSize().getLongValue(); 890 if (s < 0) { 891 throw new ZipException("broken archive, entry with negative compressed size"); 892 } 893 current.entry.setCompressedSize(s); 894 s = z64.getSize().getLongValue(); 895 if (s < 0) { 896 throw new ZipException("broken archive, entry with negative size"); 897 } 898 current.entry.setSize(s); 899 } else if (cSize != null && size != null) { 900 if (cSize.getValue() < 0) { 901 throw new ZipException("broken archive, entry with negative compressed size"); 902 } 903 current.entry.setCompressedSize(cSize.getValue()); 904 if (size.getValue() < 0) { 905 throw new ZipException("broken archive, entry with negative size"); 906 } 907 current.entry.setSize(size.getValue()); 908 } 909 } 910 } 911 912 private void pushback(final byte[] buf, final int offset, final int length) throws IOException { 913 if (offset < 0) { 914 // Instead of ArrayIndexOutOfBoundsException 915 throw new IOException(String.format("Negative offset %,d into buffer", offset)); 916 } 917 ((PushbackInputStream) in).unread(buf, offset, length); 918 pushedBackBytes(length); 919 } 920 921 @Override 922 public int read(final byte[] buffer, final int offset, final int length) throws IOException { 923 if (length == 0) { 924 return 0; 925 } 926 if (closed) { 927 throw new IOException("The stream is closed"); 928 } 929 930 if (current == null) { 931 return -1; 932 } 933 934 // avoid int overflow, check null buffer 935 if (offset > buffer.length || length < 0 || offset < 0 || buffer.length - offset < length) { 936 throw new ArrayIndexOutOfBoundsException(); 937 } 938 939 ZipUtil.checkRequestedFeatures(current.entry); 940 if (!supportsDataDescriptorFor(current.entry)) { 941 throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.DATA_DESCRIPTOR, current.entry); 942 } 943 if (!supportsCompressedSizeFor(current.entry)) { 944 throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.UNKNOWN_COMPRESSED_SIZE, current.entry); 945 } 946 947 final int read; 948 final int method = current.entry.getMethod(); 949 if (method == ZipArchiveOutputStream.STORED) { 950 read = readStored(buffer, offset, length); 951 } else if (method == ZipArchiveOutputStream.DEFLATED) { 952 read = readDeflated(buffer, offset, length); 953 } else if (method == ZipMethod.UNSHRINKING.getCode() || method == ZipMethod.IMPLODING.getCode() 954 || method == ZipMethod.ENHANCED_DEFLATED.getCode() || method == ZipMethod.BZIP2.getCode() 955 || ZipMethod.isZstd(method) 956 || method == ZipMethod.XZ.getCode()) { 957 read = current.inputStream.read(buffer, offset, length); 958 } else { 959 
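            // not STORED, DEFLATED or one of the methods wired up in getNextZipEntry:
            // report the method as unsupported rather than returning undecoded bytes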
throw new UnsupportedZipFeatureException(ZipMethod.getMethodByCode(method), current.entry); 960 } 961 962 if (read >= 0) { 963 current.crc.update(buffer, offset, read); 964 uncompressedCount += read; 965 } 966 967 return read; 968 } 969 970 private void readDataDescriptor() throws IOException { 971 readFully(wordBuf); 972 ZipLong val = new ZipLong(wordBuf); 973 if (ZipLong.DD_SIG.equals(val)) { 974 // data descriptor with signature, skip sig 975 readFully(wordBuf); 976 val = new ZipLong(wordBuf); 977 } 978 current.entry.setCrc(val.getValue()); 979 980 // if there is a ZIP64 extra field, sizes are eight bytes 981 // each, otherwise four bytes each. Unfortunately some 982 // implementations - namely Java7 - use eight bytes without 983 // using a ZIP64 extra field - 984 // https://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588 985 986 // just read 16 bytes and check whether bytes nine to twelve 987 // look like one of the signatures of what could follow a data 988 // descriptor (ignoring archive decryption headers for now). 989 // If so, push back eight bytes and assume sizes are four 990 // bytes, otherwise sizes are eight bytes each. 991 readFully(twoDwordBuf); 992 final ZipLong potentialSig = new ZipLong(twoDwordBuf, DWORD); 993 if (potentialSig.equals(ZipLong.CFH_SIG) || potentialSig.equals(ZipLong.LFH_SIG)) { 994 pushback(twoDwordBuf, DWORD, DWORD); 995 long size = ZipLong.getValue(twoDwordBuf); 996 if (size < 0) { 997 throw new ZipException("broken archive, entry with negative compressed size"); 998 } 999 current.entry.setCompressedSize(size); 1000 size = ZipLong.getValue(twoDwordBuf, WORD); 1001 if (size < 0) { 1002 throw new ZipException("broken archive, entry with negative size"); 1003 } 1004 current.entry.setSize(size); 1005 } else { 1006 long size = ZipEightByteInteger.getLongValue(twoDwordBuf); 1007 if (size < 0) { 1008 throw new ZipException("broken archive, entry with negative compressed size"); 1009 } 1010 current.entry.setCompressedSize(size); 1011 size = ZipEightByteInteger.getLongValue(twoDwordBuf, DWORD); 1012 if (size < 0) { 1013 throw new ZipException("broken archive, entry with negative size"); 1014 } 1015 current.entry.setSize(size); 1016 } 1017 } 1018 1019 /** 1020 * Implements read for DEFLATED entries. 1021 */ 1022 private int readDeflated(final byte[] buffer, final int offset, final int length) throws IOException { 1023 final int read = readFromInflater(buffer, offset, length); 1024 if (read <= 0) { 1025 if (inf.finished()) { 1026 return -1; 1027 } 1028 if (inf.needsDictionary()) { 1029 throw new ZipException("This archive needs a preset dictionary which is not supported by Commons Compress."); 1030 } 1031 if (read == -1) { 1032 throw new IOException("Truncated ZIP file"); 1033 } 1034 } 1035 return read; 1036 } 1037 1038 /** 1039 * Fills the given array with the first local file header and deals with splitting/spanning markers that may prefix the first LFH. 
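 * Leading bytes (such as a self-extracting stub) may precede the first header, so up to {@link #PREAMBLE_GARBAGE_MAX_SIZE} bytes are scanned for a known signature before giving up.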
1040 */ 1041 private boolean readFirstLocalFileHeader() throws IOException { 1042 // for empty archive, we may get only EOCD size: 1043 final byte[] header = new byte[Math.min(LFH_LEN, ZipFile.MIN_EOCD_SIZE)]; 1044 readFully(header); 1045 try { 1046 READ_LOOP: for (int i = 0; ; ) { 1047 for (int j = 0; i <= PREAMBLE_GARBAGE_MAX_SIZE - 4 && j <= header.length - 4; ++j, ++i) { 1048 final ZipLong sig = new ZipLong(header, j); 1049 if (sig.equals(ZipLong.LFH_SIG) || 1050 sig.equals(ZipLong.SINGLE_SEGMENT_SPLIT_MARKER) || 1051 sig.equals(ZipLong.DD_SIG)) { 1052 // regular archive containing at least one entry: 1053 System.arraycopy(header, j, header, 0, header.length - j); 1054 readFully(header, header.length - j); 1055 break READ_LOOP; 1056 } 1057 if (sig.equals(new ZipLong(ZipArchiveOutputStream.EOCD_SIG))) { 1058 // empty archive: 1059 pushback(header, j, header.length - j); 1060 return false; 1061 } 1062 } 1063 if (i >= PREAMBLE_GARBAGE_MAX_SIZE - 4) { 1064 throw new ZipException("Cannot find zip signature within the first " + PREAMBLE_GARBAGE_MAX_SIZE + " bytes"); 1065 } 1066 System.arraycopy(header, header.length - 3, header, 0, 3); 1067 readFully(header, 3); 1068 } 1069 System.arraycopy(header, 0, lfhBuf, 0, header.length); 1070 readFully(lfhBuf, header.length); 1071 } catch (final EOFException ex) { 1072 throw new ZipException("Cannot find zip signature within the file"); 1073 } 1074 final ZipLong sig = new ZipLong(lfhBuf); 1075 1076 if (!skipSplitSig && sig.equals(ZipLong.DD_SIG)) { 1077 throw new UnsupportedZipFeatureException(UnsupportedZipFeatureException.Feature.SPLITTING); 1078 } 1079 1080 // the split ZIP signature(08074B50) should only be skipped when the skipSplitSig is set 1081 if (sig.equals(ZipLong.SINGLE_SEGMENT_SPLIT_MARKER) || sig.equals(ZipLong.DD_SIG)) { 1082 // Just skip over the marker. 1083 System.arraycopy(lfhBuf, 4, lfhBuf, 0, lfhBuf.length - 4); 1084 readFully(lfhBuf, lfhBuf.length - 4); 1085 } 1086 return true; 1087 } 1088 1089 /** 1090 * Potentially reads more bytes to fill the inflater's buffer and reads from it. 
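 * Returns -1 if the underlying stream ends while the inflater still expects more input.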
1091 */ 1092 private int readFromInflater(final byte[] buffer, final int offset, final int length) throws IOException { 1093 int read = 0; 1094 do { 1095 if (inf.needsInput()) { 1096 final int l = fill(); 1097 if (l > 0) { 1098 current.bytesReadFromStream += buf.limit(); 1099 } else if (l == -1) { 1100 return -1; 1101 } else { 1102 break; 1103 } 1104 } 1105 try { 1106 read = inf.inflate(buffer, offset, length); 1107 } catch (final DataFormatException e) { 1108 throw ZipUtil.newZipException(e.getMessage(), e); 1109 } 1110 } while (read == 0 && inf.needsInput()); 1111 return read; 1112 } 1113 1114 private void readFully(final byte[] b) throws IOException { 1115 readFully(b, 0); 1116 } 1117 1118 private void readFully(final byte[] b, final int off) throws IOException { 1119 final int len = b.length - off; 1120 final int count = IOUtils.readFully(in, b, off, len); 1121 count(count); 1122 if (count < len) { 1123 throw new EOFException(); 1124 } 1125 } 1126 1127 // End of Central Directory Record 1128 // end of central dir signature WORD 1129 // number of this disk SHORT 1130 // number of the disk with the 1131 // start of the central directory SHORT 1132 // total number of entries in the 1133 // central directory on this disk SHORT 1134 // total number of entries in 1135 // the central directory SHORT 1136 // size of the central directory WORD 1137 // offset of start of central 1138 // directory with respect to 1139 // the starting disk number WORD 1140 // .ZIP file comment length SHORT 1141 // .ZIP file comment up to 64KB 1142 // 1143 1144 /** 1145 * Reads bytes by reading from the underlying stream rather than the (potentially inflating) archive stream - which {@link #read} would do. 1146 * 1147 * Also updates bytes-read counter. 1148 */ 1149 private int readOneByte() throws IOException { 1150 final int b = in.read(); 1151 if (b != -1) { 1152 count(1); 1153 } 1154 return b; 1155 } 1156 1157 private byte[] readRange(final int len) throws IOException { 1158 final byte[] ret = IOUtils.readRange(in, len); 1159 count(ret.length); 1160 if (ret.length < len) { 1161 throw new EOFException(); 1162 } 1163 return ret; 1164 } 1165 1166 /** 1167 * Implements read for STORED entries. 1168 */ 1169 private int readStored(final byte[] buffer, final int offset, final int length) throws IOException { 1170 1171 if (current.hasDataDescriptor) { 1172 if (lastStoredEntry == null) { 1173 readStoredEntry(); 1174 } 1175 return lastStoredEntry.read(buffer, offset, length); 1176 } 1177 1178 final long csize = current.entry.getSize(); 1179 if (current.bytesRead >= csize) { 1180 return -1; 1181 } 1182 1183 if (buf.position() >= buf.limit()) { 1184 buf.position(0); 1185 final int l = in.read(buf.array()); 1186 if (l == -1) { 1187 buf.limit(0); 1188 throw new IOException("Truncated ZIP file"); 1189 } 1190 buf.limit(l); 1191 1192 count(l); 1193 current.bytesReadFromStream += l; 1194 } 1195 1196 int toRead = Math.min(buf.remaining(), length); 1197 if (csize - current.bytesRead < toRead) { 1198 // if it is smaller than toRead then it fits into an int 1199 toRead = (int) (csize - current.bytesRead); 1200 } 1201 buf.get(buffer, offset, toRead); 1202 current.bytesRead += toRead; 1203 return toRead; 1204 } 1205 1206 /** 1207 * Caches a stored entry that uses the data descriptor. 1208 * <ul> 1209 * <li>Reads a stored entry until the signature of a local file header, central directory header or data descriptor has been found.</li> 1210 * <li>Stores all entry data in lastStoredEntry. 
1211 * </p> 1212 * <li>Rewinds the stream to position at the data descriptor.</li> 1213 * <li>reads the data descriptor</li> 1214 * </ul> 1215 * <p> 1216 * After calling this method the entry should know its size, the entry's data is cached and the stream is positioned at the next local file or central 1217 * directory header. 1218 * </p> 1219 */ 1220 private void readStoredEntry() throws IOException { 1221 final ByteArrayOutputStream bos = new ByteArrayOutputStream(); 1222 int off = 0; 1223 boolean done = false; 1224 1225 // length of DD without signature 1226 final int ddLen = current.usesZip64 ? WORD + 2 * DWORD : 3 * WORD; 1227 1228 while (!done) { 1229 final int r = in.read(buf.array(), off, ZipArchiveOutputStream.BUFFER_SIZE - off); 1230 if (r <= 0) { 1231 // read the whole archive without ever finding a 1232 // central directory 1233 throw new IOException("Truncated ZIP file"); 1234 } 1235 if (r + off < 4) { 1236 // buffer too small to check for a signature, loop 1237 off += r; 1238 continue; 1239 } 1240 1241 done = bufferContainsSignature(bos, off, r, ddLen); 1242 if (!done) { 1243 off = cacheBytesRead(bos, off, r, ddLen); 1244 } 1245 } 1246 if (current.entry.getCompressedSize() != current.entry.getSize()) { 1247 throw new ZipException("compressed and uncompressed size don't match" + USE_ZIPFILE_INSTEAD_OF_STREAM_DISCLAIMER); 1248 } 1249 final byte[] b = bos.toByteArray(); 1250 if (b.length != current.entry.getSize()) { 1251 throw new ZipException("actual and claimed size don't match" + USE_ZIPFILE_INSTEAD_OF_STREAM_DISCLAIMER); 1252 } 1253 lastStoredEntry = new ByteArrayInputStream(b); 1254 } 1255 1256 /** 1257 * Skips bytes by reading from the underlying stream rather than the (potentially inflating) archive stream - which {@link #skip} would do. 1258 * 1259 * Also updates bytes-read counter. 1260 */ 1261 private void realSkip(final long value) throws IOException { 1262 if (value >= 0) { 1263 long skipped = 0; 1264 while (skipped < value) { 1265 final long rem = value - skipped; 1266 final int x = in.read(skipBuf, 0, (int) (skipBuf.length > rem ? rem : skipBuf.length)); 1267 if (x == -1) { 1268 return; 1269 } 1270 count(x); 1271 skipped += x; 1272 } 1273 return; 1274 } 1275 throw new IllegalArgumentException(); 1276 } 1277 1278 /** 1279 * Currently unused. 1280 * 1281 * Sets the custom extra fields factory. 1282 * @param extraFieldSupport the lookup function based on extra field header id. 1283 * @return the archive. 1284 */ 1285 public ZipArchiveInputStream setExtraFieldSupport(final Function<ZipShort, ZipExtraField> extraFieldSupport) { 1286 // this.extraFieldSupport = extraFieldSupport; 1287 return this; 1288 } 1289 1290 /** 1291 * Skips over and discards value bytes of data from this input stream. 1292 * <p> 1293 * This implementation may end up skipping over some smaller number of bytes, possibly 0, if and only if it reaches the end of the underlying stream. 1294 * </p> 1295 * <p> 1296 * The actual number of bytes skipped is returned. 1297 * </p> 1298 * 1299 * @param value the number of bytes to be skipped. 1300 * @return the actual number of bytes skipped. 1301 * @throws IOException - if an I/O error occurs. 1302 * @throws IllegalArgumentException - if value is negative. 1303 */ 1304 @Override 1305 public long skip(final long value) throws IOException { 1306 if (value >= 0) { 1307 long skipped = 0; 1308 while (skipped < value) { 1309 final long rem = value - skipped; 1310 final int x = read(skipBuf, 0, (int) (skipBuf.length > rem ? 
rem : skipBuf.length)); 1311 if (x == -1) { 1312 return skipped; 1313 } 1314 skipped += x; 1315 } 1316 return skipped; 1317 } 1318 throw new IllegalArgumentException("Negative skip value"); 1319 } 1320 1321 /** 1322 * Reads the stream until it finds the "End of central directory record" and consumes it as well. 1323 */ 1324 private void skipRemainderOfArchive() throws IOException { 1325 // skip over central directory. One LFH has been read too much 1326 // already. The calculation discounts file names and extra 1327 // data, so it will be too short. 1328 if (entriesRead > 0) { 1329 realSkip((long) entriesRead * CFH_LEN - LFH_LEN); 1330 } 1331 final boolean foundEocd = findEocdRecord(); 1332 if (foundEocd) { 1333 realSkip((long) ZipFile.MIN_EOCD_SIZE - WORD /* signature */ - SHORT /* comment len */); 1334 readFully(shortBuf); 1335 // file comment 1336 final int commentLen = ZipShort.getValue(shortBuf); 1337 if (commentLen >= 0) { 1338 realSkip(commentLen); 1339 return; 1340 } 1341 } 1342 throw new IOException("Truncated ZIP file"); 1343 } 1344 1345 /** 1346 * Tests whether the compressed size for the entry is either known or not required by the compression method being used. 1347 */ 1348 private boolean supportsCompressedSizeFor(final ZipArchiveEntry entry) { 1349 final int method = entry.getMethod(); 1350 return entry.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN || method == ZipEntry.DEFLATED 1351 || method == ZipMethod.ENHANCED_DEFLATED.getCode() 1352 || entry.getGeneralPurposeBit().usesDataDescriptor() && allowStoredEntriesWithDataDescriptor && method == ZipEntry.STORED 1353 || ZipMethod.isZstd(method) 1354 || method == ZipMethod.XZ.getCode(); 1355 } 1356 1357 /** 1358 * Tests whether this entry requires a data descriptor this library can work with. 1359 * 1360 * @return true if the entry doesn't use a data descriptor, if allowStoredEntriesWithDataDescriptor is true and the method is STORED, or if the method is 1361 * DEFLATED, ENHANCED_DEFLATED, Zstandard or XZ. 1362 */ 1363 private boolean supportsDataDescriptorFor(final ZipArchiveEntry entry) { 1364 final int method = entry.getMethod(); 1365 return !entry.getGeneralPurposeBit().usesDataDescriptor() || allowStoredEntriesWithDataDescriptor && method == ZipEntry.STORED 1366 || method == ZipEntry.DEFLATED || method == ZipMethod.ENHANCED_DEFLATED.getCode() 1367 || ZipMethod.isZstd(method) 1368 || method == ZipMethod.XZ.getCode(); 1369 } 1370}