/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.commons.compress.archivers.zip;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.SeekableByteChannel;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.LinkOption;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.zip.Deflater;
import java.util.zip.ZipException;

import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.ArchiveOutputStream;
import org.apache.commons.compress.utils.ByteUtils;
import org.apache.commons.io.Charsets;

/**
 * Reimplementation of {@link java.util.zip.ZipOutputStream java.util.zip.ZipOutputStream} to handle the extended functionality of this package, especially
 * internal/external file attributes and extra fields with different layouts for local file data and central directory entries.
 * <p>
 * This class will try to use {@link java.nio.channels.SeekableByteChannel} when it knows that the output is going to go to a file and no split archive shall be
 * created.
 * </p>
 * <p>
 * If SeekableByteChannel cannot be used, this implementation will use a Data Descriptor to store size and CRC information for {@link #DEFLATED DEFLATED}
 * entries, you don't need to calculate them yourself. Unfortunately, this is not possible for the {@link #STORED STORED} method, where setting the CRC and
 * uncompressed size information is required before {@link #putArchiveEntry(ZipArchiveEntry)} can be called.
 * </p>
 * <p>
 * As of Apache Commons Compress 1.3, the class transparently supports Zip64 extensions and thus individual entries and archives larger than 4 GB or with more
 * than 65,536 entries in most cases but explicit control is provided via {@link #setUseZip64}. If the stream can not use SeekableByteChannel and you try to
 * write a ZipArchiveEntry of unknown size, then Zip64 extensions will be disabled by default.
 * </p>
 *
 * @NotThreadSafe
 */
public class ZipArchiveOutputStream extends ArchiveOutputStream<ZipArchiveEntry> {

    /**
     * Structure collecting information for the entry that is currently being written.
     */
    private static final class CurrentEntry {

        /**
         * Current ZIP entry.
         */
        private final ZipArchiveEntry entry;

        /**
         * Offset for CRC entry in the local file header data for the current entry starts here.
         */
        private long localDataStart;

        /**
         * Data for local header data
         */
        private long dataStart;

        /**
         * Number of bytes read for the current entry (can't rely on Deflater#getBytesRead) when using DEFLATED.
         */
        private long bytesRead;

        /**
         * Whether current entry was the first one using ZIP64 features.
         */
        private boolean causedUseOfZip64;

        /**
         * Whether write() has been called at all.
         *
         * <p>
         * In order to create a valid archive {@link #closeArchiveEntry closeArchiveEntry} will write an empty array to get the CRC right if nothing has been
         * written to the stream at all.
         * </p>
         */
        private boolean hasWritten;

        private CurrentEntry(final ZipArchiveEntry entry) {
            this.entry = entry;
        }
    }

    /**
     * Per-entry book-keeping recorded when the local file header is written and consulted when the central directory is created.
     */
    private static final class EntryMetaData {
        // Offset of the entry's local file header within the archive (or current split segment).
        private final long offset;
        // Whether the entry's sizes/CRC are written in a trailing data descriptor rather than the LFH.
        private final boolean usesDataDescriptor;

        private EntryMetaData(final long offset, final boolean usesDataDescriptor) {
            this.offset = offset;
            this.usesDataDescriptor = usesDataDescriptor;
        }
    }

    /**
     * enum that represents the possible policies for creating Unicode extra fields.
     */
    public static final class UnicodeExtraFieldPolicy {

        /**
         * Always create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy ALWAYS = new UnicodeExtraFieldPolicy("always");

        /**
         * Never create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy NEVER = new UnicodeExtraFieldPolicy("never");

        /**
         * Creates Unicode extra fields for file names that cannot be encoded using the specified encoding.
         */
        public static final UnicodeExtraFieldPolicy NOT_ENCODEABLE = new UnicodeExtraFieldPolicy("not encodeable");

        private final String name;

        private UnicodeExtraFieldPolicy(final String n) {
            name = n;
        }

        @Override
        public String toString() {
            return name;
        }
    }

    static final int BUFFER_SIZE = 512;
    // Byte offsets of the fields within a local file header (LFH).
    private static final int LFH_SIG_OFFSET = 0;
    private static final int LFH_VERSION_NEEDED_OFFSET = 4;
    private static final int LFH_GPB_OFFSET = 6;
    private static final int LFH_METHOD_OFFSET = 8;
    private static final int LFH_TIME_OFFSET = 10;
    private static final int LFH_CRC_OFFSET = 14;
    private static final int LFH_COMPRESSED_SIZE_OFFSET = 18;
    private static final int LFH_ORIGINAL_SIZE_OFFSET = 22;
    private static final int LFH_FILENAME_LENGTH_OFFSET = 26;
    private static final int LFH_EXTRA_LENGTH_OFFSET = 28;
    private static final int LFH_FILENAME_OFFSET = 30;
    // Byte offsets of the fields within a central file header (CFH).
    private static final int CFH_SIG_OFFSET = 0;
    private static final int CFH_VERSION_MADE_BY_OFFSET = 4;
    private static final int CFH_VERSION_NEEDED_OFFSET = 6;
    private static final int CFH_GPB_OFFSET = 8;
    private static final int CFH_METHOD_OFFSET = 10;
    private static final int CFH_TIME_OFFSET = 12;
    private static final int CFH_CRC_OFFSET = 16;
    private static final int CFH_COMPRESSED_SIZE_OFFSET = 20;
    private static final int CFH_ORIGINAL_SIZE_OFFSET = 24;
    private static final int CFH_FILENAME_LENGTH_OFFSET = 28;
    private static final int CFH_EXTRA_LENGTH_OFFSET = 30;
    private static final int CFH_COMMENT_LENGTH_OFFSET = 32;
    private static final int CFH_DISK_NUMBER_OFFSET = 34;
    private static final int CFH_INTERNAL_ATTRIBUTES_OFFSET = 36;

    private static final int CFH_EXTERNAL_ATTRIBUTES_OFFSET = 38;

    private static final int CFH_LFH_OFFSET = 42;

    private static final int CFH_FILENAME_OFFSET = 46;

    /**
     * Compression method for deflated entries.
     */
    public static final int DEFLATED = java.util.zip.ZipEntry.DEFLATED;

    /**
     * Default compression level for deflated entries.
     */
    public static final int DEFAULT_COMPRESSION = Deflater.DEFAULT_COMPRESSION;

    /**
     * Compression method for stored entries.
     */
    public static final int STORED = java.util.zip.ZipEntry.STORED;

    /**
     * Default encoding for file names and comment.
     */
    static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;

    /**
     * General purpose flag, which indicates that file names are written in UTF-8.
     *
     * @deprecated use {@link GeneralPurposeBit#UFT8_NAMES_FLAG} instead
     */
    @Deprecated
    public static final int EFS_FLAG = GeneralPurposeBit.UFT8_NAMES_FLAG;

    /**
     * Helper, a 0 as ZipShort.
     */
    private static final byte[] ZERO = { 0, 0 };

    /**
     * Helper, a 0 as ZipLong.
     */
    private static final byte[] LZERO = { 0, 0, 0, 0 };

    // Helper, a 1 as ZipLong.
    private static final byte[] ONE = ZipLong.getBytes(1L);

    /*
     * Various ZIP constants shared between this class, ZipArchiveInputStream and ZipFile
     */
    /**
     * local file header signature
     */
    static final byte[] LFH_SIG = ZipLong.LFH_SIG.getBytes(); // NOSONAR

    /**
     * data descriptor signature
     */
    static final byte[] DD_SIG = ZipLong.DD_SIG.getBytes(); // NOSONAR

    /**
     * central file header signature
     */
    static final byte[] CFH_SIG = ZipLong.CFH_SIG.getBytes(); // NOSONAR

    /**
     * end of central dir signature
     */
    static final byte[] EOCD_SIG = ZipLong.getBytes(0X06054B50L); // NOSONAR

    /**
     * ZIP64 end of central dir signature
     */
    static final byte[] ZIP64_EOCD_SIG = ZipLong.getBytes(0X06064B50L); // NOSONAR

    /**
     * ZIP64 end of central dir locator signature
     */
    static final byte[] ZIP64_EOCD_LOC_SIG = ZipLong.getBytes(0X07064B50L); // NOSONAR

    /** Indicates if this archive is finished. protected for use in Jar implementation */
    protected boolean finished;

    /**
     * Current entry.
     */
    private CurrentEntry entry;

    /**
     * The file comment.
     */
    private String comment = "";

    /**
     * Compression level for next entry.
     */
    private int level = DEFAULT_COMPRESSION;

    /**
     * Has the compression level changed when compared to the last entry?
     */
    private boolean hasCompressionLevelChanged;

    /**
     * Default compression method for next entry.
     */
    private int method = java.util.zip.ZipEntry.DEFLATED;

    /**
     * List of ZipArchiveEntries written so far.
     */
    private final List<ZipArchiveEntry> entries = new LinkedList<>();

    private final StreamCompressor streamCompressor;

    /**
     * Start of central directory.
     */
    private long cdOffset;

    /**
     * Length of central directory.
     */
    private long cdLength;

    /**
     * Disk number start of central directory.
     */
    private long cdDiskNumberStart;

    /**
     * Length of end of central directory
     */
    private long eocdLength;

    /**
     * Holds some book-keeping data for each entry.
     */
    private final Map<ZipArchiveEntry, EntryMetaData> metaData = new HashMap<>();

    /**
     * The encoding to use for file names and the file comment.
     *
     * <p>
     * For a list of possible values see <a href="https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html">Supported Encodings</a>.
     * Defaults to UTF-8.
     * </p>
     */
    private Charset charset = DEFAULT_CHARSET;

    /**
     * The ZIP encoding to use for file names and the file comment.
     *
     * This field is of internal use and will be set in {@link #setEncoding(String)}.
     */
    private ZipEncoding zipEncoding = ZipEncodingHelper.getZipEncoding(DEFAULT_CHARSET);

    /**
     * This Deflater object is used for output.
     */
    protected final Deflater def;

    private final OutputStream outputStream;

    /**
     * whether to use the general purpose bit flag when writing UTF-8 file names or not.
     */
    private boolean useUTF8Flag = true;

    /**
     * Whether to encode non-encodable file names as UTF-8.
     */
    private boolean fallbackToUTF8;

    /**
     * whether to create UnicodePathExtraField-s for each entry.
     */
    private UnicodeExtraFieldPolicy createUnicodeExtraFields = UnicodeExtraFieldPolicy.NEVER;

    /**
     * Whether anything inside this archive has used a ZIP64 feature.
     *
     * @since 1.3
     */
    private boolean hasUsedZip64;

    private Zip64Mode zip64Mode = Zip64Mode.AsNeeded;

    private final byte[] copyBuffer = new byte[32768];

    /**
     * Whether we are creating a split zip
     */
    private final boolean isSplitZip;

    /**
     * Holds the number of Central Directories on each disk, this is used when writing Zip64 End Of Central Directory and End Of Central Directory
     */
    private final Map<Integer, Integer> numberOfCDInDiskData = new HashMap<>();

    /**
     * Creates a new ZIP OutputStream writing to a File. Will use random access if possible.
     *
     * @param file the file to ZIP to
     * @throws IOException on error
     */
    public ZipArchiveOutputStream(final File file) throws IOException {
        this(file.toPath());
    }

    /**
     * Creates a split ZIP Archive.
     *
     * <p>
     * The files making up the archive will use Z01, Z02, ... extensions and the last part of it will be the given {@code
     * file}.
     * </p>
     *
     * <p>
     * Even though the stream writes to a file this stream will behave as if no random access was possible. This means the sizes of stored entries need to be
     * known before the actual entry data is written.
     * </p>
     *
     * @param file         the file that will become the last part of the split archive
     * @param zipSplitSize maximum size of a single part of the split archive created by this stream. Must be between 64kB and about 4GB.
     *
     * @throws IOException on error
     * @throws IllegalArgumentException if zipSplitSize is not in the required range
     * @since 1.20
     */
    public ZipArchiveOutputStream(final File file, final long zipSplitSize) throws IOException {
        this(file.toPath(), zipSplitSize);
    }

    /**
     * Creates a new ZIP OutputStream filtering the underlying stream.
     *
     * @param out the outputstream to zip
     */
    public ZipArchiveOutputStream(final OutputStream out) {
        this.outputStream = out;
        // Deflater must exist before the StreamCompressor wraps it.
        this.def = new Deflater(level, true);
        this.streamCompressor = StreamCompressor.create(out, def);
        this.isSplitZip = false;
    }

    /**
     * Creates a split ZIP Archive.
     * <p>
     * The files making up the archive will use Z01, Z02, ... extensions and the last part of it will be the given {@code
     * file}.
     * </p>
     * <p>
     * Even though the stream writes to a file this stream will behave as if no random access was possible. This means the sizes of stored entries need to be
     * known before the actual entry data is written.
     * </p>
     *
     * @param path         the path to the file that will become the last part of the split archive
     * @param zipSplitSize maximum size of a single part of the split archive created by this stream. Must be between 64kB and about 4GB.
     * @throws IOException on error
     * @throws IllegalArgumentException if zipSplitSize is not in the required range
     * @since 1.22
     */
    public ZipArchiveOutputStream(final Path path, final long zipSplitSize) throws IOException {
        this.def = new Deflater(level, true);
        this.outputStream = new ZipSplitOutputStream(path, zipSplitSize);
        this.streamCompressor = StreamCompressor.create(this.outputStream, def);
        this.isSplitZip = true;
    }

    /**
     * Creates a new ZIP OutputStream writing to a Path. Will use random access if possible.
     *
     * @param file    the file to ZIP to
     * @param options options specifying how the file is opened.
     * @throws IOException on error
     * @since 1.21
     */
    public ZipArchiveOutputStream(final Path file, final OpenOption... options) throws IOException {
        this.def = new Deflater(level, true);
        this.outputStream = options.length == 0 ? new FileRandomAccessOutputStream(file) : new FileRandomAccessOutputStream(file, options);
        this.streamCompressor = StreamCompressor.create(outputStream, def);
        this.isSplitZip = false;
    }

    /**
     * Creates a new ZIP OutputStream writing to a SeekableByteChannel.
     *
     * <p>
     * {@link org.apache.commons.compress.utils.SeekableInMemoryByteChannel} allows you to write to an in-memory archive using random access.
     * </p>
     *
     * @param channel the channel to ZIP to
     * @since 1.13
     */
    public ZipArchiveOutputStream(final SeekableByteChannel channel) {
        this.outputStream = new SeekableChannelRandomAccessOutputStream(channel);
        this.def = new Deflater(level, true);
        this.streamCompressor = StreamCompressor.create(outputStream, def);
        this.isSplitZip = false;
    }

    /**
     * Adds an archive entry with a raw input stream.
     * <p>
     * If crc, size and compressed size are supplied on the entry, these values will be used as-is. Zip64 status is re-established based on the settings in this
     * stream, and the supplied value is ignored.
     * </p>
     * <p>
     * The entry is put and closed immediately.
     * </p>
     *
     * @param entry     The archive entry to add
     * @param rawStream The raw input stream of a different entry. May be compressed/encrypted.
     * @throws IOException If copying fails
     */
    public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream) throws IOException {
        final ZipArchiveEntry ae = new ZipArchiveEntry(entry);
        if (hasZip64Extra(ae)) {
            // Will be re-added as required. this may make the file generated with this method
            // somewhat smaller than standard mode,
            // since standard mode is unable to remove the ZIP 64 header.
            ae.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        }
        // "2-phase": all of crc, size and compressed size are already known, so no data descriptor
        // or header rewrite is needed after the data has been copied.
        final boolean is2PhaseSource = ae.getCrc() != ZipArchiveEntry.CRC_UNKNOWN && ae.getSize() != ArchiveEntry.SIZE_UNKNOWN
                && ae.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
        putArchiveEntry(ae, is2PhaseSource);
        copyFromZipInputStream(rawStream);
        closeCopiedEntry(is2PhaseSource);
    }

    /**
     * Adds UnicodeExtra fields for name and file comment if mode is ALWAYS or the data cannot be encoded using the configured encoding.
511 */ 512 private void addUnicodeExtraFields(final ZipArchiveEntry ze, final boolean encodable, final ByteBuffer name) throws IOException { 513 if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS || !encodable) { 514 ze.addExtraField(new UnicodePathExtraField(ze.getName(), name.array(), name.arrayOffset(), name.limit() - name.position())); 515 } 516 517 final String comm = ze.getComment(); 518 if (comm != null && !comm.isEmpty()) { 519 520 final boolean commentEncodable = zipEncoding.canEncode(comm); 521 522 if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS || !commentEncodable) { 523 final ByteBuffer commentB = getEntryEncoding(ze).encode(comm); 524 ze.addExtraField(new UnicodeCommentExtraField(comm, commentB.array(), commentB.arrayOffset(), commentB.limit() - commentB.position())); 525 } 526 } 527 } 528 529 /** 530 * Whether this stream is able to write the given entry. 531 * <p> 532 * May return false if it is set up to use encryption or a compression method that hasn't been implemented yet. 533 * </p> 534 * 535 * @since 1.1 536 */ 537 @Override 538 public boolean canWriteEntryData(final ArchiveEntry ae) { 539 if (ae instanceof ZipArchiveEntry) { 540 final ZipArchiveEntry zae = (ZipArchiveEntry) ae; 541 return zae.getMethod() != ZipMethod.IMPLODING.getCode() && zae.getMethod() != ZipMethod.UNSHRINKING.getCode() && ZipUtil.canHandleEntryData(zae); 542 } 543 return false; 544 } 545 546 /** 547 * Verifies the sizes aren't too big in the Zip64Mode.Never case and returns whether the entry would require a Zip64 extra field. 
     */
    private boolean checkIfNeedsZip64(final Zip64Mode effectiveMode) throws ZipException {
        final boolean actuallyNeedsZip64 = isZip64Required(entry.entry, effectiveMode);
        if (actuallyNeedsZip64 && effectiveMode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry));
        }
        return actuallyNeedsZip64;
    }

    /**
     * Closes this output stream and releases any system resources associated with the stream.
     *
     * @throws IOException if an I/O error occurs.
     * @throws Zip64RequiredException if the archive's size exceeds 4 GByte or there are more than 65535 entries inside the archive and {@link #setUseZip64} is
     *                                {@link Zip64Mode#Never}.
     */
    @Override
    public void close() throws IOException {
        try {
            if (!finished) {
                finish();
            }
        } finally {
            // Always release the underlying stream, even if finish() failed.
            destroy();
        }
    }

    /**
     * Writes all necessary data for this entry.
     *
     * @throws IOException on error
     * @throws Zip64RequiredException if the entry's uncompressed or compressed size exceeds 4 GByte and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    @Override
    public void closeArchiveEntry() throws IOException {
        preClose();

        flushDeflater();

        // Compressed size is measured from where this entry's data started in the stream.
        final long bytesWritten = streamCompressor.getTotalBytesWritten() - entry.dataStart;
        final long realCrc = streamCompressor.getCrc32();
        entry.bytesRead = streamCompressor.getBytesRead();
        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        final boolean actuallyNeedsZip64 = handleSizesAndCrc(bytesWritten, realCrc, effectiveMode);
        closeEntry(actuallyNeedsZip64, false);
        // Reset CRC/byte counters for the next entry.
        streamCompressor.reset();
    }

    /**
     * Writes all necessary data for this entry.
     *
     * @param phased This entry is second phase of a 2-phase ZIP creation, size, compressed size and crc are known in ZipArchiveEntry
     * @throws IOException on error
     * @throws Zip64RequiredException if the entry's uncompressed or compressed size exceeds 4 GByte and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    private void closeCopiedEntry(final boolean phased) throws IOException {
        preClose();
        entry.bytesRead = entry.entry.getSize();
        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        final boolean actuallyNeedsZip64 = checkIfNeedsZip64(effectiveMode);
        closeEntry(actuallyNeedsZip64, phased);
    }

    /**
     * Finishes the current entry: rewrites sizes/CRC in the local file header when the output is random-access, writes a data descriptor otherwise, and clears
     * the current-entry state.
     *
     * @param actuallyNeedsZip64 whether the entry requires Zip64 extensions
     * @param phased             whether sizes and CRC were known up front (no rewrite/descriptor needed)
     * @throws IOException on error
     */
    private void closeEntry(final boolean actuallyNeedsZip64, final boolean phased) throws IOException {
        if (!phased && outputStream instanceof RandomAccessOutputStream) {
            rewriteSizesAndCrc(actuallyNeedsZip64);
        }

        if (!phased) {
            writeDataDescriptor(entry.entry);
        }
        entry = null;
    }

    /**
     * Copies raw (already compressed/encrypted) data for the current entry from the given stream, bypassing the stream compressor's deflater.
     *
     * @param src the stream to copy from
     * @throws IOException on error
     * @throws IllegalStateException if no entry is currently open
     */
    private void copyFromZipInputStream(final InputStream src) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        entry.hasWritten = true;
        int length;
        while ((length = src.read(copyBuffer)) >= 0) {
            streamCompressor.writeCounted(copyBuffer, 0, length);
            count(length);
        }
    }

    /**
     * Creates a new ZIP entry taking some information from the given file and using the provided name.
     * <p>
     * The name will be adjusted to end with a forward slash "/" if the file is a directory. If the file is not a directory a potential trailing forward slash
     * will be stripped from the entry name.
     * </p>
     * <p>
     * Must not be used if the stream has already been closed.
     * </p>
     */
    @Override
    public ZipArchiveEntry createArchiveEntry(final File inputFile, final String entryName) throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputFile, entryName);
    }

    /**
     * Creates a new ZIP entry taking some information from the given file and using the provided name.
     * <p>
     * The name will be adjusted to end with a forward slash "/" if the file is a directory. If the file is not a directory a potential trailing forward slash
     * will be stripped from the entry name.
     * </p>
     * <p>
     * Must not be used if the stream has already been closed.
     * </p>
     *
     * @param inputPath path to create the entry from.
     * @param entryName name of the entry.
     * @param options   options indicating how symbolic links are handled.
     * @return a new instance.
     * @throws IOException if an I/O error occurs.
     * @since 1.21
     */
    @Override
    public ZipArchiveEntry createArchiveEntry(final Path inputPath, final String entryName, final LinkOption... options) throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputPath, entryName);
    }

    /**
     * Creates the central file header bytes for the given entry, deciding whether a Zip64 extra field is required.
     *
     * @param ze the entry to write the header for
     * @return the encoded central file header
     * @throws IOException on error
     * @throws Zip64RequiredException if Zip64 is required but the mode is {@link Zip64Mode#Never}
     */
    private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException {

        final EntryMetaData entryMetaData = metaData.get(ze);
        final boolean needsZip64Extra = hasZip64Extra(ze) || ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC || ze.getSize() >= ZipConstants.ZIP64_MAGIC
                || entryMetaData.offset >= ZipConstants.ZIP64_MAGIC || ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT
                || zip64Mode == Zip64Mode.Always || zip64Mode == Zip64Mode.AlwaysWithCompatibility;

        if (needsZip64Extra && zip64Mode == Zip64Mode.Never) {
            // must be the offset that is too big, otherwise an
            // exception would have been thrown in putArchiveEntry or
            // closeArchiveEntry
            throw new Zip64RequiredException(Zip64RequiredException.ARCHIVE_TOO_BIG_MESSAGE);
        }

        handleZip64Extra(ze, entryMetaData.offset, needsZip64Extra);

        return createCentralFileHeader(ze, getName(ze), entryMetaData, needsZip64Extra);
    }

    /**
     * Writes the central file header entry.
699 * 700 * @param ze the entry to write 701 * @param name The encoded name 702 * @param entryMetaData meta data for this file 703 * @throws IOException on error 704 */ 705 private byte[] createCentralFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, final EntryMetaData entryMetaData, final boolean needsZip64Extra) 706 throws IOException { 707 if (isSplitZip) { 708 // calculate the disk number for every central file header, 709 // this will be used in writing End Of Central Directory and Zip64 End Of Central Directory 710 final int currentSplitSegment = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex(); 711 if (numberOfCDInDiskData.get(currentSplitSegment) == null) { 712 numberOfCDInDiskData.put(currentSplitSegment, 1); 713 } else { 714 final int originalNumberOfCD = numberOfCDInDiskData.get(currentSplitSegment); 715 numberOfCDInDiskData.put(currentSplitSegment, originalNumberOfCD + 1); 716 } 717 } 718 719 final byte[] extra = ze.getCentralDirectoryExtra(); 720 final int extraLength = extra.length; 721 722 // file comment length 723 String comm = ze.getComment(); 724 if (comm == null) { 725 comm = ""; 726 } 727 728 final ByteBuffer commentB = getEntryEncoding(ze).encode(comm); 729 final int nameLen = name.limit() - name.position(); 730 final int commentLen = commentB.limit() - commentB.position(); 731 final int len = CFH_FILENAME_OFFSET + nameLen + extraLength + commentLen; 732 final byte[] buf = new byte[len]; 733 734 System.arraycopy(CFH_SIG, 0, buf, CFH_SIG_OFFSET, ZipConstants.WORD); 735 736 // version made by 737 // CheckStyle:MagicNumber OFF 738 ZipShort.putShort(ze.getPlatform() << 8 | (!hasUsedZip64 ? 
ZipConstants.DATA_DESCRIPTOR_MIN_VERSION : ZipConstants.ZIP64_MIN_VERSION), buf, 739 CFH_VERSION_MADE_BY_OFFSET); 740 741 final int zipMethod = ze.getMethod(); 742 final boolean encodable = zipEncoding.canEncode(ze.getName()); 743 ZipShort.putShort(versionNeededToExtract(zipMethod, needsZip64Extra, entryMetaData.usesDataDescriptor), buf, CFH_VERSION_NEEDED_OFFSET); 744 getGeneralPurposeBits(!encodable && fallbackToUTF8, entryMetaData.usesDataDescriptor).encode(buf, CFH_GPB_OFFSET); 745 746 // compression method 747 ZipShort.putShort(zipMethod, buf, CFH_METHOD_OFFSET); 748 749 // last mod. time and date 750 ZipUtil.toDosTime(ze.getTime(), buf, CFH_TIME_OFFSET); 751 752 // CRC 753 // compressed length 754 // uncompressed length 755 ZipLong.putLong(ze.getCrc(), buf, CFH_CRC_OFFSET); 756 if (ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC || ze.getSize() >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always 757 || zip64Mode == Zip64Mode.AlwaysWithCompatibility) { 758 ZipLong.ZIP64_MAGIC.putLong(buf, CFH_COMPRESSED_SIZE_OFFSET); 759 ZipLong.ZIP64_MAGIC.putLong(buf, CFH_ORIGINAL_SIZE_OFFSET); 760 } else { 761 ZipLong.putLong(ze.getCompressedSize(), buf, CFH_COMPRESSED_SIZE_OFFSET); 762 ZipLong.putLong(ze.getSize(), buf, CFH_ORIGINAL_SIZE_OFFSET); 763 } 764 765 ZipShort.putShort(nameLen, buf, CFH_FILENAME_LENGTH_OFFSET); 766 767 // extra field length 768 ZipShort.putShort(extraLength, buf, CFH_EXTRA_LENGTH_OFFSET); 769 770 ZipShort.putShort(commentLen, buf, CFH_COMMENT_LENGTH_OFFSET); 771 772 // disk number start 773 if (isSplitZip) { 774 if (ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always) { 775 ZipShort.putShort(ZipConstants.ZIP64_MAGIC_SHORT, buf, CFH_DISK_NUMBER_OFFSET); 776 } else { 777 ZipShort.putShort((int) ze.getDiskNumberStart(), buf, CFH_DISK_NUMBER_OFFSET); 778 } 779 } else { 780 System.arraycopy(ZERO, 0, buf, CFH_DISK_NUMBER_OFFSET, ZipConstants.SHORT); 781 } 782 783 // internal file attributes 784 
ZipShort.putShort(ze.getInternalAttributes(), buf, CFH_INTERNAL_ATTRIBUTES_OFFSET); 785 786 // external file attributes 787 ZipLong.putLong(ze.getExternalAttributes(), buf, CFH_EXTERNAL_ATTRIBUTES_OFFSET); 788 789 // relative offset of LFH 790 if (entryMetaData.offset >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) { 791 ZipLong.putLong(ZipConstants.ZIP64_MAGIC, buf, CFH_LFH_OFFSET); 792 } else { 793 ZipLong.putLong(Math.min(entryMetaData.offset, ZipConstants.ZIP64_MAGIC), buf, CFH_LFH_OFFSET); 794 } 795 796 // file name 797 System.arraycopy(name.array(), name.arrayOffset(), buf, CFH_FILENAME_OFFSET, nameLen); 798 799 final int extraStart = CFH_FILENAME_OFFSET + nameLen; 800 System.arraycopy(extra, 0, buf, extraStart, extraLength); 801 802 final int commentStart = extraStart + extraLength; 803 804 // file comment 805 System.arraycopy(commentB.array(), commentB.arrayOffset(), buf, commentStart, commentLen); 806 return buf; 807 } 808 809 private byte[] createLocalFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, final boolean encodable, final boolean phased, 810 final long archiveOffset) { 811 final ZipExtraField oldEx = ze.getExtraField(ResourceAlignmentExtraField.ID); 812 if (oldEx != null) { 813 ze.removeExtraField(ResourceAlignmentExtraField.ID); 814 } 815 final ResourceAlignmentExtraField oldAlignmentEx = oldEx instanceof ResourceAlignmentExtraField ? 
                (ResourceAlignmentExtraField) oldEx : null;

        int alignment = ze.getAlignment();
        if (alignment <= 0 && oldAlignmentEx != null) {
            // no alignment requested on the entry itself - honor the one from a pre-existing extra field
            alignment = oldAlignmentEx.getAlignment();
        }

        if (alignment > 1 || oldAlignmentEx != null && !oldAlignmentEx.allowMethodChange()) {
            final int oldLength = LFH_FILENAME_OFFSET + name.limit() - name.position() + ze.getLocalFileDataExtra().length;

            // number of padding bytes needed so the entry data starts at a multiple of 'alignment';
            // NOTE(review): the bit mask assumes alignment is a power of two - confirm against ZipArchiveEntry.setAlignment
            final int padding = (int) (-archiveOffset - oldLength - ZipExtraField.EXTRAFIELD_HEADER_SIZE - ResourceAlignmentExtraField.BASE_SIZE
                    & alignment - 1);
            ze.addExtraField(new ResourceAlignmentExtraField(alignment, oldAlignmentEx != null && oldAlignmentEx.allowMethodChange(), padding));
        }

        final byte[] extra = ze.getLocalFileDataExtra();
        final int nameLen = name.limit() - name.position();
        final int len = LFH_FILENAME_OFFSET + nameLen + extra.length;
        final byte[] buf = new byte[len];

        System.arraycopy(LFH_SIG, 0, buf, LFH_SIG_OFFSET, ZipConstants.WORD);

        // store method in local variable to prevent multiple method calls
        final int zipMethod = ze.getMethod();
        final boolean dataDescriptor = usesDataDescriptor(zipMethod, phased);

        ZipShort.putShort(versionNeededToExtract(zipMethod, hasZip64Extra(ze), dataDescriptor), buf, LFH_VERSION_NEEDED_OFFSET);

        final GeneralPurposeBit generalPurposeBit = getGeneralPurposeBits(!encodable && fallbackToUTF8, dataDescriptor);
        generalPurposeBit.encode(buf, LFH_GPB_OFFSET);

        // compression method
        ZipShort.putShort(zipMethod, buf, LFH_METHOD_OFFSET);

        ZipUtil.toDosTime(ze.getTime(), buf, LFH_TIME_OFFSET);

        // CRC
        // known up-front when phased, or when we can never rewrite it later (STORED to a plain stream);
        // otherwise write zeros now and fix the value up afterwards
        if (phased || !(zipMethod == DEFLATED || outputStream instanceof RandomAccessOutputStream)) {
            ZipLong.putLong(ze.getCrc(), buf, LFH_CRC_OFFSET);
        } else {
            System.arraycopy(LZERO, 0, buf, LFH_CRC_OFFSET, ZipConstants.WORD);
        }

        // compressed length
        // uncompressed length
        if (hasZip64Extra(entry.entry)) {
            // point to ZIP64 extended information extra field for
            // sizes, may get rewritten once sizes are known if
            // stream is seekable
            ZipLong.ZIP64_MAGIC.putLong(buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.ZIP64_MAGIC.putLong(buf, LFH_ORIGINAL_SIZE_OFFSET);
        } else if (phased) {
            ZipLong.putLong(ze.getCompressedSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
        } else if (zipMethod == DEFLATED || outputStream instanceof RandomAccessOutputStream) {
            // sizes unknown yet - zeros for now, rewritten later or carried in the data descriptor
            System.arraycopy(LZERO, 0, buf, LFH_COMPRESSED_SIZE_OFFSET, ZipConstants.WORD);
            System.arraycopy(LZERO, 0, buf, LFH_ORIGINAL_SIZE_OFFSET, ZipConstants.WORD);
        } else { // Stored
            ZipLong.putLong(ze.getSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
        }
        // file name length
        ZipShort.putShort(nameLen, buf, LFH_FILENAME_LENGTH_OFFSET);

        // extra field length
        ZipShort.putShort(extra.length, buf, LFH_EXTRA_LENGTH_OFFSET);

        // file name
        System.arraycopy(name.array(), name.arrayOffset(), buf, LFH_FILENAME_OFFSET, nameLen);

        // extra fields
        System.arraycopy(extra, 0, buf, LFH_FILENAME_OFFSET + nameLen, extra.length);

        return buf;
    }

    /**
     * Writes next block of compressed data to the output stream.
     *
     * @throws IOException on error
     */
    protected final void deflate() throws IOException {
        streamCompressor.deflate();
    }

    /**
     * Closes the underlying stream/file without finishing the archive, the result will likely be a corrupt archive.
     * <p>
     * This method only exists to support tests that generate corrupt archives so they can clean up any temporary files.
     * </p>
     */
    void destroy() throws IOException {
        if (outputStream != null) {
            outputStream.close();
        }
    }

    /**
     * {@inheritDoc}
     *
     * @throws Zip64RequiredException if the archive's size exceeds 4 GByte or there are more than 65535 entries inside the archive and {@link #setUseZip64} is
     *             {@link Zip64Mode#Never}.
     */
    @Override
    public void finish() throws IOException {
        if (finished) {
            throw new IOException("This archive has already been finished");
        }

        if (entry != null) {
            throw new IOException("This archive contains unclosed entries.");
        }

        // remember where the central directory starts in terms of bytes written so far
        final long cdOverallOffset = streamCompressor.getTotalBytesWritten();
        cdOffset = cdOverallOffset;
        if (isSplitZip) {
            // when creating a split zip, the offset should be
            // the offset to the corresponding segment disk
            final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.outputStream;
            cdOffset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten();
            cdDiskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex();
        }
        writeCentralDirectoryInChunks();

        cdLength = streamCompressor.getTotalBytesWritten() - cdOverallOffset;

        // calculate the length of end of central directory, as it may be used in writeZip64CentralDirectory
        final ByteBuffer commentData = this.zipEncoding.encode(comment);
        final long commentLength = (long) commentData.limit() - commentData.position();
        eocdLength = ZipConstants.WORD /* length of EOCD_SIG */
                + ZipConstants.SHORT /* number of this disk */
                + ZipConstants.SHORT /* disk number of start of central directory */
                + ZipConstants.SHORT /* total number of entries on this disk */
                + ZipConstants.SHORT /* total number of entries */
                + ZipConstants.WORD /* size of central directory */
                + ZipConstants.WORD /* offset of start of central directory */
                + ZipConstants.SHORT /* ZIP comment length */
                + commentLength /* ZIP comment */;

        writeZip64CentralDirectory();
        writeCentralDirectoryEnd();
        metaData.clear();
        entries.clear();
        streamCompressor.close();
        if (isSplitZip) {
            // trigger the ZipSplitOutputStream to write the final split segment
            outputStream.close();
        }
        finished = true;
    }

    /**
     * Flushes this output stream and forces any buffered output bytes to be written out to the stream.
     *
     * @throws IOException if an I/O error occurs.
     */
    @Override
    public void flush() throws IOException {
        if (outputStream != null) {
            outputStream.flush();
        }
    }

    /**
     * Ensures all bytes sent to the deflater are written to the stream.
     */
    private void flushDeflater() throws IOException {
        if (entry.entry.getMethod() == DEFLATED) {
            streamCompressor.flushDeflater();
        }
    }

    /**
     * Returns the total number of bytes written to this stream.
     *
     * @return the number of written bytes
     * @since 1.22
     */
    @Override
    public long getBytesWritten() {
        return streamCompressor.getTotalBytesWritten();
    }

    /**
     * If the mode is AsNeeded and the entry is a compressed entry of unknown size that gets written to a non-seekable stream then change the default to Never.
     *
     * @param ze the entry about to be written
     * @return the Zip64 mode that will actually be applied to this entry
     * @since 1.3
     */
    private Zip64Mode getEffectiveZip64Mode(final ZipArchiveEntry ze) {
        if (zip64Mode != Zip64Mode.AsNeeded || outputStream instanceof RandomAccessOutputStream ||
                ze.getMethod() != DEFLATED || ze.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
            return zip64Mode;
        }
        return Zip64Mode.Never;
    }

    /**
     * The encoding to use for file names and the file comment.
     *
     * @return null if using the platform's default character encoding.
     */
    public String getEncoding() {
        return charset != null ?
                charset.name() : null;
    }

    // Encoding actually used for this entry's name: falls back to UTF-8 when the configured
    // encoding cannot represent the name and fallbackToUTF8 is enabled.
    private ZipEncoding getEntryEncoding(final ZipArchiveEntry ze) {
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        return !encodable && fallbackToUTF8 ? ZipEncodingHelper.ZIP_ENCODING_UTF_8 : zipEncoding;
    }

    // Builds the general purpose bit flag: language-encoding bit for UTF-8 names,
    // data-descriptor bit when sizes/CRC follow the entry data.
    private GeneralPurposeBit getGeneralPurposeBits(final boolean utfFallback, final boolean usesDataDescriptor) {
        final GeneralPurposeBit b = new GeneralPurposeBit();
        b.useUTF8ForNames(useUTF8Flag || utfFallback);
        if (usesDataDescriptor) {
            b.useDataDescriptor(true);
        }
        return b;
    }

    // Encodes the entry's name with the encoding chosen by getEntryEncoding.
    private ByteBuffer getName(final ZipArchiveEntry ze) throws IOException {
        return getEntryEncoding(ze).encode(ze.getName());
    }

    /**
     * Gets the existing ZIP64 extended information extra field or create a new one and add it to the entry.
     *
     * @since 1.3
     */
    private Zip64ExtendedInformationExtraField getZip64Extra(final ZipArchiveEntry ze) {
        if (entry != null) {
            // record whether this entry is the first to trigger Zip64 usage,
            // so rewriteSizesAndCrc can undo the flag if the extra turns out to be unnecessary
            entry.causedUseOfZip64 = !hasUsedZip64;
        }
        hasUsedZip64 = true;
        final ZipExtraField extra = ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        Zip64ExtendedInformationExtraField z64 = extra instanceof Zip64ExtendedInformationExtraField ? (Zip64ExtendedInformationExtraField) extra : null;
        if (z64 == null) {
            /*
             * System.err.println("Adding z64 for " + ze.getName() + ", method: " + ze.getMethod() + " (" + (ze.getMethod() == STORED) + ")" + ", channel: " +
             * (channel != null));
             */
            z64 = new Zip64ExtendedInformationExtraField();
        }

        // even if the field is there already, make sure it is the first one
        ze.addAsFirstExtraField(z64);

        return z64;
    }

    /**
     * Ensures the current entry's size and CRC information is set to the values just written, verifies it isn't too big in the Zip64Mode.Never case and returns
     * whether the entry would require a Zip64 extra field.
     */
    private boolean handleSizesAndCrc(final long bytesWritten, final long crc, final Zip64Mode effectiveMode) throws ZipException {
        if (entry.entry.getMethod() == DEFLATED) {
            /*
             * It turns out def.getBytesRead() returns wrong values if the size exceeds 4 GB on Java < Java7 entry.entry.setSize(def.getBytesRead());
             */
            // DEFLATED: trust the counters we maintained while compressing
            entry.entry.setSize(entry.bytesRead);
            entry.entry.setCompressedSize(bytesWritten);
            entry.entry.setCrc(crc);

        } else if (!(outputStream instanceof RandomAccessOutputStream)) {
            // STORED to a non-seekable stream: size/CRC had to be supplied up-front, so verify them
            if (entry.entry.getCrc() != crc) {
                throw new ZipException("Bad CRC checksum for entry " + entry.entry.getName() + ": " + Long.toHexString(entry.entry.getCrc()) + " instead of "
                        + Long.toHexString(crc));
            }

            if (entry.entry.getSize() != bytesWritten) {
                throw new ZipException("Bad size for entry " + entry.entry.getName() + ": " + entry.entry.getSize() + " instead of " + bytesWritten);
            }
        } else { /* method is STORED and we used SeekableByteChannel */
            entry.entry.setSize(bytesWritten);
            entry.entry.setCompressedSize(bytesWritten);
            entry.entry.setCrc(crc);
        }

        return checkIfNeedsZip64(effectiveMode);
    }

    /**
     * If the entry needs Zip64 extra information inside the central directory then configure its data.
     */
    private void handleZip64Extra(final ZipArchiveEntry ze, final long lfhOffset, final boolean needsZip64Extra) {
        if (needsZip64Extra) {
            final Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze);
            if (ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC || ze.getSize() >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always
                    || zip64Mode == Zip64Mode.AlwaysWithCompatibility) {
                z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
                z64.setSize(new ZipEightByteInteger(ze.getSize()));
            } else {
                // reset value that may have been set for LFH
                z64.setCompressedSize(null);
                z64.setSize(null);
            }

            // the 32-bit fields of the central directory record cannot hold these values
            final boolean needsToEncodeLfhOffset = lfhOffset >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always;
            final boolean needsToEncodeDiskNumberStart = ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always;

            if (needsToEncodeLfhOffset || needsToEncodeDiskNumberStart) {
                z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset));
            }
            if (needsToEncodeDiskNumberStart) {
                z64.setDiskStartNumber(new ZipLong(ze.getDiskNumberStart()));
            }
            // re-serialize the entry's extra data so the new field contents are picked up
            ze.setExtra();
        }
    }

    /**
     * Is there a ZIP64 extended information extra field for the entry?
     *
     * @since 1.3
     */
    private boolean hasZip64Extra(final ZipArchiveEntry ze) {
        return ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID) instanceof Zip64ExtendedInformationExtraField;
    }

    /**
     * This method indicates whether this archive is writing to a seekable stream (i.e., to a random access file).
     * <p>
     * For seekable streams, you don't need to calculate the CRC or uncompressed size for {@link #STORED} entries before invoking
     * {@link #putArchiveEntry(ZipArchiveEntry)}.
     * </p>
     *
     * @return true if seekable
     */
    public boolean isSeekable() {
        return outputStream instanceof RandomAccessOutputStream;
    }

    // True when either size exceeds the 32-bit limit of the classic ZIP format.
    private boolean isTooLargeForZip32(final ZipArchiveEntry zipArchiveEntry) {
        return zipArchiveEntry.getSize() >= ZipConstants.ZIP64_MAGIC || zipArchiveEntry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC;
    }

    private boolean isZip64Required(final ZipArchiveEntry entry1, final Zip64Mode requestedMode) {
        return requestedMode == Zip64Mode.Always || requestedMode == Zip64Mode.AlwaysWithCompatibility || isTooLargeForZip32(entry1);
    }

    // Shared sanity checks before closing the current entry; writes a zero-length
    // chunk for entries that never received any data so the compressor state is initialized.
    private void preClose() throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }

        if (entry == null) {
            throw new IOException("No current entry to close");
        }

        if (!entry.hasWritten) {
            write(ByteUtils.EMPTY_BYTE_ARRAY, 0, 0);
        }
    }

    /**
     * {@inheritDoc}
     *
     * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
     * @throws Zip64RequiredException if the entry's uncompressed or compressed size is known to exceed 4 GByte and {@link #setUseZip64} is
     *             {@link Zip64Mode#Never}.
     */
    @Override
    public void putArchiveEntry(final ZipArchiveEntry archiveEntry) throws IOException {
        putArchiveEntry(archiveEntry, false);
    }

    /**
     * Writes the headers for an archive entry to the output stream. The caller must then write the content to the stream and call {@link #closeArchiveEntry()}
     * to complete the process.
     *
     * @param archiveEntry The archiveEntry
     * @param phased If true size, compressedSize and crc required to be known up-front in the archiveEntry
     * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
     * @throws Zip64RequiredException if the entry's uncompressed or compressed size is known to exceed 4 GByte and {@link #setUseZip64} is
     *             {@link Zip64Mode#Never}.
     */
    private void putArchiveEntry(final ZipArchiveEntry archiveEntry, final boolean phased) throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }

        // implicitly close a still-open previous entry
        if (entry != null) {
            closeArchiveEntry();
        }

        entry = new CurrentEntry(archiveEntry);
        entries.add(entry.entry);

        setDefaults(entry.entry);

        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        validateSizeInformation(effectiveMode);

        if (shouldAddZip64Extra(entry.entry, effectiveMode)) {

            final Zip64ExtendedInformationExtraField z64 = getZip64Extra(entry.entry);

            final ZipEightByteInteger size;
            final ZipEightByteInteger compressedSize;
            if (phased) {
                // sizes are already known
                size = new ZipEightByteInteger(entry.entry.getSize());
                compressedSize = new ZipEightByteInteger(entry.entry.getCompressedSize());
            } else if (entry.entry.getMethod() == STORED && entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
                // actually, we already know the sizes
                compressedSize = size = new ZipEightByteInteger(entry.entry.getSize());
            } else {
                // just a placeholder, real data will be in data
                // descriptor or inserted later via SeekableByteChannel
                compressedSize = size = ZipEightByteInteger.ZERO;
            }
            z64.setSize(size);
            z64.setCompressedSize(compressedSize);
            entry.entry.setExtra();
        }

        if (entry.entry.getMethod() == DEFLATED && hasCompressionLevelChanged) {
            def.setLevel(level);
            hasCompressionLevelChanged = false;
        }
        writeLocalFileHeader(archiveEntry, phased);
    }

    /**
     * When using random access output, write the local file header and potentially the ZIP64 extra containing the correct CRC and compressed/uncompressed
     * sizes.
     */
    private void rewriteSizesAndCrc(final boolean actuallyNeedsZip64) throws IOException {
        final RandomAccessOutputStream randomStream = (RandomAccessOutputStream) outputStream;
        long dataStart = entry.localDataStart;
        if (randomStream instanceof ZipSplitOutputStream) {
            // translate the per-archive offset into a position on the segment disk
            dataStart = ((ZipSplitOutputStream) randomStream).calculateDiskPosition(entry.entry.getDiskNumberStart(), dataStart);
        }

        // dataStart points at the CRC field of the local file header (see writeLocalFileHeader)
        long position = dataStart;
        randomStream.writeFully(ZipLong.getBytes(entry.entry.getCrc()), position);
        position += ZipConstants.WORD;
        if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) {
            randomStream.writeFully(ZipLong.getBytes(entry.entry.getCompressedSize()), position);
            position += ZipConstants.WORD;
            randomStream.writeFully(ZipLong.getBytes(entry.entry.getSize()), position);
            position += ZipConstants.WORD;
        } else {
            // real sizes live in the ZIP64 extra; the 32-bit fields hold the magic marker
            randomStream.writeFully(ZipLong.ZIP64_MAGIC.getBytes(), position);
            position += ZipConstants.WORD;
            randomStream.writeFully(ZipLong.ZIP64_MAGIC.getBytes(), position);
            position += ZipConstants.WORD;
        }

        if (hasZip64Extra(entry.entry)) {
            final ByteBuffer name = getName(entry.entry);
            final int nameLen = name.limit() - name.position();
            // seek to ZIP64 extra, skip header and size information
            position = dataStart + 3 * ZipConstants.WORD + 2 * ZipConstants.SHORT + nameLen + 2 * ZipConstants.SHORT;
            // inside the ZIP64 extra uncompressed size comes
            // first, unlike the LFH, CD or data descriptor
            randomStream.writeFully(ZipEightByteInteger.getBytes(entry.entry.getSize()), position);
            position += ZipConstants.DWORD;
            randomStream.writeFully(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize()), position);
            position += ZipConstants.DWORD;

            if (!actuallyNeedsZip64) {
                // do some cleanup:
                // * rewrite version needed to extract
                position = dataStart - 5 * ZipConstants.SHORT;
                randomStream.writeFully(ZipShort.getBytes(versionNeededToExtract(entry.entry.getMethod(), false, false)), position);
                position += ZipConstants.SHORT;

                // * remove ZIP64 extra, so it doesn't get written
                // to the central directory
                entry.entry.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
                entry.entry.setExtra();

                // * reset hasUsedZip64 if it has been set because
                // of this entry
                if (entry.causedUseOfZip64) {
                    hasUsedZip64 = false;
                }
            }
        }
    }

    /**
     * Sets the file comment.
     *
     * @param comment the comment
     */
    public void setComment(final String comment) {
        this.comment = comment;
    }

    /**
     * Whether to create Unicode Extra Fields.
     * <p>
     * Defaults to NEVER.
     * </p>
     *
     * @param b whether to create Unicode Extra Fields.
     */
    public void setCreateUnicodeExtraFields(final UnicodeExtraFieldPolicy b) {
        createUnicodeExtraFields = b;
    }

    /**
     * Provides default values for compression method and last modification time.
     */
    private void setDefaults(final ZipArchiveEntry entry) {
        if (entry.getMethod() == -1) { // not specified
            entry.setMethod(method);
        }

        if (entry.getTime() == -1) { // not specified
            entry.setTime(System.currentTimeMillis());
        }
    }

    // Applies the encoding and drops the UTF-8 language-encoding flag when the
    // configured charset is not UTF-8.
    private void setEncoding(final Charset encoding) {
        this.charset = encoding;
        this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
        if (useUTF8Flag && !ZipEncodingHelper.isUTF8(encoding)) {
            useUTF8Flag = false;
        }
    }

    /**
     * The encoding to use for file names and the file comment.
     * <p>
     * For a list of possible values see <a href="https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html">Supported Encodings</a>.
     * Defaults to UTF-8.
     * </p>
     *
     * @param encoding the encoding to use for file names, use null for the platform's default encoding
     */
    public void setEncoding(final String encoding) {
        setEncoding(Charsets.toCharset(encoding));
    }

    /**
     * Whether to fall back to UTF and the language encoding flag if the file name cannot be encoded using the specified encoding.
     * <p>
     * Defaults to false.
     * </p>
     *
     * @param b whether to fall back to UTF and the language encoding flag if the file name cannot be encoded using the specified encoding.
     */
    public void setFallbackToUTF8(final boolean b) {
        fallbackToUTF8 = b;
    }

    /**
     * Sets the compression level for subsequent entries.
     * <p>
     * Default is Deflater.DEFAULT_COMPRESSION.
     * </p>
     *
     * @param level the compression level.
     * @throws IllegalArgumentException if an invalid compression level is specified.
     */
    public void setLevel(final int level) {
        if (level < Deflater.DEFAULT_COMPRESSION || level > Deflater.BEST_COMPRESSION) {
            throw new IllegalArgumentException("Invalid compression level: " + level);
        }
        if (this.level == level) {
            return;
        }
        // the Deflater is only reconfigured lazily, right before the next DEFLATED entry
        hasCompressionLevelChanged = true;
        this.level = level;
    }

    /**
     * Sets the default compression method for subsequent entries.
     * <p>
     * Default is DEFLATED.
     * </p>
     *
     * @param method an {@code int} from java.util.zip.ZipEntry
     */
    public void setMethod(final int method) {
        this.method = method;
    }

    /**
     * Whether to set the language encoding flag if the file name encoding is UTF-8.
     * <p>
     * Defaults to true.
     * </p>
     *
     * @param b whether to set the language encoding flag if the file name encoding is UTF-8
     */
    public void setUseLanguageEncodingFlag(final boolean b) {
        useUTF8Flag = b && ZipEncodingHelper.isUTF8(charset);
    }

    /**
     * Whether Zip64 extensions will be used.
     * <p>
     * When setting the mode to {@link Zip64Mode#Never Never}, {@link #putArchiveEntry}, {@link #closeArchiveEntry}, {@link #finish} or {@link #close} may throw
     * a {@link Zip64RequiredException} if the entry's size or the total size of the archive exceeds 4GB or there are more than 65,536 entries inside the
     * archive. Any archive created in this mode will be readable by implementations that don't support Zip64.
     * </p>
     * <p>
     * When setting the mode to {@link Zip64Mode#Always Always}, Zip64 extensions will be used for all entries. Any archive created in this mode may be
     * unreadable by implementations that don't support Zip64 even if all its contents would be.
     * </p>
     * <p>
     * When setting the mode to {@link Zip64Mode#AsNeeded AsNeeded}, Zip64 extensions will transparently be used for those entries that require them. This mode
     * can only be used if the uncompressed size of the {@link ZipArchiveEntry} is known when calling {@link #putArchiveEntry} or the archive is written to a
     * seekable output (i.e. you have used the {@link #ZipArchiveOutputStream(java.io.File) File-arg constructor}) - this mode is not valid when the output
     * stream is not seekable and the uncompressed size is unknown when {@link #putArchiveEntry} is called.
     * </p>
     * <p>
     * If no entry inside the resulting archive requires Zip64 extensions then {@link Zip64Mode#Never Never} will create the smallest archive.
     * {@link Zip64Mode#AsNeeded AsNeeded} will create a slightly bigger archive if the uncompressed size of any entry has initially been unknown and create an
     * archive identical to {@link Zip64Mode#Never Never} otherwise. {@link Zip64Mode#Always Always} will create an archive that is at least 24 bytes per entry
     * bigger than the one {@link Zip64Mode#Never Never} would create.
     * </p>
     * <p>
     * Defaults to {@link Zip64Mode#AsNeeded AsNeeded} unless {@link #putArchiveEntry} is called with an entry of unknown size and data is written to a
     * non-seekable stream - in this case the default is {@link Zip64Mode#Never Never}.
     * </p>
     *
     * @since 1.3
     * @param mode Whether Zip64 extensions will be used.
     */
    public void setUseZip64(final Zip64Mode mode) {
        zip64Mode = mode;
    }

    /**
     * Whether to add a Zip64 extended information extra field to the local file header.
     * <p>
     * Returns true if
     * </p>
     * <ul>
     * <li>mode is Always</li>
     * <li>or we already know it is going to be needed</li>
     * <li>or the size is unknown and we can ensure it won't hurt other implementations if we add it (i.e. we can erase its usage</li>
     * </ul>
     */
    private boolean shouldAddZip64Extra(final ZipArchiveEntry entry, final Zip64Mode mode) {
        return mode == Zip64Mode.Always || mode == Zip64Mode.AlwaysWithCompatibility || entry.getSize() >= ZipConstants.ZIP64_MAGIC
                || entry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC
                || entry.getSize() == ArchiveEntry.SIZE_UNKNOWN && outputStream instanceof RandomAccessOutputStream && mode != Zip64Mode.Never;
    }

    /**
     * 4.4.1.4 If one of the fields in the end of central directory record is too small to hold required data, the field SHOULD be set to -1 (0xFFFF or
     * 0xFFFFFFFF) and the ZIP64 format record SHOULD be created.
     *
     * @return true if zip64 End Of Central Directory is needed
     */
    private boolean shouldUseZip64EOCD() {
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        final int numOfEntriesOnThisDisk = numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0);
        return numberOfThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT /* number of this disk */
                || cdDiskNumberStart >= ZipConstants.ZIP64_MAGIC_SHORT /* number of the disk with the start of the central directory */
                || numOfEntriesOnThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT /* total number of entries in the central directory on this disk */
                || entries.size() >= ZipConstants.ZIP64_MAGIC_SHORT /* total number of entries in the central directory */
                || cdLength >= ZipConstants.ZIP64_MAGIC /* size of the central directory */
                || cdOffset >= ZipConstants.ZIP64_MAGIC; /*
                                                          * offset of start of central directory with respect to the starting disk number
                                                          */
    }

    // A data descriptor is only needed when sizes/CRC are not known up-front
    // and cannot be patched in afterwards (DEFLATED to a non-seekable output).
    private boolean usesDataDescriptor(final int zipMethod, final boolean phased) {
        return !phased && zipMethod == DEFLATED && !(outputStream instanceof RandomAccessOutputStream);
    }

    /**
     * If the
     * Zip64 mode is set to never, then all the data in End Of Central Directory should not exceed their limits.
     *
     * @throws Zip64RequiredException if Zip64 is actually needed
     */
    private void validateIfZip64IsNeededInEOCD() throws Zip64RequiredException {
        // exception will only be thrown if the Zip64 mode is never while Zip64 is actually needed
        if (zip64Mode != Zip64Mode.Never) {
            return;
        }

        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        if (numberOfThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.DISK_NUMBER_TOO_BIG_MESSAGE);
        }

        if (cdDiskNumberStart >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.CENTRAL_DIRECTORY_DISK_NUMBER_TOO_BIG_MESSAGE);
        }

        final int numOfEntriesOnThisDisk = numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0);
        if (numOfEntriesOnThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.TOO_MANY_ENTRIES_ON_DISK_MESSAGE);
        }

        // number of entries
        if (entries.size() >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.TOO_MANY_ENTRIES_MESSAGE);
        }

        if (cdLength >= ZipConstants.ZIP64_MAGIC) {
            throw new Zip64RequiredException(Zip64RequiredException.CENTRAL_DIRECTORY_SIZE_TOO_BIG_MESSAGE);
        }

        if (cdOffset >= ZipConstants.ZIP64_MAGIC) {
            throw new Zip64RequiredException(Zip64RequiredException.ARCHIVE_TOO_BIG_MESSAGE);
        }
    }

    /**
     * Throws an exception if the size is unknown for a stored entry that is written to a non-seekable output or the entry is too big to be written without
     * Zip64 extra but the mode has been set to Never.
     */
    private void validateSizeInformation(final Zip64Mode effectiveMode) throws ZipException {
        // Size/CRC not required if SeekableByteChannel is used
        if (entry.entry.getMethod() == STORED && !(outputStream instanceof RandomAccessOutputStream)) {
            if (entry.entry.getSize() == ArchiveEntry.SIZE_UNKNOWN) {
                throw new ZipException("Uncompressed size is required for" + " STORED method when not writing to a" + " file");
            }
            if (entry.entry.getCrc() == ZipArchiveEntry.CRC_UNKNOWN) {
                throw new ZipException("CRC checksum is required for STORED" + " method when not writing to a file");
            }
            // STORED data is written verbatim, so compressed size equals uncompressed size
            entry.entry.setCompressedSize(entry.entry.getSize());
        }

        if ((entry.entry.getSize() >= ZipConstants.ZIP64_MAGIC || entry.entry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC)
                && effectiveMode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry));
        }
    }

    // "version needed to extract" as stored in the headers: Zip64 takes precedence
    // over the data descriptor requirement, which takes precedence over the method minimum.
    private int versionNeededToExtract(final int zipMethod, final boolean zip64, final boolean usedDataDescriptor) {
        if (zip64) {
            return ZipConstants.ZIP64_MIN_VERSION;
        }
        if (usedDataDescriptor) {
            return ZipConstants.DATA_DESCRIPTOR_MIN_VERSION;
        }
        return versionNeededToExtractMethod(zipMethod);
    }

    private int versionNeededToExtractMethod(final int zipMethod) {
        return zipMethod == DEFLATED ? ZipConstants.DEFLATE_MIN_VERSION : ZipConstants.INITIAL_VERSION;
    }

    /**
     * Writes bytes to ZIP entry.
     *
     * @param b the byte array to write
     * @param offset the start position to write from
     * @param length the number of bytes to write
     * @throws IOException on error
     */
    @Override
    public void write(final byte[] b, final int offset, final int length) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        final long writtenThisTime = streamCompressor.write(b, offset, length, entry.entry.getMethod());
        count(writtenThisTime);
    }

    /**
     * Writes the "End of central dir record".
     *
     * @throws IOException on error
     * @throws Zip64RequiredException if the archive's size exceeds 4 GByte or there are more than 65535 entries inside the archive and
     *             {@link #setUseZip64(Zip64Mode)} is {@link Zip64Mode#Never}.
     */
    protected void writeCentralDirectoryEnd() throws IOException {
        if (!hasUsedZip64 && isSplitZip) {
            ((ZipSplitOutputStream) this.outputStream).prepareToWriteUnsplittableContent(eocdLength);
        }

        validateIfZip64IsNeededInEOCD();

        writeCounted(EOCD_SIG);

        // number of this disk
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        writeCounted(ZipShort.getBytes(numberOfThisDisk));

        // disk number of the start of central directory
        writeCounted(ZipShort.getBytes((int) cdDiskNumberStart));

        // number of entries
        final int numberOfEntries = entries.size();

        // total number of entries in the central directory on this disk
        final int numOfEntriesOnThisDisk = isSplitZip ? numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0) : numberOfEntries;
        // values that don't fit into 16 bits are capped at the Zip64 marker value
        final byte[] numOfEntriesOnThisDiskData = ZipShort.getBytes(Math.min(numOfEntriesOnThisDisk, ZipConstants.ZIP64_MAGIC_SHORT));
        writeCounted(numOfEntriesOnThisDiskData);

        // number of entries
        final byte[] num = ZipShort.getBytes(Math.min(numberOfEntries, ZipConstants.ZIP64_MAGIC_SHORT));
        writeCounted(num);

        // length and location of CD
        writeCounted(ZipLong.getBytes(Math.min(cdLength, ZipConstants.ZIP64_MAGIC)));
        writeCounted(ZipLong.getBytes(Math.min(cdOffset, ZipConstants.ZIP64_MAGIC)));

        // ZIP file comment
        final ByteBuffer data = this.zipEncoding.encode(comment);
        final int dataLen = data.limit() - data.position();
        writeCounted(ZipShort.getBytes(dataLen));
        streamCompressor.writeCounted(data.array(), data.arrayOffset(), dataLen);
    }

    // Buffers central file headers and flushes them to the output in batches to
    // avoid one write call per entry.
    private void writeCentralDirectoryInChunks() throws IOException {
        final int NUM_PER_WRITE = 1000;
        final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(70 * NUM_PER_WRITE);
        int count = 0;
        for (final ZipArchiveEntry ze : entries) {
            byteArrayOutputStream.write(createCentralFileHeader(ze));
            // NOTE(review): '>' means the buffer is flushed after NUM_PER_WRITE + 1 headers,
            // not NUM_PER_WRITE; harmless since the ByteArrayOutputStream grows as needed
            if (++count > NUM_PER_WRITE) {
                writeCounted(byteArrayOutputStream.toByteArray());
                byteArrayOutputStream.reset();
                count = 0;
            }
        }
        writeCounted(byteArrayOutputStream.toByteArray());
    }

    /**
     * Writes the central file header entry.
     *
     * @param ze the entry to write
     * @throws IOException on error
     * @throws Zip64RequiredException if the archive's size exceeds 4 GByte and {@link #setUseZip64(Zip64Mode)} is {@link Zip64Mode#Never}.
1644 */ 1645 protected void writeCentralFileHeader(final ZipArchiveEntry ze) throws IOException { 1646 final byte[] centralFileHeader = createCentralFileHeader(ze); 1647 writeCounted(centralFileHeader); 1648 } 1649 1650 /** 1651 * Write bytes to output or random access file. 1652 * 1653 * @param data the byte array to write 1654 * @throws IOException on error 1655 */ 1656 private void writeCounted(final byte[] data) throws IOException { 1657 streamCompressor.writeCounted(data); 1658 } 1659 1660 /** 1661 * Writes the data descriptor entry. 1662 * 1663 * @param ze the entry to write 1664 * @throws IOException on error 1665 */ 1666 protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException { 1667 if (!usesDataDescriptor(ze.getMethod(), false)) { 1668 return; 1669 } 1670 writeCounted(DD_SIG); 1671 writeCounted(ZipLong.getBytes(ze.getCrc())); 1672 if (!hasZip64Extra(ze)) { 1673 writeCounted(ZipLong.getBytes(ze.getCompressedSize())); 1674 writeCounted(ZipLong.getBytes(ze.getSize())); 1675 } else { 1676 writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize())); 1677 writeCounted(ZipEightByteInteger.getBytes(ze.getSize())); 1678 } 1679 } 1680 1681 /** 1682 * Writes the local file header entry 1683 * 1684 * @param ze the entry to write 1685 * @throws IOException on error 1686 */ 1687 protected void writeLocalFileHeader(final ZipArchiveEntry ze) throws IOException { 1688 writeLocalFileHeader(ze, false); 1689 } 1690 1691 private void writeLocalFileHeader(final ZipArchiveEntry ze, final boolean phased) throws IOException { 1692 final boolean encodable = zipEncoding.canEncode(ze.getName()); 1693 final ByteBuffer name = getName(ze); 1694 1695 if (createUnicodeExtraFields != UnicodeExtraFieldPolicy.NEVER) { 1696 addUnicodeExtraFields(ze, encodable, name); 1697 } 1698 1699 long localHeaderStart = streamCompressor.getTotalBytesWritten(); 1700 if (isSplitZip) { 1701 // when creating a split zip, the offset should be 1702 // the offset to the 
corresponding segment disk 1703 final ZipSplitOutputStream splitOutputStream = (ZipSplitOutputStream) this.outputStream; 1704 ze.setDiskNumberStart(splitOutputStream.getCurrentSplitSegmentIndex()); 1705 localHeaderStart = splitOutputStream.getCurrentSplitSegmentBytesWritten(); 1706 } 1707 1708 final byte[] localHeader = createLocalFileHeader(ze, name, encodable, phased, localHeaderStart); 1709 metaData.put(ze, new EntryMetaData(localHeaderStart, usesDataDescriptor(ze.getMethod(), phased))); 1710 entry.localDataStart = localHeaderStart + LFH_CRC_OFFSET; // At crc offset 1711 writeCounted(localHeader); 1712 entry.dataStart = streamCompressor.getTotalBytesWritten(); 1713 } 1714 1715 /** 1716 * Write bytes to output or random access file. 1717 * 1718 * @param data the byte array to write 1719 * @throws IOException on error 1720 */ 1721 protected final void writeOut(final byte[] data) throws IOException { 1722 streamCompressor.writeOut(data, 0, data.length); 1723 } 1724 1725 /** 1726 * Write bytes to output or random access file. 1727 * 1728 * @param data the byte array to write 1729 * @param offset the start position to write from 1730 * @param length the number of bytes to write 1731 * @throws IOException on error 1732 */ 1733 protected final void writeOut(final byte[] data, final int offset, final int length) throws IOException { 1734 streamCompressor.writeOut(data, offset, length); 1735 } 1736 1737 /** 1738 * Write preamble data. For most of the time, this is used to make self-extracting zips. 1739 * 1740 * @param preamble data to write 1741 * @throws IOException if an entry already exists 1742 * @since 1.21 1743 */ 1744 public void writePreamble(final byte[] preamble) throws IOException { 1745 writePreamble(preamble, 0, preamble.length); 1746 } 1747 1748 /** 1749 * Write preamble data. For most of the time, this is used to make self-extracting zips. 
     *
     * @param preamble data to write
     * @param offset the start offset in the data
     * @param length the number of bytes to write
     * @throws IOException if an entry already exists
     * @since 1.21
     */
    public void writePreamble(final byte[] preamble, final int offset, final int length) throws IOException {
        // the preamble must precede the first local file header, so it is only
        // legal before any entry has been started
        if (entry != null) {
            throw new IllegalStateException("Preamble must be written before creating an entry");
        }
        this.streamCompressor.writeCounted(preamble, offset, length);
    }

    /**
     * Writes the "ZIP64 End of central dir record" and "ZIP64 End of central dir locator".
     *
     * @throws IOException on error
     * @since 1.3
     */
    protected void writeZip64CentralDirectory() throws IOException {
        if (zip64Mode == Zip64Mode.Never) {
            return;
        }

        if (!hasUsedZip64 && shouldUseZip64EOCD()) {
            // actually "will use"
            hasUsedZip64 = true;
        }

        // nothing ZIP64 in this archive -> no ZIP64 EOCD record/locator needed
        if (!hasUsedZip64) {
            return;
        }

        long offset = streamCompressor.getTotalBytesWritten();
        long diskNumberStart = 0L;
        if (isSplitZip) {
            // when creating a split zip, the offset should be
            // the offset within the corresponding segment disk
            final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.outputStream;
            offset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten();
            diskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex();
        }

        writeOut(ZIP64_EOCD_SIG);
        // size of zip64 end of central directory, we don't have any variable length
        // as we don't support the extensible data sector, yet
        writeOut(ZipEightByteInteger.getBytes(ZipConstants.SHORT /* version made by */
                + ZipConstants.SHORT /* version needed to extract */
                + ZipConstants.WORD /* disk number */
                + ZipConstants.WORD /* disk with central directory */
                + ZipConstants.DWORD /* number of entries in CD on this disk */
                + ZipConstants.DWORD /* total number of entries */
                + ZipConstants.DWORD /* size of CD */
                + (long) ZipConstants.DWORD /* offset of CD */
        ));

        // version made by and version needed to extract
        writeOut(ZipShort.getBytes(ZipConstants.ZIP64_MIN_VERSION));
        writeOut(ZipShort.getBytes(ZipConstants.ZIP64_MIN_VERSION));

        // number of this disk
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex();
        }
        writeOut(ZipLong.getBytes(numberOfThisDisk));

        // disk number of the start of central directory
        writeOut(ZipLong.getBytes(cdDiskNumberStart));

        // total number of entries in the central directory on this disk
        // (unlike the plain EOCD record these are full 8-byte fields, no clamping)
        final int numOfEntriesOnThisDisk = isSplitZip ? numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0) : entries.size();
        final byte[] numOfEntriesOnThisDiskData = ZipEightByteInteger.getBytes(numOfEntriesOnThisDisk);
        writeOut(numOfEntriesOnThisDiskData);

        // number of entries
        final byte[] num = ZipEightByteInteger.getBytes(entries.size());
        writeOut(num);

        // length and location of CD
        writeOut(ZipEightByteInteger.getBytes(cdLength));
        writeOut(ZipEightByteInteger.getBytes(cdOffset));

        // no "zip64 extensible data sector" for now

        if (isSplitZip) {
            // based on the ZIP specification, the End Of Central Directory record and
            // the Zip64 End Of Central Directory locator record must be on the same segment
            final int zip64EOCDLOCLength = ZipConstants.WORD /* length of ZIP64_EOCD_LOC_SIG */
                    + ZipConstants.WORD /* disk number of ZIP64_EOCD_SIG */
                    + ZipConstants.DWORD /* offset of ZIP64_EOCD_SIG */
                    + ZipConstants.WORD /* total number of disks */;

            final long unsplittableContentSize = zip64EOCDLOCLength + eocdLength;
            ((ZipSplitOutputStream) this.outputStream).prepareToWriteUnsplittableContent(unsplittableContentSize);
        }

        // and now the "ZIP64 end of central directory locator"
        writeOut(ZIP64_EOCD_LOC_SIG);

        // disk number holding the ZIP64 EOCD record
        writeOut(ZipLong.getBytes(diskNumberStart));
        // relative offset of ZIP64 EOCD record
        writeOut(ZipEightByteInteger.getBytes(offset));
        // total number of disks
        if (isSplitZip) {
            // the Zip64 End Of Central Directory Locator and the End Of Central Directory must be
            // in the same split disk, it means they must be located in the last disk
            final int totalNumberOfDisks = ((ZipSplitOutputStream) this.outputStream).getCurrentSplitSegmentIndex() + 1;
            writeOut(ZipLong.getBytes(totalNumberOfDisks));
        } else {
            writeOut(ONE);
        }
    }
}