/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.commons.compress.archivers.zip;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.SeekableByteChannel;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.LinkOption;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.zip.Deflater;
import java.util.zip.ZipException;

import org.apache.commons.compress.archivers.ArchiveEntry;
import org.apache.commons.compress.archivers.ArchiveOutputStream;
import org.apache.commons.compress.utils.ByteUtils;
import org.apache.commons.io.Charsets;

/**
 * Reimplementation of {@link java.util.zip.ZipOutputStream java.util.zip.ZipOutputStream} to handle the extended functionality of this package, especially
 * internal/external file attributes and extra fields with different layouts for local file data and central directory entries.
 * <p>
 * This class will try to use {@link java.nio.channels.SeekableByteChannel} when it knows that the output is going to go to a file and no split archive shall
 * be created.
 * </p>
 * <p>
 * If SeekableByteChannel cannot be used, this implementation will use a Data Descriptor to store size and CRC information for {@link #DEFLATED DEFLATED}
 * entries, so you don't need to calculate them yourself. Unfortunately, this is not possible for the {@link #STORED STORED} method, where setting the CRC and
 * uncompressed size information is required before {@link #putArchiveEntry(ZipArchiveEntry)} can be called.
 * </p>
 * <p>
 * As of Apache Commons Compress 1.3, the class transparently supports Zip64 extensions and thus individual entries and archives larger than 4 GB or with more
 * than 65,536 entries in most cases, but explicit control is provided via {@link #setUseZip64}. If the stream cannot use SeekableByteChannel and you try to
 * write a ZipArchiveEntry of unknown size, then Zip64 extensions will be disabled by default.
 * </p>
 *
 * @NotThreadSafe
 */
public class ZipArchiveOutputStream extends ArchiveOutputStream<ZipArchiveEntry> {

    /**
     * Structure collecting information for the entry that is currently being written.
     */
    private static final class CurrentEntry {

        /**
         * Current ZIP entry.
         */
        private final ZipArchiveEntry entry;

        /**
         * Offset at which the CRC entry in the local file header data for the current entry starts.
         */
        private long localDataStart;

        /**
         * Offset at which the data of the current entry starts.
         */
        private long dataStart;

        /**
         * Number of bytes read for the current entry (can't rely on Deflater#getBytesRead) when using DEFLATED.
         */
        private long bytesRead;

        /**
         * Whether current entry was the first one using ZIP64 features.
         */
        private boolean causedUseOfZip64;

        /**
         * Whether write() has been called at all.
         *
         * <p>
         * In order to create a valid archive {@link #closeArchiveEntry closeArchiveEntry} will write an empty array to get the CRC right if nothing has been
         * written to the stream at all.
         * </p>
         */
        private boolean hasWritten;

        private CurrentEntry(final ZipArchiveEntry entry) {
            this.entry = entry;
        }
    }

    private static final class EntryMetaData {
        private final long offset;
        private final boolean usesDataDescriptor;

        private EntryMetaData(final long offset, final boolean usesDataDescriptor) {
            this.offset = offset;
            this.usesDataDescriptor = usesDataDescriptor;
        }
    }

    /**
     * Enum that represents the possible policies for creating Unicode extra fields.
     */
    public static final class UnicodeExtraFieldPolicy {

        /**
         * Always create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy ALWAYS = new UnicodeExtraFieldPolicy("always");

        /**
         * Never create Unicode extra fields.
         */
        public static final UnicodeExtraFieldPolicy NEVER = new UnicodeExtraFieldPolicy("never");

        /**
         * Creates Unicode extra fields for file names that cannot be encoded using the specified encoding.
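         * <p>
         * A minimal sketch of selecting this policy on a stream; the target path {@code archive.zip} is illustrative only:
         * </p>
         * <pre>{@code
         * ZipArchiveOutputStream zos = new ZipArchiveOutputStream(Paths.get("archive.zip"));
         * zos.setCreateUnicodeExtraFields(UnicodeExtraFieldPolicy.NOT_ENCODEABLE);
         * }</pre>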
         */
        public static final UnicodeExtraFieldPolicy NOT_ENCODEABLE = new UnicodeExtraFieldPolicy("not encodeable");

        private final String name;

        private UnicodeExtraFieldPolicy(final String n) {
            name = n;
        }

        @Override
        public String toString() {
            return name;
        }
    }

    static final int BUFFER_SIZE = 512;
    private static final int LFH_SIG_OFFSET = 0;
    private static final int LFH_VERSION_NEEDED_OFFSET = 4;
    private static final int LFH_GPB_OFFSET = 6;
    private static final int LFH_METHOD_OFFSET = 8;
    private static final int LFH_TIME_OFFSET = 10;
    private static final int LFH_CRC_OFFSET = 14;
    private static final int LFH_COMPRESSED_SIZE_OFFSET = 18;
    private static final int LFH_ORIGINAL_SIZE_OFFSET = 22;
    private static final int LFH_FILENAME_LENGTH_OFFSET = 26;
    private static final int LFH_EXTRA_LENGTH_OFFSET = 28;
    private static final int LFH_FILENAME_OFFSET = 30;
    private static final int CFH_SIG_OFFSET = 0;
    private static final int CFH_VERSION_MADE_BY_OFFSET = 4;
    private static final int CFH_VERSION_NEEDED_OFFSET = 6;
    private static final int CFH_GPB_OFFSET = 8;
    private static final int CFH_METHOD_OFFSET = 10;
    private static final int CFH_TIME_OFFSET = 12;
    private static final int CFH_CRC_OFFSET = 16;
    private static final int CFH_COMPRESSED_SIZE_OFFSET = 20;
    private static final int CFH_ORIGINAL_SIZE_OFFSET = 24;
    private static final int CFH_FILENAME_LENGTH_OFFSET = 28;
    private static final int CFH_EXTRA_LENGTH_OFFSET = 30;
    private static final int CFH_COMMENT_LENGTH_OFFSET = 32;
    private static final int CFH_DISK_NUMBER_OFFSET = 34;
    private static final int CFH_INTERNAL_ATTRIBUTES_OFFSET = 36;

    private static final int CFH_EXTERNAL_ATTRIBUTES_OFFSET = 38;

    private static final int CFH_LFH_OFFSET = 42;

    private static final int CFH_FILENAME_OFFSET = 46;

    /**
     * Compression method for deflated entries.
     */
    public static final int DEFLATED = java.util.zip.ZipEntry.DEFLATED;

    /**
     * Default compression level for deflated entries.
     */
    public static final int DEFAULT_COMPRESSION = Deflater.DEFAULT_COMPRESSION;

    /**
     * Compression method for stored entries.
     */
    public static final int STORED = java.util.zip.ZipEntry.STORED;

    /**
     * Default encoding for file names and comment.
     */
    static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;

    /**
     * General purpose flag, which indicates that file names are written in UTF-8.
     *
     * @deprecated use {@link GeneralPurposeBit#UFT8_NAMES_FLAG} instead
     */
    @Deprecated
    public static final int EFS_FLAG = GeneralPurposeBit.UFT8_NAMES_FLAG;

    /**
     * Helper, a 0 as ZipShort.
     */
    private static final byte[] ZERO = { 0, 0 };

    /**
     * Helper, a 0 as ZipLong.
     */
    private static final byte[] LZERO = { 0, 0, 0, 0 };

    private static final byte[] ONE = ZipLong.getBytes(1L);

    /*
     * Various ZIP constants shared between this class, ZipArchiveInputStream and ZipFile
     */
    /**
     * local file header signature
     */
    static final byte[] LFH_SIG = ZipLong.LFH_SIG.getBytes(); // NOSONAR

    /**
     * data descriptor signature
     */
    static final byte[] DD_SIG = ZipLong.DD_SIG.getBytes(); // NOSONAR

    /**
     * central file header signature
     */
    static final byte[] CFH_SIG = ZipLong.CFH_SIG.getBytes(); // NOSONAR

    /**
     * end of central dir signature
     */
    static final byte[] EOCD_SIG = ZipLong.getBytes(0X06054B50L); // NOSONAR

    /**
     * ZIP64 end of central dir signature
     */
    static final byte[] ZIP64_EOCD_SIG = ZipLong.getBytes(0X06064B50L); // NOSONAR

    /**
     * ZIP64 end of central dir locator signature
     */
    static final byte[] ZIP64_EOCD_LOC_SIG = ZipLong.getBytes(0X07064B50L); // NOSONAR

    /**
     * Indicates if this archive is finished. protected for use in Jar implementation.
     *
     * @deprecated See {@link #isFinished()} and {@link #finish()}.
     */
    @Deprecated
    protected boolean finished;

    /**
     * Current entry.
     */
    private CurrentEntry entry;

    /**
     * The file comment.
     */
    private String comment = "";

    /**
     * Compression level for next entry.
     */
    private int level = DEFAULT_COMPRESSION;

    /**
     * Has the compression level changed when compared to the last entry?
     */
    private boolean hasCompressionLevelChanged;

    /**
     * Default compression method for next entry.
     */
    private int method = java.util.zip.ZipEntry.DEFLATED;

    /**
     * List of ZipArchiveEntries written so far.
     */
    private final List<ZipArchiveEntry> entries = new LinkedList<>();

    private final StreamCompressor streamCompressor;

    /**
     * Start of central directory.
     */
    private long cdOffset;

    /**
     * Length of central directory.
     */
    private long cdLength;

    /**
     * Disk number start of central directory.
     */
    private long cdDiskNumberStart;

    /**
     * Length of end of central directory
     */
    private long eocdLength;

    /**
     * Holds some book-keeping data for each entry.
     */
    private final Map<ZipArchiveEntry, EntryMetaData> metaData = new HashMap<>();

    /**
     * The encoding to use for file names and the file comment.
     *
     * <p>
     * For a list of possible values see <a href="https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html">Supported Encodings</a>.
     * Defaults to UTF-8.
     * </p>
     */
    private Charset charset = DEFAULT_CHARSET;

    /**
     * The ZIP encoding to use for file names and the file comment.
     *
     * This field is of internal use and will be set in {@link #setEncoding(String)}.
     */
    private ZipEncoding zipEncoding = ZipEncodingHelper.getZipEncoding(DEFAULT_CHARSET);

    /**
     * This Deflater object is used for output.
     */
    protected final Deflater def;

    /**
     * whether to use the general purpose bit flag when writing UTF-8 file names or not.
     */
    private boolean useUTF8Flag = true;

    /**
     * Whether to encode non-encodable file names as UTF-8.
     */
    private boolean fallbackToUTF8;

    /**
     * whether to create UnicodePathExtraField-s for each entry.
     */
    private UnicodeExtraFieldPolicy createUnicodeExtraFields = UnicodeExtraFieldPolicy.NEVER;

    /**
     * Whether anything inside this archive has used a ZIP64 feature.
     *
     * @since 1.3
     */
    private boolean hasUsedZip64;

    private Zip64Mode zip64Mode = Zip64Mode.AsNeeded;

    private final byte[] copyBuffer = new byte[32768];

    /**
     * Whether we are creating a split ZIP.
     */
    private final boolean isSplitZip;

    /**
     * Holds the number of Central Directories on each disk. This is used when writing the Zip64 End Of Central Directory and the End Of Central Directory.
     */
    private final Map<Integer, Integer> numberOfCDInDiskData = new HashMap<>();

    /**
     * Creates a new ZIP OutputStream writing to a File. Will use random access if possible.
     *
     * @param file the file to ZIP to
     * @throws IOException on error
     */
    public ZipArchiveOutputStream(final File file) throws IOException {
        this(file.toPath());
    }

    /**
     * Creates a split ZIP Archive.
     *
     * <p>
     * The files making up the archive will use Z01, Z02, ... extensions and the last part of it will be the given {@code file}.
     * </p>
     *
     * <p>
     * Even though the stream writes to a file, this stream will behave as if no random access was possible. This means the sizes of stored entries need to be
     * known before the actual entry data is written.
     * </p>
     *
     * @param file the file that will become the last part of the split archive
     * @param zipSplitSize maximum size of a single part of the split archive created by this stream. Must be between 64kB and about 4GB.
     *
     * @throws IOException on error
     * @throws IllegalArgumentException if zipSplitSize is not in the required range
     * @since 1.20
     */
    public ZipArchiveOutputStream(final File file, final long zipSplitSize) throws IOException {
        this(file.toPath(), zipSplitSize);
    }

    /**
     * Creates a new ZIP OutputStream filtering the underlying stream.
     *
     * @param out the OutputStream to ZIP to
     */
    public ZipArchiveOutputStream(final OutputStream out) {
        this.out = out;
        this.def = new Deflater(level, true);
        this.streamCompressor = StreamCompressor.create(out, def);
        this.isSplitZip = false;
    }

    /**
     * Creates a split ZIP Archive.
     * <p>
     * The files making up the archive will use Z01, Z02, ... extensions and the last part of it will be the given {@code file}.
     * </p>
     * <p>
     * Even though the stream writes to a file, this stream will behave as if no random access was possible. This means the sizes of stored entries need to be
     * known before the actual entry data is written.
     * </p>
     *
     * @param path the path to the file that will become the last part of the split archive
     * @param zipSplitSize maximum size of a single part of the split archive created by this stream. Must be between 64kB and about 4GB.
     * @throws IOException on error
     * @throws IllegalArgumentException if zipSplitSize is not in the required range
     * @since 1.22
     */
    public ZipArchiveOutputStream(final Path path, final long zipSplitSize) throws IOException {
        this.def = new Deflater(level, true);
        this.out = new ZipSplitOutputStream(path, zipSplitSize);
        this.streamCompressor = StreamCompressor.create(this.out, def);
        this.isSplitZip = true;
    }

    /**
     * Creates a new ZIP OutputStream writing to a Path. Will use random access if possible.
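     * <p>
     * A minimal usage sketch, assuming a writable local path named {@code example.zip} (the path and entry name are illustrative only):
     * </p>
     * <pre>{@code
     * try (ZipArchiveOutputStream zos = new ZipArchiveOutputStream(Paths.get("example.zip"))) {
     *     ZipArchiveEntry e = new ZipArchiveEntry("hello.txt");
     *     zos.putArchiveEntry(e);
     *     zos.write("Hello".getBytes(StandardCharsets.UTF_8));
     *     zos.closeArchiveEntry();
     * }
     * }</pre>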
     *
     * @param file the file to ZIP to
     * @param options options specifying how the file is opened.
     * @throws IOException on error
     * @since 1.21
     */
    public ZipArchiveOutputStream(final Path file, final OpenOption... options) throws IOException {
        this.def = new Deflater(level, true);
        this.out = options.length == 0 ? new FileRandomAccessOutputStream(file) : new FileRandomAccessOutputStream(file, options);
        this.streamCompressor = StreamCompressor.create(out, def);
        this.isSplitZip = false;
    }

    /**
     * Creates a new ZIP OutputStream writing to a SeekableByteChannel.
     *
     * <p>
     * {@link org.apache.commons.compress.utils.SeekableInMemoryByteChannel} allows you to write to an in-memory archive using random access.
     * </p>
     *
     * @param channel the channel to ZIP to
     * @since 1.13
     */
    public ZipArchiveOutputStream(final SeekableByteChannel channel) {
        this.out = new SeekableChannelRandomAccessOutputStream(channel);
        this.def = new Deflater(level, true);
        this.streamCompressor = StreamCompressor.create(out, def);
        this.isSplitZip = false;
    }

    /**
     * Adds an archive entry with a raw input stream.
     * <p>
     * If crc, size and compressed size are supplied on the entry, these values will be used as-is. Zip64 status is re-established based on the settings in
     * this stream, and the supplied value is ignored.
     * </p>
     * <p>
     * The entry is put and closed immediately.
     * </p>
     *
     * @param entry The archive entry to add
     * @param rawStream The raw input stream of a different entry. May be compressed/encrypted.
     * @throws IOException If copying fails
     */
    public void addRawArchiveEntry(final ZipArchiveEntry entry, final InputStream rawStream) throws IOException {
        final ZipArchiveEntry ae = new ZipArchiveEntry(entry);
        if (hasZip64Extra(ae)) {
            // Will be re-added as required. This may make the file generated with this method
            // somewhat smaller than standard mode,
            // since standard mode is unable to remove the ZIP64 header.
            ae.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        }
        final boolean is2PhaseSource = ae.getCrc() != ZipArchiveEntry.CRC_UNKNOWN && ae.getSize() != ArchiveEntry.SIZE_UNKNOWN
                && ae.getCompressedSize() != ArchiveEntry.SIZE_UNKNOWN;
        putArchiveEntry(ae, is2PhaseSource);
        copyFromZipInputStream(rawStream);
        closeCopiedEntry(is2PhaseSource);
    }

    /**
     * Adds UnicodeExtra fields for name and file comment if mode is ALWAYS or the data cannot be encoded using the configured encoding.
     */
    private void addUnicodeExtraFields(final ZipArchiveEntry ze, final boolean encodable, final ByteBuffer name) throws IOException {
        if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS || !encodable) {
            ze.addExtraField(new UnicodePathExtraField(ze.getName(), name.array(), name.arrayOffset(), name.limit() - name.position()));
        }

        final String comm = ze.getComment();
        if (comm != null && !comm.isEmpty()) {

            final boolean commentEncodable = zipEncoding.canEncode(comm);

            if (createUnicodeExtraFields == UnicodeExtraFieldPolicy.ALWAYS || !commentEncodable) {
                final ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
                ze.addExtraField(new UnicodeCommentExtraField(comm, commentB.array(), commentB.arrayOffset(), commentB.limit() - commentB.position()));
            }
        }
    }

    /**
     * Whether this stream is able to write the given entry.
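     * <p>
     * A small hedged sketch of guarding a write with this check; the {@code zipOut} variable and entry name are illustrative only:
     * </p>
     * <pre>{@code
     * ZipArchiveEntry candidate = new ZipArchiveEntry("data.bin");
     * if (zipOut.canWriteEntryData(candidate)) {
     *     zipOut.putArchiveEntry(candidate);
     *     // write the entry content, then close the entry
     *     zipOut.closeArchiveEntry();
     * }
     * }</pre>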
     * <p>
     * May return false if it is set up to use encryption or a compression method that hasn't been implemented yet.
     * </p>
     *
     * @since 1.1
     */
    @Override
    public boolean canWriteEntryData(final ArchiveEntry ae) {
        if (ae instanceof ZipArchiveEntry) {
            final ZipArchiveEntry zae = (ZipArchiveEntry) ae;
            return zae.getMethod() != ZipMethod.IMPLODING.getCode() && zae.getMethod() != ZipMethod.UNSHRINKING.getCode() && ZipUtil.canHandleEntryData(zae);
        }
        return false;
    }

    /**
     * Verifies the sizes aren't too big in the Zip64Mode.Never case and returns whether the entry would require a Zip64 extra field.
     */
    private boolean checkIfNeedsZip64(final Zip64Mode effectiveMode) throws ZipException {
        final boolean actuallyNeedsZip64 = isZip64Required(entry.entry, effectiveMode);
        if (actuallyNeedsZip64 && effectiveMode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry));
        }
        return actuallyNeedsZip64;
    }

    /**
     * Closes this output stream and releases any system resources associated with the stream.
     *
     * @throws IOException if an I/O error occurs.
     * @throws Zip64RequiredException if the archive's size exceeds 4 GByte or there are more than 65535 entries inside the archive and {@link #setUseZip64} is
     *             {@link Zip64Mode#Never}.
     */
    @Override
    public void close() throws IOException {
        try {
            if (!finished) {
                finish();
            }
        } finally {
            destroy();
        }
    }

    /**
     * Writes all necessary data for this entry.
     *
     * @throws IOException on error
     * @throws Zip64RequiredException if the entry's uncompressed or compressed size exceeds 4 GByte and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    @Override
    public void closeArchiveEntry() throws IOException {
        preClose();

        flushDeflater();

        final long bytesWritten = streamCompressor.getTotalBytesWritten() - entry.dataStart;
        final long realCrc = streamCompressor.getCrc32();
        entry.bytesRead = streamCompressor.getBytesRead();
        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        final boolean actuallyNeedsZip64 = handleSizesAndCrc(bytesWritten, realCrc, effectiveMode);
        closeEntry(actuallyNeedsZip64, false);
        streamCompressor.reset();
    }

    /**
     * Writes all necessary data for this entry.
     *
     * @param phased This entry is the second phase of a 2-phase ZIP creation; size, compressed size and CRC are already known in the ZipArchiveEntry
     * @throws IOException on error
     * @throws Zip64RequiredException if the entry's uncompressed or compressed size exceeds 4 GByte and {@link #setUseZip64} is {@link Zip64Mode#Never}.
     */
    private void closeCopiedEntry(final boolean phased) throws IOException {
        preClose();
        entry.bytesRead = entry.entry.getSize();
        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        final boolean actuallyNeedsZip64 = checkIfNeedsZip64(effectiveMode);
        closeEntry(actuallyNeedsZip64, phased);
    }

    private void closeEntry(final boolean actuallyNeedsZip64, final boolean phased) throws IOException {
        if (!phased && out instanceof RandomAccessOutputStream) {
            rewriteSizesAndCrc(actuallyNeedsZip64);
        }

        if (!phased) {
            writeDataDescriptor(entry.entry);
        }
        entry = null;
    }

    private void copyFromZipInputStream(final InputStream src) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        entry.hasWritten = true;
        int length;
        while ((length = src.read(copyBuffer)) >= 0) {
            streamCompressor.writeCounted(copyBuffer, 0, length);
            count(length);
        }
    }

    /**
     * Creates a new ZIP entry taking some information from the given file and using the provided name.
     * <p>
     * The name will be adjusted to end with a forward slash "/" if the file is a directory. If the file is not a directory a potential trailing forward slash
     * will be stripped from the entry name.
     * </p>
     * <p>
     * Must not be used if the stream has already been closed.
     * </p>
     */
    @Override
    public ZipArchiveEntry createArchiveEntry(final File inputFile, final String entryName) throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputFile, entryName);
    }

    /**
     * Creates a new ZIP entry taking some information from the given file and using the provided name.
     * <p>
     * The name will be adjusted to end with a forward slash "/" if the file is a directory. If the file is not a directory a potential trailing forward slash
     * will be stripped from the entry name.
     * </p>
     * <p>
     * Must not be used if the stream has already been closed.
     * </p>
     *
     * @param inputPath path to create the entry from.
     * @param entryName name of the entry.
     * @param options options indicating how symbolic links are handled.
     * @return a new instance.
     * @throws IOException if an I/O error occurs.
     * @since 1.21
     */
    @Override
    public ZipArchiveEntry createArchiveEntry(final Path inputPath, final String entryName, final LinkOption... options) throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }
        return new ZipArchiveEntry(inputPath, entryName);
    }

    private byte[] createCentralFileHeader(final ZipArchiveEntry ze) throws IOException {

        final EntryMetaData entryMetaData = metaData.get(ze);
        final boolean needsZip64Extra = hasZip64Extra(ze) || ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC || ze.getSize() >= ZipConstants.ZIP64_MAGIC
                || entryMetaData.offset >= ZipConstants.ZIP64_MAGIC || ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT
                || zip64Mode == Zip64Mode.Always || zip64Mode == Zip64Mode.AlwaysWithCompatibility;

        if (needsZip64Extra && zip64Mode == Zip64Mode.Never) {
            // must be the offset that is too big, otherwise an
            // exception would have been thrown in putArchiveEntry or
            // closeArchiveEntry
            throw new Zip64RequiredException(Zip64RequiredException.ARCHIVE_TOO_BIG_MESSAGE);
        }

        handleZip64Extra(ze, entryMetaData.offset, needsZip64Extra);

        return createCentralFileHeader(ze, getName(ze), entryMetaData, needsZip64Extra);
    }

    /**
     * Writes the central file header entry.
     *
     * @param ze the entry to write
     * @param name The encoded name
     * @param entryMetaData meta data for this file
     * @throws IOException on error
     */
    private byte[] createCentralFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, final EntryMetaData entryMetaData, final boolean needsZip64Extra)
            throws IOException {
        if (isSplitZip) {
            // calculate the disk number for every central file header,
            // this will be used in writing End Of Central Directory and Zip64 End Of Central Directory
            final int currentSplitSegment = ((ZipSplitOutputStream) this.out).getCurrentSplitSegmentIndex();
            if (numberOfCDInDiskData.get(currentSplitSegment) == null) {
                numberOfCDInDiskData.put(currentSplitSegment, 1);
            } else {
                final int originalNumberOfCD = numberOfCDInDiskData.get(currentSplitSegment);
                numberOfCDInDiskData.put(currentSplitSegment, originalNumberOfCD + 1);
            }
        }

        final byte[] extra = ze.getCentralDirectoryExtra();
        final int extraLength = extra.length;

        // file comment length
        String comm = ze.getComment();
        if (comm == null) {
            comm = "";
        }

        final ByteBuffer commentB = getEntryEncoding(ze).encode(comm);
        final int nameLen = name.limit() - name.position();
        final int commentLen = commentB.limit() - commentB.position();
        final int len = CFH_FILENAME_OFFSET + nameLen + extraLength + commentLen;
        final byte[] buf = new byte[len];

        System.arraycopy(CFH_SIG, 0, buf, CFH_SIG_OFFSET, ZipConstants.WORD);

        // version made by
        // CheckStyle:MagicNumber OFF
        ZipShort.putShort(ze.getPlatform() << 8 | (!hasUsedZip64 ? ZipConstants.DATA_DESCRIPTOR_MIN_VERSION : ZipConstants.ZIP64_MIN_VERSION), buf,
                CFH_VERSION_MADE_BY_OFFSET);

        final int zipMethod = ze.getMethod();
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        ZipShort.putShort(versionNeededToExtract(zipMethod, needsZip64Extra, entryMetaData.usesDataDescriptor), buf, CFH_VERSION_NEEDED_OFFSET);
        getGeneralPurposeBits(!encodable && fallbackToUTF8, entryMetaData.usesDataDescriptor).encode(buf, CFH_GPB_OFFSET);

        // compression method
        ZipShort.putShort(zipMethod, buf, CFH_METHOD_OFFSET);

        // last modification time and date
        ZipUtil.toDosTime(ze.getTime(), buf, CFH_TIME_OFFSET);

        // CRC
        // compressed length
        // uncompressed length
        ZipLong.putLong(ze.getCrc(), buf, CFH_CRC_OFFSET);
        if (ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC || ze.getSize() >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always
                || zip64Mode == Zip64Mode.AlwaysWithCompatibility) {
            ZipLong.ZIP64_MAGIC.putLong(buf, CFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.ZIP64_MAGIC.putLong(buf, CFH_ORIGINAL_SIZE_OFFSET);
        } else {
            ZipLong.putLong(ze.getCompressedSize(), buf, CFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.putLong(ze.getSize(), buf, CFH_ORIGINAL_SIZE_OFFSET);
        }

        ZipShort.putShort(nameLen, buf, CFH_FILENAME_LENGTH_OFFSET);

        // extra field length
        ZipShort.putShort(extraLength, buf, CFH_EXTRA_LENGTH_OFFSET);

        ZipShort.putShort(commentLen, buf, CFH_COMMENT_LENGTH_OFFSET);

        // disk number start
        if (isSplitZip) {
            if (ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always) {
                ZipShort.putShort(ZipConstants.ZIP64_MAGIC_SHORT, buf, CFH_DISK_NUMBER_OFFSET);
            } else {
                ZipShort.putShort((int) ze.getDiskNumberStart(), buf, CFH_DISK_NUMBER_OFFSET);
            }
        } else {
            System.arraycopy(ZERO, 0, buf, CFH_DISK_NUMBER_OFFSET, ZipConstants.SHORT);
        }

        // internal file attributes
        ZipShort.putShort(ze.getInternalAttributes(), buf, CFH_INTERNAL_ATTRIBUTES_OFFSET);

        // external file attributes
        ZipLong.putLong(ze.getExternalAttributes(), buf, CFH_EXTERNAL_ATTRIBUTES_OFFSET);

        // relative offset of LFH
        if (entryMetaData.offset >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always) {
            ZipLong.putLong(ZipConstants.ZIP64_MAGIC, buf, CFH_LFH_OFFSET);
        } else {
            ZipLong.putLong(Math.min(entryMetaData.offset, ZipConstants.ZIP64_MAGIC), buf, CFH_LFH_OFFSET);
        }

        // file name
        System.arraycopy(name.array(), name.arrayOffset(), buf, CFH_FILENAME_OFFSET, nameLen);

        final int extraStart = CFH_FILENAME_OFFSET + nameLen;
        System.arraycopy(extra, 0, buf, extraStart, extraLength);

        final int commentStart = extraStart + extraLength;

        // file comment
        System.arraycopy(commentB.array(), commentB.arrayOffset(), buf, commentStart, commentLen);
        return buf;
    }

    private byte[] createLocalFileHeader(final ZipArchiveEntry ze, final ByteBuffer name, final boolean encodable, final boolean phased,
            final long archiveOffset) {
        final ZipExtraField oldEx = ze.getExtraField(ResourceAlignmentExtraField.ID);
        if (oldEx != null) {
            ze.removeExtraField(ResourceAlignmentExtraField.ID);
        }
        final ResourceAlignmentExtraField oldAlignmentEx = oldEx instanceof ResourceAlignmentExtraField ? (ResourceAlignmentExtraField) oldEx : null;

        int alignment = ze.getAlignment();
        if (alignment <= 0 && oldAlignmentEx != null) {
            alignment = oldAlignmentEx.getAlignment();
        }

        if (alignment > 1 || oldAlignmentEx != null && !oldAlignmentEx.allowMethodChange()) {
            final int oldLength = LFH_FILENAME_OFFSET + name.limit() - name.position() + ze.getLocalFileDataExtra().length;

            final int padding = (int) (-archiveOffset - oldLength - ZipExtraField.EXTRAFIELD_HEADER_SIZE - ResourceAlignmentExtraField.BASE_SIZE
                    & alignment - 1);
            ze.addExtraField(new ResourceAlignmentExtraField(alignment, oldAlignmentEx != null && oldAlignmentEx.allowMethodChange(), padding));
        }

        final byte[] extra = ze.getLocalFileDataExtra();
        final int nameLen = name.limit() - name.position();
        final int len = LFH_FILENAME_OFFSET + nameLen + extra.length;
        final byte[] buf = new byte[len];

        System.arraycopy(LFH_SIG, 0, buf, LFH_SIG_OFFSET, ZipConstants.WORD);

        // store method in local variable to prevent multiple method calls
        final int zipMethod = ze.getMethod();
        final boolean dataDescriptor = usesDataDescriptor(zipMethod, phased);

        ZipShort.putShort(versionNeededToExtract(zipMethod, hasZip64Extra(ze), dataDescriptor), buf, LFH_VERSION_NEEDED_OFFSET);

        final GeneralPurposeBit generalPurposeBit = getGeneralPurposeBits(!encodable && fallbackToUTF8, dataDescriptor);
        generalPurposeBit.encode(buf, LFH_GPB_OFFSET);

        // compression method
        ZipShort.putShort(zipMethod, buf, LFH_METHOD_OFFSET);

        ZipUtil.toDosTime(ze.getTime(), buf, LFH_TIME_OFFSET);

        // CRC
        if (phased || !(zipMethod == DEFLATED || out instanceof RandomAccessOutputStream)) {
            ZipLong.putLong(ze.getCrc(), buf, LFH_CRC_OFFSET);
        } else {
            System.arraycopy(LZERO, 0, buf, LFH_CRC_OFFSET, ZipConstants.WORD);
        }

        // compressed length
        // uncompressed length
        if (hasZip64Extra(entry.entry)) {
            // point to ZIP64 extended information extra field for
            // sizes, may get rewritten once sizes are known if
            // stream is seekable
            ZipLong.ZIP64_MAGIC.putLong(buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.ZIP64_MAGIC.putLong(buf, LFH_ORIGINAL_SIZE_OFFSET);
        } else if (phased) {
            ZipLong.putLong(ze.getCompressedSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
        } else if (zipMethod == DEFLATED || out instanceof RandomAccessOutputStream) {
            System.arraycopy(LZERO, 0, buf, LFH_COMPRESSED_SIZE_OFFSET, ZipConstants.WORD);
            System.arraycopy(LZERO, 0, buf, LFH_ORIGINAL_SIZE_OFFSET, ZipConstants.WORD);
        } else { // Stored
            ZipLong.putLong(ze.getSize(), buf, LFH_COMPRESSED_SIZE_OFFSET);
            ZipLong.putLong(ze.getSize(), buf, LFH_ORIGINAL_SIZE_OFFSET);
        }
        // file name length
        ZipShort.putShort(nameLen, buf, LFH_FILENAME_LENGTH_OFFSET);

        // extra field length
        ZipShort.putShort(extra.length, buf, LFH_EXTRA_LENGTH_OFFSET);

        // file name
        System.arraycopy(name.array(), name.arrayOffset(), buf, LFH_FILENAME_OFFSET, nameLen);

        // extra fields
        System.arraycopy(extra, 0, buf, LFH_FILENAME_OFFSET + nameLen, extra.length);

        return buf;
    }

    /**
     * Writes next block of compressed data to the output stream.
     *
     * @throws IOException on error
     */
    protected final void deflate() throws IOException {
        streamCompressor.deflate();
    }

    /**
     * Closes the underlying stream/file without finishing the archive; the result will likely be a corrupt archive.
     * <p>
     * This method only exists to support tests that generate corrupt archives so they can clean up any temporary files.
     * </p>
     */
    void destroy() throws IOException {
        if (out != null) {
            out.close();
        }
    }

    /**
     * {@inheritDoc}
     *
     * @throws Zip64RequiredException if the archive's size exceeds 4 GByte or there are more than 65535 entries inside the archive and {@link #setUseZip64} is
     *             {@link Zip64Mode#Never}.
     */
    @Override
    public void finish() throws IOException {
        if (finished) {
            throw new IOException("This archive has already been finished");
        }

        if (entry != null) {
            throw new IOException("This archive contains unclosed entries.");
        }

        final long cdOverallOffset = streamCompressor.getTotalBytesWritten();
        cdOffset = cdOverallOffset;
        if (isSplitZip) {
            // when creating a split zip, the offset should be
            // the offset to the corresponding segment disk
            final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.out;
            cdOffset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten();
            cdDiskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex();
        }
        writeCentralDirectoryInChunks();

        cdLength = streamCompressor.getTotalBytesWritten() - cdOverallOffset;

        // calculate the length of end of central directory, as it may be used in writeZip64CentralDirectory
        final ByteBuffer commentData = this.zipEncoding.encode(comment);
        final long commentLength = (long) commentData.limit() - commentData.position();
        eocdLength = ZipConstants.WORD /* length of EOCD_SIG */
                + ZipConstants.SHORT /* number of this disk */
                + ZipConstants.SHORT /* disk number of start of central directory */
                + ZipConstants.SHORT /* total number of entries on this disk */
                + ZipConstants.SHORT /* total number of entries */
                + ZipConstants.WORD /* size of central directory */
                + ZipConstants.WORD /* offset of start of central directory */
                + ZipConstants.SHORT /* ZIP comment length */
                + commentLength /* ZIP comment */;

        writeZip64CentralDirectory();
        writeCentralDirectoryEnd();
        metaData.clear();
        entries.clear();
        streamCompressor.close();
        if (isSplitZip) {
            // trigger the ZipSplitOutputStream to write the final split segment
            out.close();
        }
        finished = true;
    }

    /**
     * Flushes this output stream and forces any buffered output bytes to be written out to the stream.
     *
     * @throws IOException if an I/O error occurs.
     */
    @Override
    public void flush() throws IOException {
        if (out != null) {
            out.flush();
        }
    }

    /**
     * Ensures all bytes sent to the deflater are written to the stream.
     */
    private void flushDeflater() throws IOException {
        if (entry.entry.getMethod() == DEFLATED) {
            streamCompressor.flushDeflater();
        }
    }

    /**
     * Returns the total number of bytes written to this stream.
     *
     * @return the number of written bytes
     * @since 1.22
     */
    @Override
    public long getBytesWritten() {
        return streamCompressor.getTotalBytesWritten();
    }

    /**
     * If the mode is AsNeeded and the entry is a compressed entry of unknown size that gets written to a non-seekable stream, then change the default to
     * Never.
     *
     * @since 1.3
     */
    private Zip64Mode getEffectiveZip64Mode(final ZipArchiveEntry ze) {
        if (zip64Mode != Zip64Mode.AsNeeded || out instanceof RandomAccessOutputStream ||
                ze.getMethod() != DEFLATED || ze.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
            return zip64Mode;
        }
        return Zip64Mode.Never;
    }

    /**
     * The encoding to use for file names and the file comment.
     *
     * @return null if using the platform's default character encoding.
     */
    public String getEncoding() {
        return charset != null ? charset.name() : null;
    }

    private ZipEncoding getEntryEncoding(final ZipArchiveEntry ze) {
        final boolean encodable = zipEncoding.canEncode(ze.getName());
        return !encodable && fallbackToUTF8 ? ZipEncodingHelper.ZIP_ENCODING_UTF_8 : zipEncoding;
    }

    private GeneralPurposeBit getGeneralPurposeBits(final boolean utfFallback, final boolean usesDataDescriptor) {
        final GeneralPurposeBit b = new GeneralPurposeBit();
        b.useUTF8ForNames(useUTF8Flag || utfFallback);
        if (usesDataDescriptor) {
            b.useDataDescriptor(true);
        }
        return b;
    }

    private ByteBuffer getName(final ZipArchiveEntry ze) throws IOException {
        return getEntryEncoding(ze).encode(ze.getName());
    }

    /**
     * Gets the existing ZIP64 extended information extra field or creates a new one and adds it to the entry.
     *
     * @since 1.3
     */
    private Zip64ExtendedInformationExtraField getZip64Extra(final ZipArchiveEntry ze) {
        if (entry != null) {
            entry.causedUseOfZip64 = !hasUsedZip64;
        }
        hasUsedZip64 = true;
        final ZipExtraField extra = ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
        Zip64ExtendedInformationExtraField z64 = extra instanceof Zip64ExtendedInformationExtraField ? (Zip64ExtendedInformationExtraField) extra : null;
        if (z64 == null) {
            /*
             * System.err.println("Adding z64 for " + ze.getName() + ", method: " + ze.getMethod() + " (" + (ze.getMethod() == STORED) + ")" + ", channel: " +
             * (channel != null));
             */
            z64 = new Zip64ExtendedInformationExtraField();
        }

        // even if the field is there already, make sure it is the first one
        ze.addAsFirstExtraField(z64);

        return z64;
    }

    /**
     * Ensures the current entry's size and CRC information is set to the values just written, verifies it isn't too big in the Zip64Mode.Never case and
     * returns whether the entry would require a Zip64 extra field.
     */
    private boolean handleSizesAndCrc(final long bytesWritten, final long crc, final Zip64Mode effectiveMode) throws ZipException {
        if (entry.entry.getMethod() == DEFLATED) {
            /*
             * It turns out def.getBytesRead() returns wrong values if the size exceeds 4 GB on Java < Java7 entry.entry.setSize(def.getBytesRead());
             */
            entry.entry.setSize(entry.bytesRead);
            entry.entry.setCompressedSize(bytesWritten);
            entry.entry.setCrc(crc);

        } else if (!(out instanceof RandomAccessOutputStream)) {
            if (entry.entry.getCrc() != crc) {
                throw new ZipException("Bad CRC checksum for entry " + entry.entry.getName() + ": " + Long.toHexString(entry.entry.getCrc()) + " instead of "
                        + Long.toHexString(crc));
            }

            if (entry.entry.getSize() != bytesWritten) {
                throw new ZipException("Bad size for entry " + entry.entry.getName() + ": " + entry.entry.getSize() + " instead of " + bytesWritten);
            }
        } else { /* method is STORED and we used SeekableByteChannel */
            entry.entry.setSize(bytesWritten);
            entry.entry.setCompressedSize(bytesWritten);
            entry.entry.setCrc(crc);
        }

        return checkIfNeedsZip64(effectiveMode);
    }

    /**
     * If the entry needs Zip64 extra information inside the central directory then configure its data.
     */
    private void handleZip64Extra(final ZipArchiveEntry ze, final long lfhOffset, final boolean needsZip64Extra) {
        if (needsZip64Extra) {
            final Zip64ExtendedInformationExtraField z64 = getZip64Extra(ze);
            if (ze.getCompressedSize() >= ZipConstants.ZIP64_MAGIC || ze.getSize() >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always
                    || zip64Mode == Zip64Mode.AlwaysWithCompatibility) {
                z64.setCompressedSize(new ZipEightByteInteger(ze.getCompressedSize()));
                z64.setSize(new ZipEightByteInteger(ze.getSize()));
            } else {
                // reset value that may have been set for LFH
                z64.setCompressedSize(null);
                z64.setSize(null);
            }

            final boolean needsToEncodeLfhOffset = lfhOffset >= ZipConstants.ZIP64_MAGIC || zip64Mode == Zip64Mode.Always;
            final boolean needsToEncodeDiskNumberStart = ze.getDiskNumberStart() >= ZipConstants.ZIP64_MAGIC_SHORT || zip64Mode == Zip64Mode.Always;

            if (needsToEncodeLfhOffset || needsToEncodeDiskNumberStart) {
                z64.setRelativeHeaderOffset(new ZipEightByteInteger(lfhOffset));
            }
            if (needsToEncodeDiskNumberStart) {
                z64.setDiskStartNumber(new ZipLong(ze.getDiskNumberStart()));
            }
            ze.setExtra();
        }
    }

    /**
     * Is there a ZIP64 extended information extra field for the entry?
     *
     * @since 1.3
     */
    private boolean hasZip64Extra(final ZipArchiveEntry ze) {
        return ze.getExtraField(Zip64ExtendedInformationExtraField.HEADER_ID) instanceof Zip64ExtendedInformationExtraField;
    }

    /**
     * This method indicates whether this archive is writing to a seekable stream (i.e., to a random access file).
     * <p>
     * For seekable streams, you don't need to calculate the CRC or uncompressed size for {@link #STORED} entries before invoking
     * {@link #putArchiveEntry(ZipArchiveEntry)}.
     * </p>
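     * <p>
     * A hedged sketch of the non-seekable STORED case described above, using {@code java.util.zip.CRC32}; the {@code zipOut} variable and entry name are
     * illustrative only:
     * </p>
     * <pre>{@code
     * if (!zipOut.isSeekable()) {
     *     byte[] data = "payload".getBytes(StandardCharsets.UTF_8);
     *     ZipArchiveEntry stored = new ZipArchiveEntry("stored.bin");
     *     stored.setMethod(ZipArchiveOutputStream.STORED);
     *     stored.setSize(data.length);
     *     CRC32 crc = new CRC32();
     *     crc.update(data);
     *     stored.setCrc(crc.getValue());
     *     zipOut.putArchiveEntry(stored);
     *     zipOut.write(data);
     *     zipOut.closeArchiveEntry();
     * }
     * }</pre>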
     *
     * @return true if seekable
     */
    public boolean isSeekable() {
        return out instanceof RandomAccessOutputStream;
    }

    private boolean isTooLargeForZip32(final ZipArchiveEntry zipArchiveEntry) {
        return zipArchiveEntry.getSize() >= ZipConstants.ZIP64_MAGIC || zipArchiveEntry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC;
    }

    private boolean isZip64Required(final ZipArchiveEntry entry1, final Zip64Mode requestedMode) {
        return requestedMode == Zip64Mode.Always || requestedMode == Zip64Mode.AlwaysWithCompatibility || isTooLargeForZip32(entry1);
    }

    private void preClose() throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }

        if (entry == null) {
            throw new IOException("No current entry to close");
        }

        if (!entry.hasWritten) {
            write(ByteUtils.EMPTY_BYTE_ARRAY, 0, 0);
        }
    }

    /**
     * {@inheritDoc}
     *
     * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
     * @throws Zip64RequiredException if the entry's uncompressed or compressed size is known to exceed 4 GByte and {@link #setUseZip64} is
     *             {@link Zip64Mode#Never}.
     */
    @Override
    public void putArchiveEntry(final ZipArchiveEntry archiveEntry) throws IOException {
        putArchiveEntry(archiveEntry, false);
    }

    /**
     * Writes the headers for an archive entry to the output stream. The caller must then write the content to the stream and call {@link #closeArchiveEntry()}
     * to complete the process.
     *
     * @param archiveEntry The archiveEntry
     * @param phased If true, size, compressedSize and crc are required to be known up-front in the archiveEntry
     * @throws ClassCastException if entry is not an instance of ZipArchiveEntry
     * @throws Zip64RequiredException if the entry's uncompressed or compressed size is known to exceed 4 GByte and {@link #setUseZip64} is
     *             {@link Zip64Mode#Never}.
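     *             <p>
     *             For reference, {@link #addRawArchiveEntry} drives this 2-phase path. A hedged sketch of copying a raw entry from an existing archive; the
     *             file names and the {@code zipOut} variable are illustrative only:
     *             </p>
     *             <pre>{@code
     * try (ZipFile source = new ZipFile(new File("in.zip"))) {
     *     ZipArchiveEntry raw = source.getEntry("data.bin");
     *     zipOut.addRawArchiveEntry(raw, source.getRawInputStream(raw));
     * }
     *             }</pre>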
     */
    private void putArchiveEntry(final ZipArchiveEntry archiveEntry, final boolean phased) throws IOException {
        if (finished) {
            throw new IOException("Stream has already been finished");
        }

        if (entry != null) {
            closeArchiveEntry();
        }

        entry = new CurrentEntry(archiveEntry);
        entries.add(entry.entry);

        setDefaults(entry.entry);

        final Zip64Mode effectiveMode = getEffectiveZip64Mode(entry.entry);
        validateSizeInformation(effectiveMode);

        if (shouldAddZip64Extra(entry.entry, effectiveMode)) {

            final Zip64ExtendedInformationExtraField z64 = getZip64Extra(entry.entry);

            final ZipEightByteInteger size;
            final ZipEightByteInteger compressedSize;
            if (phased) {
                // sizes are already known
                size = new ZipEightByteInteger(entry.entry.getSize());
                compressedSize = new ZipEightByteInteger(entry.entry.getCompressedSize());
            } else if (entry.entry.getMethod() == STORED && entry.entry.getSize() != ArchiveEntry.SIZE_UNKNOWN) {
                // actually, we already know the sizes
                compressedSize = size = new ZipEightByteInteger(entry.entry.getSize());
            } else {
                // just a placeholder, real data will be in data
                // descriptor or inserted later via SeekableByteChannel
                compressedSize = size = ZipEightByteInteger.ZERO;
            }
            z64.setSize(size);
            z64.setCompressedSize(compressedSize);
            entry.entry.setExtra();
        }

        if (entry.entry.getMethod() == DEFLATED && hasCompressionLevelChanged) {
            def.setLevel(level);
            hasCompressionLevelChanged = false;
        }
        writeLocalFileHeader(archiveEntry, phased);
    }

    /**
     * When using random access output, write the local file header and potentially the ZIP64 extra containing the correct CRC and compressed/uncompressed
     * sizes.
     */
    private void rewriteSizesAndCrc(final boolean actuallyNeedsZip64) throws IOException {
        final RandomAccessOutputStream randomStream = (RandomAccessOutputStream) out;
        long dataStart = entry.localDataStart;
        if (randomStream instanceof ZipSplitOutputStream) {
            dataStart = ((ZipSplitOutputStream) randomStream).calculateDiskPosition(entry.entry.getDiskNumberStart(), dataStart);
        }

        long position = dataStart;
        randomStream.writeFully(ZipLong.getBytes(entry.entry.getCrc()), position);
        position += ZipConstants.WORD;
        if (!hasZip64Extra(entry.entry) || !actuallyNeedsZip64) {
            randomStream.writeFully(ZipLong.getBytes(entry.entry.getCompressedSize()), position);
            position += ZipConstants.WORD;
            randomStream.writeFully(ZipLong.getBytes(entry.entry.getSize()), position);
            position += ZipConstants.WORD;
        } else {
            randomStream.writeFully(ZipLong.ZIP64_MAGIC.getBytes(), position);
            position += ZipConstants.WORD;
            randomStream.writeFully(ZipLong.ZIP64_MAGIC.getBytes(), position);
            position += ZipConstants.WORD;
        }

        if (hasZip64Extra(entry.entry)) {
            final ByteBuffer name = getName(entry.entry);
            final int nameLen = name.limit() - name.position();
            // seek to ZIP64 extra, skip header and size information
            position = dataStart + 3 * ZipConstants.WORD + 2 * ZipConstants.SHORT + nameLen + 2 * ZipConstants.SHORT;
            // inside the ZIP64 extra uncompressed size comes
            // first, unlike the LFH, CD or data descriptor
            randomStream.writeFully(ZipEightByteInteger.getBytes(entry.entry.getSize()), position);
            position += ZipConstants.DWORD;
            randomStream.writeFully(ZipEightByteInteger.getBytes(entry.entry.getCompressedSize()), position);
            position += ZipConstants.DWORD;

            if (!actuallyNeedsZip64) {
                // do some cleanup:
                // * rewrite version needed to extract
                position = dataStart - 5 * ZipConstants.SHORT;
                randomStream.writeFully(ZipShort.getBytes(versionNeededToExtract(entry.entry.getMethod(), false, false)), position);
                position += ZipConstants.SHORT;

                // * remove ZIP64 extra, so it doesn't get written
                //   to the central directory
                entry.entry.removeExtraField(Zip64ExtendedInformationExtraField.HEADER_ID);
                entry.entry.setExtra();

                // * reset hasUsedZip64 if it has been set because
                //   of this entry
                if (entry.causedUseOfZip64) {
                    hasUsedZip64 = false;
                }
            }
        }
    }

    /**
     * Sets the file comment.
     *
     * @param comment the comment
     */
    public void setComment(final String comment) {
        this.comment = comment;
    }

    /**
     * Whether to create Unicode Extra Fields.
     * <p>
     * Defaults to NEVER.
     * </p>
     *
     * @param b whether to create Unicode Extra Fields.
     */
    public void setCreateUnicodeExtraFields(final UnicodeExtraFieldPolicy b) {
        createUnicodeExtraFields = b;
    }

    /**
     * Provides default values for compression method and last modification time.
     */
    private void setDefaults(final ZipArchiveEntry entry) {
        if (entry.getMethod() == -1) { // not specified
            entry.setMethod(method);
        }

        if (entry.getTime() == -1) { // not specified
            entry.setTime(System.currentTimeMillis());
        }
    }

    private void setEncoding(final Charset encoding) {
        this.charset = encoding;
        this.zipEncoding = ZipEncodingHelper.getZipEncoding(encoding);
        if (useUTF8Flag && !ZipEncodingHelper.isUTF8(encoding)) {
            useUTF8Flag = false;
        }
    }

    /**
     * The encoding to use for file names and the file comment.
     * <p>
     * For a list of possible values see <a href="https://docs.oracle.com/javase/8/docs/technotes/guides/intl/encoding.doc.html">Supported Encodings</a>.
     * Defaults to UTF-8.
     * </p>
     *
     * @param encoding the encoding to use for file names, use null for the platform's default encoding
     */
    public void setEncoding(final String encoding) {
        setEncoding(Charsets.toCharset(encoding));
    }

    /**
     * Whether to fall back to UTF-8 and the language encoding flag if the file name cannot be encoded using the specified encoding.
     * <p>
     * Defaults to false.
     * </p>
     *
     * @param b whether to fall back to UTF-8 and the language encoding flag if the file name cannot be encoded using the specified encoding.
     */
    public void setFallbackToUTF8(final boolean b) {
        fallbackToUTF8 = b;
    }

    /**
     * Sets the compression level for subsequent entries.
     * <p>
     * Default is Deflater.DEFAULT_COMPRESSION.
     * </p>
     *
     * @param level the compression level.
     * @throws IllegalArgumentException if an invalid compression level is specified.
     */
    public void setLevel(final int level) {
        if (level < Deflater.DEFAULT_COMPRESSION || level > Deflater.BEST_COMPRESSION) {
            throw new IllegalArgumentException("Invalid compression level: " + level);
        }
        if (this.level == level) {
            return;
        }
        hasCompressionLevelChanged = true;
        this.level = level;
    }

    /**
     * Sets the default compression method for subsequent entries.
     * <p>
     * Default is DEFLATED.
     * </p>
     *
     * @param method an {@code int} from java.util.zip.ZipEntry
     */
    public void setMethod(final int method) {
        this.method = method;
    }

    /**
     * Whether to set the language encoding flag if the file name encoding is UTF-8.
     * <p>
     * Defaults to true.
     * </p>
     *
     * @param b whether to set the language encoding flag if the file name encoding is UTF-8
     */
    public void setUseLanguageEncodingFlag(final boolean b) {
        useUTF8Flag = b && ZipEncodingHelper.isUTF8(charset);
    }

    /**
     * Whether Zip64 extensions will be used.
     * <p>
     * When setting the mode to {@link Zip64Mode#Never Never}, {@link #putArchiveEntry}, {@link #closeArchiveEntry}, {@link #finish} or {@link #close} may throw
     * a {@link Zip64RequiredException} if the entry's size or the total size of the archive exceeds 4GB or there are more than 65,536 entries inside the
     * archive. Any archive created in this mode will be readable by implementations that don't support Zip64.
     * </p>
     * <p>
     * When setting the mode to {@link Zip64Mode#Always Always}, Zip64 extensions will be used for all entries. Any archive created in this mode may be
     * unreadable by implementations that don't support Zip64 even if all its contents would be.
     * </p>
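     * <p>
     * A hedged one-line sketch of forcing strict Zip32 compatibility; the {@code zipOut} variable is illustrative only:
     * </p>
     * <pre>{@code
     * zipOut.setUseZip64(Zip64Mode.Never); // putArchiveEntry/finish may now throw Zip64RequiredException for oversized content
     * }</pre>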
     * <p>
     * When setting the mode to {@link Zip64Mode#AsNeeded AsNeeded}, Zip64 extensions will transparently be used for those entries that require them. This mode
     * can only be used if the uncompressed size of the {@link ZipArchiveEntry} is known when calling {@link #putArchiveEntry} or the archive is written to a
     * seekable output (i.e. you have used the {@link #ZipArchiveOutputStream(java.io.File) File-arg constructor}) - this mode is not valid when the output
     * stream is not seekable and the uncompressed size is unknown when {@link #putArchiveEntry} is called.
     * </p>
     * <p>
     * If no entry inside the resulting archive requires Zip64 extensions then {@link Zip64Mode#Never Never} will create the smallest archive.
     * {@link Zip64Mode#AsNeeded AsNeeded} will create a slightly bigger archive if the uncompressed size of any entry has initially been unknown and create an
     * archive identical to {@link Zip64Mode#Never Never} otherwise. {@link Zip64Mode#Always Always} will create an archive that is at least 24 bytes per entry
     * bigger than the one {@link Zip64Mode#Never Never} would create.
     * </p>
     * <p>
     * Defaults to {@link Zip64Mode#AsNeeded AsNeeded} unless {@link #putArchiveEntry} is called with an entry of unknown size and data is written to a
     * non-seekable stream - in this case the default is {@link Zip64Mode#Never Never}.
     * </p>
     *
     * @since 1.3
     * @param mode Whether Zip64 extensions will be used.
     */
    public void setUseZip64(final Zip64Mode mode) {
        zip64Mode = mode;
    }

    /**
     * Whether to add a Zip64 extended information extra field to the local file header.
     * <p>
     * Returns true if
     * </p>
     * <ul>
     * <li>mode is Always,</li>
     * <li>or we already know it is going to be needed,</li>
     * <li>or the size is unknown and we can ensure it won't hurt other implementations if we add it (i.e. we can erase its usage).</li>
     * </ul>
     */
    private boolean shouldAddZip64Extra(final ZipArchiveEntry entry, final Zip64Mode mode) {
        return mode == Zip64Mode.Always || mode == Zip64Mode.AlwaysWithCompatibility || entry.getSize() >= ZipConstants.ZIP64_MAGIC
                || entry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC
                || entry.getSize() == ArchiveEntry.SIZE_UNKNOWN && out instanceof RandomAccessOutputStream && mode != Zip64Mode.Never;
    }

    /**
     * 4.4.1.4 If one of the fields in the end of central directory record is too small to hold required data, the field SHOULD be set to -1 (0xFFFF or
     * 0xFFFFFFFF) and the ZIP64 format record SHOULD be created.
     *
     * @return true if the Zip64 End Of Central Directory is needed
     */
    private boolean shouldUseZip64EOCD() {
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.out).getCurrentSplitSegmentIndex();
        }
        final int numOfEntriesOnThisDisk = numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0);
        return numberOfThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT /* number of this disk */
                || cdDiskNumberStart >= ZipConstants.ZIP64_MAGIC_SHORT /* number of the disk with the start of the central directory */
                || numOfEntriesOnThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT /* total number of entries in the central directory on this disk */
                || entries.size() >= ZipConstants.ZIP64_MAGIC_SHORT /* total number of entries in the central directory */
                || cdLength >= ZipConstants.ZIP64_MAGIC /* size of the central directory */
                || cdOffset >= ZipConstants.ZIP64_MAGIC; /*
                                                          * offset of start of central directory with respect to the starting disk number
                                                          */
    }

    private boolean usesDataDescriptor(final int zipMethod, final boolean phased) {
        return !phased && zipMethod == DEFLATED && !(out instanceof RandomAccessOutputStream);
    }

    /**
     * If the Zip64 mode is set to Never, verifies that none of the fields in the End Of Central Directory exceed their limits.
     *
     * @throws Zip64RequiredException if Zip64 is actually needed
     */
    private void validateIfZip64IsNeededInEOCD() throws Zip64RequiredException {
        // an exception will only be thrown if the Zip64 mode is Never while Zip64 is actually needed
        if (zip64Mode != Zip64Mode.Never) {
            return;
        }

        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.out).getCurrentSplitSegmentIndex();
        }
        if (numberOfThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.DISK_NUMBER_TOO_BIG_MESSAGE);
        }

        if (cdDiskNumberStart >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.CENTRAL_DIRECTORY_DISK_NUMBER_TOO_BIG_MESSAGE);
        }

        final int numOfEntriesOnThisDisk = numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0);
        if (numOfEntriesOnThisDisk >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.TOO_MANY_ENTRIES_ON_DISK_MESSAGE);
        }

        // number of entries
        if (entries.size() >= ZipConstants.ZIP64_MAGIC_SHORT) {
            throw new Zip64RequiredException(Zip64RequiredException.TOO_MANY_ENTRIES_MESSAGE);
        }

        if (cdLength >= ZipConstants.ZIP64_MAGIC) {
            throw new Zip64RequiredException(Zip64RequiredException.CENTRAL_DIRECTORY_SIZE_TOO_BIG_MESSAGE);
        }

        if (cdOffset >= ZipConstants.ZIP64_MAGIC) {
            throw new Zip64RequiredException(Zip64RequiredException.ARCHIVE_TOO_BIG_MESSAGE);
        }
    }

    /**
     * Throws an exception if the size is unknown for a stored entry that is written to a non-seekable output or the entry is too big to be written without
     * a Zip64 extra but the mode has been set to Never.
     */
    private void validateSizeInformation(final Zip64Mode effectiveMode) throws ZipException {
        // Size/CRC not required if SeekableByteChannel is used
        if (entry.entry.getMethod() == STORED && !(out instanceof RandomAccessOutputStream)) {
            if (entry.entry.getSize() == ArchiveEntry.SIZE_UNKNOWN) {
                throw new ZipException("Uncompressed size is required for STORED method when not writing to a file");
            }
            if (entry.entry.getCrc() == ZipArchiveEntry.CRC_UNKNOWN) {
                throw new ZipException("CRC checksum is required for STORED method when not writing to a file");
            }
            entry.entry.setCompressedSize(entry.entry.getSize());
        }

        if ((entry.entry.getSize() >= ZipConstants.ZIP64_MAGIC || entry.entry.getCompressedSize() >= ZipConstants.ZIP64_MAGIC)
                && effectiveMode == Zip64Mode.Never) {
            throw new Zip64RequiredException(Zip64RequiredException.getEntryTooBigMessage(entry.entry));
        }
    }

    private int versionNeededToExtract(final int zipMethod, final boolean zip64, final boolean usedDataDescriptor) {
        if (zip64) {
            return ZipConstants.ZIP64_MIN_VERSION;
        }
        if (usedDataDescriptor) {
            return ZipConstants.DATA_DESCRIPTOR_MIN_VERSION;
        }
        return versionNeededToExtractMethod(zipMethod);
    }

    private int versionNeededToExtractMethod(final int zipMethod) {
        return zipMethod == DEFLATED ? ZipConstants.DEFLATE_MIN_VERSION : ZipConstants.INITIAL_VERSION;
    }

    /**
     * Writes bytes to ZIP entry.
     *
     * @param b the byte array to write
     * @param offset the start position to write from
     * @param length the number of bytes to write
     * @throws IOException on error
     */
    @Override
    public void write(final byte[] b, final int offset, final int length) throws IOException {
        if (entry == null) {
            throw new IllegalStateException("No current entry");
        }
        ZipUtil.checkRequestedFeatures(entry.entry);
        final long writtenThisTime = streamCompressor.write(b, offset, length, entry.entry.getMethod());
        count(writtenThisTime);
    }

    /**
     * Writes the "End of central dir record".
     *
     * @throws IOException on error
     * @throws Zip64RequiredException if the archive's size exceeds 4 GByte or there are more than 65535 entries inside the archive and
     *             {@link #setUseZip64(Zip64Mode)} is {@link Zip64Mode#Never}.
     */
    protected void writeCentralDirectoryEnd() throws IOException {
        if (!hasUsedZip64 && isSplitZip) {
            ((ZipSplitOutputStream) this.out).prepareToWriteUnsplittableContent(eocdLength);
        }

        validateIfZip64IsNeededInEOCD();

        writeCounted(EOCD_SIG);

        // number of this disk
        int numberOfThisDisk = 0;
        if (isSplitZip) {
            numberOfThisDisk = ((ZipSplitOutputStream) this.out).getCurrentSplitSegmentIndex();
        }
        writeCounted(ZipShort.getBytes(numberOfThisDisk));

        // disk number of the start of central directory
        writeCounted(ZipShort.getBytes((int) cdDiskNumberStart));

        // number of entries
        final int numberOfEntries = entries.size();

        // total number of entries in the central directory on this disk
        final int numOfEntriesOnThisDisk = isSplitZip ? numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0) : numberOfEntries;
        final byte[] numOfEntriesOnThisDiskData = ZipShort.getBytes(Math.min(numOfEntriesOnThisDisk, ZipConstants.ZIP64_MAGIC_SHORT));
        writeCounted(numOfEntriesOnThisDiskData);

        // number of entries
        final byte[] num = ZipShort.getBytes(Math.min(numberOfEntries, ZipConstants.ZIP64_MAGIC_SHORT));
        writeCounted(num);

        // length and location of CD
        writeCounted(ZipLong.getBytes(Math.min(cdLength, ZipConstants.ZIP64_MAGIC)));
        writeCounted(ZipLong.getBytes(Math.min(cdOffset, ZipConstants.ZIP64_MAGIC)));

        // ZIP file comment
        final ByteBuffer data = this.zipEncoding.encode(comment);
        final int dataLen = data.limit() - data.position();
        writeCounted(ZipShort.getBytes(dataLen));
        streamCompressor.writeCounted(data.array(), data.arrayOffset(), dataLen);
    }

    private void writeCentralDirectoryInChunks() throws IOException {
        final int NUM_PER_WRITE = 1000;
        final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(70 * NUM_PER_WRITE);
        int count = 0;
        for (final ZipArchiveEntry ze : entries) {
            byteArrayOutputStream.write(createCentralFileHeader(ze));
            if (++count > NUM_PER_WRITE) {
                writeCounted(byteArrayOutputStream.toByteArray());
                byteArrayOutputStream.reset();
                count = 0;
            }
        }
        writeCounted(byteArrayOutputStream.toByteArray());
    }

    /**
     * Writes the central file header entry.
     *
     * @param ze the entry to write
     * @throws IOException on error
     * @throws Zip64RequiredException if the archive's size exceeds 4 GByte and {@link #setUseZip64(Zip64Mode)} is {@link Zip64Mode#Never}.
     */
    protected void writeCentralFileHeader(final ZipArchiveEntry ze) throws IOException {
        final byte[] centralFileHeader = createCentralFileHeader(ze);
        writeCounted(centralFileHeader);
    }

    /**
     * Writes bytes to the output or random access file.
     *
     * @param data the byte array to write
     * @throws IOException on error
     */
    private void writeCounted(final byte[] data) throws IOException {
        streamCompressor.writeCounted(data);
    }

    /**
     * Writes the data descriptor entry.
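     * <p>
     * As written by this method, the descriptor consists of the data descriptor signature followed by the CRC-32 and the compressed and uncompressed
     * sizes; the sizes are 4-byte values, or 8-byte values when the entry carries a Zip64 extra field. Nothing is written for entries that do not use a
     * data descriptor at all.
     * </p>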
1665 * 1666 * @param ze the entry to write 1667 * @throws IOException on error 1668 */ 1669 protected void writeDataDescriptor(final ZipArchiveEntry ze) throws IOException { 1670 if (!usesDataDescriptor(ze.getMethod(), false)) { 1671 return; 1672 } 1673 writeCounted(DD_SIG); 1674 writeCounted(ZipLong.getBytes(ze.getCrc())); 1675 if (!hasZip64Extra(ze)) { 1676 writeCounted(ZipLong.getBytes(ze.getCompressedSize())); 1677 writeCounted(ZipLong.getBytes(ze.getSize())); 1678 } else { 1679 writeCounted(ZipEightByteInteger.getBytes(ze.getCompressedSize())); 1680 writeCounted(ZipEightByteInteger.getBytes(ze.getSize())); 1681 } 1682 } 1683 1684 /** 1685 * Writes the local file header entry 1686 * 1687 * @param ze the entry to write 1688 * @throws IOException on error 1689 */ 1690 protected void writeLocalFileHeader(final ZipArchiveEntry ze) throws IOException { 1691 writeLocalFileHeader(ze, false); 1692 } 1693 1694 private void writeLocalFileHeader(final ZipArchiveEntry ze, final boolean phased) throws IOException { 1695 final boolean encodable = zipEncoding.canEncode(ze.getName()); 1696 final ByteBuffer name = getName(ze); 1697 1698 if (createUnicodeExtraFields != UnicodeExtraFieldPolicy.NEVER) { 1699 addUnicodeExtraFields(ze, encodable, name); 1700 } 1701 1702 long localHeaderStart = streamCompressor.getTotalBytesWritten(); 1703 if (isSplitZip) { 1704 // when creating a split zip, the offset should be 1705 // the offset to the corresponding segment disk 1706 final ZipSplitOutputStream splitOutputStream = (ZipSplitOutputStream) this.out; 1707 ze.setDiskNumberStart(splitOutputStream.getCurrentSplitSegmentIndex()); 1708 localHeaderStart = splitOutputStream.getCurrentSplitSegmentBytesWritten(); 1709 } 1710 1711 final byte[] localHeader = createLocalFileHeader(ze, name, encodable, phased, localHeaderStart); 1712 metaData.put(ze, new EntryMetaData(localHeaderStart, usesDataDescriptor(ze.getMethod(), phased))); 1713 entry.localDataStart = localHeaderStart + LFH_CRC_OFFSET; // At crc offset 1714 writeCounted(localHeader); 1715 entry.dataStart = streamCompressor.getTotalBytesWritten(); 1716 } 1717 1718 /** 1719 * Write bytes to output or random access file. 1720 * 1721 * @param data the byte array to write 1722 * @throws IOException on error 1723 */ 1724 protected final void writeOut(final byte[] data) throws IOException { 1725 streamCompressor.writeOut(data, 0, data.length); 1726 } 1727 1728 /** 1729 * Write bytes to output or random access file. 1730 * 1731 * @param data the byte array to write 1732 * @param offset the start position to write from 1733 * @param length the number of bytes to write 1734 * @throws IOException on error 1735 */ 1736 protected final void writeOut(final byte[] data, final int offset, final int length) throws IOException { 1737 streamCompressor.writeOut(data, offset, length); 1738 } 1739 1740 /** 1741 * Write preamble data. For most of the time, this is used to make self-extracting zips. 1742 * 1743 * @param preamble data to write 1744 * @throws IOException if an entry already exists 1745 * @since 1.21 1746 */ 1747 public void writePreamble(final byte[] preamble) throws IOException { 1748 writePreamble(preamble, 0, preamble.length); 1749 } 1750 1751 /** 1752 * Write preamble data. For most of the time, this is used to make self-extracting zips. 
1753 * 1754 * @param preamble data to write 1755 * @param offset the start offset in the data 1756 * @param length the number of bytes to write 1757 * @throws IOException if an entry already exists 1758 * @since 1.21 1759 */ 1760 public void writePreamble(final byte[] preamble, final int offset, final int length) throws IOException { 1761 if (entry != null) { 1762 throw new IllegalStateException("Preamble must be written before creating an entry"); 1763 } 1764 this.streamCompressor.writeCounted(preamble, offset, length); 1765 } 1766 1767 /** 1768 * Writes the "ZIP64 End of central dir record" and "ZIP64 End of central dir locator". 1769 * 1770 * @throws IOException on error 1771 * @since 1.3 1772 */ 1773 protected void writeZip64CentralDirectory() throws IOException { 1774 if (zip64Mode == Zip64Mode.Never) { 1775 return; 1776 } 1777 1778 if (!hasUsedZip64 && shouldUseZip64EOCD()) { 1779 // actually "will use" 1780 hasUsedZip64 = true; 1781 } 1782 1783 if (!hasUsedZip64) { 1784 return; 1785 } 1786 1787 long offset = streamCompressor.getTotalBytesWritten(); 1788 long diskNumberStart = 0L; 1789 if (isSplitZip) { 1790 // when creating a split zip, the offset of should be 1791 // the offset to the corresponding segment disk 1792 final ZipSplitOutputStream zipSplitOutputStream = (ZipSplitOutputStream) this.out; 1793 offset = zipSplitOutputStream.getCurrentSplitSegmentBytesWritten(); 1794 diskNumberStart = zipSplitOutputStream.getCurrentSplitSegmentIndex(); 1795 } 1796 1797 writeOut(ZIP64_EOCD_SIG); 1798 // size of zip64 end of central directory, we don't have any variable length 1799 // as we don't support the extensible data sector, yet 1800 writeOut(ZipEightByteInteger.getBytes(ZipConstants.SHORT /* version made by */ 1801 + ZipConstants.SHORT /* version needed to extract */ 1802 + ZipConstants.WORD /* disk number */ 1803 + ZipConstants.WORD /* disk with central directory */ 1804 + ZipConstants.DWORD /* number of entries in CD on this disk */ 1805 + ZipConstants.DWORD /* total number of entries */ 1806 + ZipConstants.DWORD /* size of CD */ 1807 + (long) ZipConstants.DWORD /* offset of CD */ 1808 )); 1809 1810 // version made by and version needed to extract 1811 writeOut(ZipShort.getBytes(ZipConstants.ZIP64_MIN_VERSION)); 1812 writeOut(ZipShort.getBytes(ZipConstants.ZIP64_MIN_VERSION)); 1813 1814 // number of this disk 1815 int numberOfThisDisk = 0; 1816 if (isSplitZip) { 1817 numberOfThisDisk = ((ZipSplitOutputStream) this.out).getCurrentSplitSegmentIndex(); 1818 } 1819 writeOut(ZipLong.getBytes(numberOfThisDisk)); 1820 1821 // disk number of the start of central directory 1822 writeOut(ZipLong.getBytes(cdDiskNumberStart)); 1823 1824 // total number of entries in the central directory on this disk 1825 final int numOfEntriesOnThisDisk = isSplitZip ? 
        // total number of entries in the central directory on this disk
        final int numOfEntriesOnThisDisk = isSplitZip ? numberOfCDInDiskData.getOrDefault(numberOfThisDisk, 0) : entries.size();
        final byte[] numOfEntriesOnThisDiskData = ZipEightByteInteger.getBytes(numOfEntriesOnThisDisk);
        writeOut(numOfEntriesOnThisDiskData);

        // number of entries
        final byte[] num = ZipEightByteInteger.getBytes(entries.size());
        writeOut(num);

        // length and location of CD
        writeOut(ZipEightByteInteger.getBytes(cdLength));
        writeOut(ZipEightByteInteger.getBytes(cdOffset));

        // no "zip64 extensible data sector" for now

        if (isSplitZip) {
            // based on the ZIP specification, the End Of Central Directory record and
            // the Zip64 End Of Central Directory locator record must be on the same segment
            final int zip64EOCDLOCLength = ZipConstants.WORD /* length of ZIP64_EOCD_LOC_SIG */
                    + ZipConstants.WORD /* disk number of ZIP64_EOCD_SIG */
                    + ZipConstants.DWORD /* offset of ZIP64_EOCD_SIG */
                    + ZipConstants.WORD /* total number of disks */;

            final long unsplittableContentSize = zip64EOCDLOCLength + eocdLength;
            ((ZipSplitOutputStream) this.out).prepareToWriteUnsplittableContent(unsplittableContentSize);
        }

        // and now the "ZIP64 end of central directory locator"
        writeOut(ZIP64_EOCD_LOC_SIG);

        // disk number holding the ZIP64 EOCD record
        writeOut(ZipLong.getBytes(diskNumberStart));
        // relative offset of the ZIP64 EOCD record
        writeOut(ZipEightByteInteger.getBytes(offset));
        // total number of disks
        if (isSplitZip) {
            // the Zip64 End Of Central Directory Locator and the End Of Central Directory record must be
            // on the same split segment, which means they must be located on the last disk
            final int totalNumberOfDisks = ((ZipSplitOutputStream) this.out).getCurrentSplitSegmentIndex() + 1;
            writeOut(ZipLong.getBytes(totalNumberOfDisks));
        } else {
            writeOut(ONE);
        }
    }
}
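// Illustrative usage of the split-archive support referenced above (not part of this class): writing a ZIP that is split
// into segments of at most 64 MB. The constructor taking a split size is assumed to be the one added in Commons Compress
// 1.20; file names and the split size are hypothetical.
//
//     try (ZipArchiveOutputStream zip = new ZipArchiveOutputStream(new File("backup.zip"), 64 * 1024 * 1024L)) {
//         zip.putArchiveEntry(new ZipArchiveEntry("payload.bin"));
//         // ... write the entry's data ...
//         zip.closeArchiveEntry();
//     }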