package org.apache.commons.jcs3.auxiliary.disk.block;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import java.io.EOFException;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.StandardOpenOption;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.jcs3.auxiliary.disk.behavior.IDiskCacheAttributes.DiskLimitType;
import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
import org.apache.commons.jcs3.io.ObjectInputStreamClassLoaderAware;
import org.apache.commons.jcs3.log.Log;
import org.apache.commons.jcs3.log.LogManager;
import org.apache.commons.jcs3.utils.serialization.StandardSerializer;
import org.apache.commons.jcs3.utils.struct.AbstractLRUMap;
import org.apache.commons.jcs3.utils.struct.LRUMap;
import org.apache.commons.jcs3.utils.timing.ElapsedTimer;

/**
 * This is responsible for storing the keys.
 * <p>
 * Maintains the in-memory map from cache key to the int[] of block numbers
 * occupied by that entry on disk, and persists that map to a
 * <code>&lt;region&gt;.key</code> file so it survives restarts.
 */
public class BlockDiskKeyStore<K>
{
    /**
     * Class for recycling and lru. This implements the LRU overflow callback,
     * so we can mark the blocks as free.
     */
    public class LRUMapCountLimited extends LRUMap<K, int[]>
    {
        /**
         * <code>tag</code> tells us which map we are working on.
         */
        public static final String TAG = "orig-lru-count";

        public LRUMapCountLimited(final int maxKeySize)
        {
            super(maxKeySize);
        }

        /**
         * This is called when the max key size is reached. The least recently
         * used item will be passed here. We will store the position and size of
         * the spot on disk in the recycle bin.
         * <p>
         *
         * @param key
         * @param value
         */
        @Override
        protected void processRemovedLRU(final K key, final int[] value)
        {
            blockDiskCache.freeBlocks(value);
            if (log.isDebugEnabled())
            {
                log.debug("{0}: Removing key: [{1}] from key store.", logCacheName, key);
                log.debug("{0}: Key store size: [{1}].", logCacheName, super.size());
            }
        }
    }

    /**
     * Class for recycling and lru. This implements the LRU size overflow
     * callback, so we can mark the blocks as free.
     */
    public class LRUMapSizeLimited extends AbstractLRUMap<K, int[]>
    {
        /**
         * <code>tag</code> tells us which map we are working on.
         */
        public static final String TAG = "orig-lru-size";

        // size of the content in kB
        private final AtomicInteger contentSize;
        private final int maxSize;

        /**
         * Default
         */
        public LRUMapSizeLimited()
        {
            this(-1);
        }

        /**
         * @param maxSize
         *            maximum cache size in kB
         */
        public LRUMapSizeLimited(final int maxSize)
        {
            this.maxSize = maxSize;
            this.contentSize = new AtomicInteger(0);
        }

        // keep the content size in kB, so 2^31 kB is reasonable value
        private void addLengthToCacheSize(final int[] value)
        {
            contentSize.addAndGet(value.length * blockSize / 1024 + 1);
        }

        /**
         * This is called when the max key size is reached. The least recently
         * used item will be passed here. We will store the position and size of
         * the spot on disk in the recycle bin.
         * <p>
         *
         * @param key
         * @param value
         */
        @Override
        protected void processRemovedLRU(final K key, final int[] value)
        {
            // NOTE(review): value may be null here; this assumes freeBlocks
            // tolerates null — confirm against BlockDiskCache.freeBlocks.
            blockDiskCache.freeBlocks(value);
            if (log.isDebugEnabled())
            {
                log.debug("{0}: Removing key: [{1}] from key store.", logCacheName, key);
                log.debug("{0}: Key store size: [{1}].", logCacheName, super.size());
            }

            if (value != null)
            {
                subLengthFromCacheSize(value);
            }
        }

        @Override
        public int[] put(final K key, final int[] value)
        {
            int[] oldValue = null;

            try
            {
                oldValue = super.put(key, value);
            }
            finally
            {
                // adjust the bookkeeping even if super.put threw, so the
                // tracked size never drifts from the map contents
                if (value != null)
                {
                    addLengthToCacheSize(value);
                }
                if (oldValue != null)
                {
                    subLengthFromCacheSize(oldValue);
                }
            }

            return oldValue;
        }

        @Override
        public int[] remove(final Object key)
        {
            int[] value = null;

            try
            {
                value = super.remove(key);
                return value;
            }
            finally
            {
                if (value != null)
                {
                    subLengthFromCacheSize(value);
                }
            }
        }

        @Override
        protected boolean shouldRemove()
        {
            return maxSize > 0 && contentSize.get() > maxSize && this.size() > 1;
        }

        // keep the content size in kB, so 2^31 kB is reasonable value
        private void subLengthFromCacheSize(final int[] value)
        {
            contentSize.addAndGet(value.length * blockSize / -1024 - 1);
        }
    }

    /** The logger */
    private static final Log log = LogManager.getLog(BlockDiskKeyStore.class);

    /** Attributes governing the behavior of the block disk cache. */
    private final BlockDiskCacheAttributes blockDiskCacheAttributes;

    /** The key to block map */
    private Map<K, int[]> keyHash;

    /** The file where we persist the keys */
    private final File keyFile;

    /** The key file signature for new-style key files */
    private static final int KEY_FILE_SIGNATURE = 0x6A63734B; // "jcsK"

    /** The name to prefix log messages with. */
    protected final String logCacheName;

    /** Name of the file where we persist the keys */
    private final String fileName;

    /** The maximum number of keys to store in memory */
    private final int maxKeySize;

    /**
     * we need this so we can communicate free blocks to the data store when
     * keys fall off the LRU
     */
    protected final BlockDiskCache<K, ?> blockDiskCache;

    private DiskLimitType diskLimitType = DiskLimitType.COUNT;

    private final int blockSize;

    /**
     * Serializer for reading and writing key file
     */
    private final IElementSerializer serializer;

    /**
     * Set the configuration options.
     * <p>
     *
     * @param cacheAttributes
     * @param blockDiskCache
     *            used for freeing
     */
    public BlockDiskKeyStore(final BlockDiskCacheAttributes cacheAttributes, final BlockDiskCache<K, ?> blockDiskCache)
    {
        this.blockDiskCacheAttributes = cacheAttributes;
        this.logCacheName = "Region [" + this.blockDiskCacheAttributes.getCacheName() + "] ";
        this.fileName = this.blockDiskCacheAttributes.getCacheName();
        this.maxKeySize = cacheAttributes.getMaxKeySize();
        this.blockDiskCache = blockDiskCache;
        this.diskLimitType = cacheAttributes.getDiskLimitType();
        this.blockSize = cacheAttributes.getBlockSizeBytes();

        // fall back to the standard serializer when no cache is supplied
        // (e.g. in tests)
        if (blockDiskCache == null)
        {
            this.serializer = new StandardSerializer();
        }
        else
        {
            this.serializer = blockDiskCache.getElementSerializer();
        }

        final File rootDirectory = cacheAttributes.getDiskPath();

        log.info("{0}: Cache file root directory [{1}]", logCacheName, rootDirectory);

        this.keyFile = new File(rootDirectory, fileName + ".key");

        log.info("{0}: Key File [{1}]", logCacheName, this.keyFile.getAbsolutePath());

        if (keyFile.length() > 0)
        {
            loadKeys();
            if (!verify())
            {
                log.warn("{0}: Key File is invalid. Resetting file.", logCacheName);
                initKeyMap();
                reset();
            }
        }
        else
        {
            initKeyMap();
        }
    }

    /**
     * This is mainly used for testing. It leaves the disk intact, and just
     * clears memory.
     */
    protected void clearMemoryMap()
    {
        this.keyHash.clear();
    }

    /**
     * Gets the entry set.
     * <p>
     *
     * @return entry set.
     */
    public Set<Map.Entry<K, int[]>> entrySet()
    {
        return this.keyHash.entrySet();
    }

    /**
     * gets the object for the key.
     * <p>
     *
     * @param key
     * @return Object
     */
    public int[] get(final K key)
    {
        return this.keyHash.get(key);
    }

    /**
     * Create the map for keys that contain the index position on disk.
     */
    private void initKeyMap()
    {
        keyHash = null;
        if (maxKeySize >= 0)
        {
            if (this.diskLimitType == DiskLimitType.SIZE)
            {
                keyHash = new LRUMapSizeLimited(maxKeySize);
            }
            else
            {
                keyHash = new LRUMapCountLimited(maxKeySize);
            }
            log.info("{0}: Set maxKeySize to: \"{1}\"", logCacheName, maxKeySize);
        }
        else
        {
            // If no max size, use a plain map for memory and processing
            // efficiency.
            keyHash = new HashMap<>();
            // keyHash = Collections.synchronizedMap( new HashMap() );
            log.info("{0}: Set maxKeySize to unlimited", logCacheName);
        }
    }

    /**
     * Tests emptiness (size == 0).
     *
     * @return Whether or not this is empty.
     * @since 3.1
     */
    public boolean isEmpty()
    {
        return size() == 0;
    }

    /**
     * Gets the key set.
     * <p>
     *
     * @return key set.
     */
    public Set<K> keySet()
    {
        return this.keyHash.keySet();
    }

    /**
     * Loads the keys from the .key file. The keys are stored individually on
     * disk. They are added one by one to an LRUMap.
     */
    protected void loadKeys()
    {
        log.info("{0}: Loading keys for {1}", () -> logCacheName, keyFile::toString);

        // create a key map to use.
        initKeyMap();

        final HashMap<K, int[]> keys = new HashMap<>();

        synchronized (keyFile)
        {
            // Check file type
            int fileSignature = 0;

            try (FileChannel bc = FileChannel.open(keyFile.toPath(), StandardOpenOption.READ))
            {
                final ByteBuffer signature = ByteBuffer.allocate(4);
                // Only decode the signature if all 4 bytes were read. A
                // truncated file (< 4 bytes) would otherwise throw an
                // uncaught BufferUnderflowException from getInt(); with this
                // guard it falls through to the old-format reader below and
                // gets reset by verify() if unreadable.
                if (bc.read(signature) == 4)
                {
                    signature.flip();
                    fileSignature = signature.getInt();
                }

                if (fileSignature == KEY_FILE_SIGNATURE)
                {
                    // new-style file: descriptors serialized back-to-back
                    // until EOF
                    while (true)
                    {
                        try
                        {
                            final BlockDiskElementDescriptor<K> descriptor =
                                    serializer.deSerializeFrom(bc, null);
                            if (descriptor != null)
                            {
                                keys.put(descriptor.getKey(), descriptor.getBlocks());
                            }
                        }
                        catch (EOFException e)
                        {
                            break;
                        }
                    }
                }
            }
            catch (final IOException | ClassNotFoundException e)
            {
                log.error("{0}: Problem loading keys for file {1}", logCacheName, fileName, e);
            }

            if (fileSignature != KEY_FILE_SIGNATURE)
            {
                // old-style file: a stream of Java-serialized descriptors
                try (InputStream fis = Files.newInputStream(keyFile.toPath());
                     ObjectInputStream ois = new ObjectInputStreamClassLoaderAware(fis, null))
                {
                    while (true)
                    {
                        @SuppressWarnings("unchecked")
                        final
                        // Need to cast from Object
                        BlockDiskElementDescriptor<K> descriptor = (BlockDiskElementDescriptor<K>) ois.readObject();
                        if (descriptor != null)
                        {
                            keys.put(descriptor.getKey(), descriptor.getBlocks());
                        }
                    }
                }
                catch (final EOFException eof)
                {
                    // expected: end of the descriptor stream
                }
                catch (final IOException | ClassNotFoundException e)
                {
                    log.error("{0}: Problem loading keys (old style) for file {1}", logCacheName, fileName, e);
                }
            }
        }

        if (!keys.isEmpty())
        {
            keyHash.putAll(keys);

            log.debug("{0}: Found {1} in keys file.", () -> logCacheName, keys::size);
            log.info("{0}: Loaded keys from [{1}], key count: {2}; up to {3} will be available.",
                    () -> logCacheName, () -> fileName, this::size,
                    () -> maxKeySize);
        }
    }

    /**
     * Puts a int[] in the keyStore.
     * <p>
     *
     * @param key
     * @param value
     */
    public void put(final K key, final int[] value)
    {
        this.keyHash.put(key, value);
    }

    /**
     * Remove by key.
     * <p>
     *
     * @param key
     * @return BlockDiskElementDescriptor if it was present, else null
     */
    public int[] remove(final K key)
    {
        return this.keyHash.remove(key);
    }

    /**
     * Resets the file and creates a new key map.
     */
    protected void reset()
    {
        synchronized (keyFile)
        {
            clearMemoryMap();
            saveKeys();
        }
    }

    /**
     * Saves key file to disk. This gets the LRUMap entry set and write the
     * entries out one by one after putting them in a wrapper.
     */
    protected void saveKeys()
    {
        final ElapsedTimer timer = new ElapsedTimer();
        log.info("{0}: Saving keys to [{1}], key count [{2}]", () -> logCacheName,
                this.keyFile::getAbsolutePath, this::size);

        synchronized (keyFile)
        {
            try (FileChannel bc = FileChannel.open(keyFile.toPath(),
                    StandardOpenOption.CREATE,
                    StandardOpenOption.WRITE,
                    StandardOpenOption.TRUNCATE_EXISTING))
            {
                if (!verify())
                {
                    throw new IOException("Inconsistent key file");
                }

                // Write signature to distinguish old format from new one
                ByteBuffer signature = ByteBuffer.allocate(4);
                signature.putInt(KEY_FILE_SIGNATURE).flip();
                bc.write(signature);

                // don't need to synchronize, since the underlying
                // collection makes a copy
                for (final Map.Entry<K, int[]> entry : keyHash.entrySet())
                {
                    final BlockDiskElementDescriptor<K> descriptor =
                            new BlockDiskElementDescriptor<>(entry.getKey(), entry.getValue());
                    // stream these out in the loop.
                    serializer.serializeTo(descriptor, bc);
                }
            }
            catch (final IOException e)
            {
                log.error("{0}: Problem storing keys.", logCacheName, e);
            }
        }

        log.info("{0}: Finished saving keys. It took {1} to store {2} keys. Key file length [{3}]",
                () -> logCacheName, timer::getElapsedTimeString, this::size,
                keyFile::length);
    }

    /**
     * Gets the size of the key hash.
     * <p>
     *
     * @return the number of keys.
     */
    public int size()
    {
        return this.keyHash.size();
    }

    /**
     * Verify key store integrity
     *
     * @return true if key store is valid
     */
    private boolean verify()
    {
        // map each disk block to the set of keys claiming it; any block
        // claimed by more than one key means the store is corrupt
        final Map<Integer, Set<K>> blockAllocationMap = new TreeMap<>();
        for (final Entry<K, int[]> e : keyHash.entrySet())
        {
            for (final int block : e.getValue())
            {
                Set<K> keys = blockAllocationMap.computeIfAbsent(block, s -> new HashSet<>());
                if (!keys.isEmpty() && !log.isTraceEnabled())
                {
                    // keys are not null, and no debug - fail fast
                    return false;
                }
                keys.add(e.getKey());
            }
        }
        if (!log.isTraceEnabled())
        {
            return true;
        }
        // trace mode: log the full allocation map before reporting failure
        boolean ok = true;
        for (final Entry<Integer, Set<K>> e : blockAllocationMap.entrySet())
        {
            log.trace("Block {0}: {1}", e::getKey, e::getValue);
            if (e.getValue().size() > 1)
            {
                ok = false;
            }
        }
        return ok;
    }
}