package org.apache.commons.jcs3.auxiliary.disk.block;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;

import org.apache.commons.jcs3.auxiliary.AuxiliaryCacheAttributes;
import org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache;
import org.apache.commons.jcs3.engine.behavior.ICacheElement;
import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
import org.apache.commons.jcs3.engine.behavior.IRequireScheduler;
import org.apache.commons.jcs3.engine.control.group.GroupAttrName;
import org.apache.commons.jcs3.engine.control.group.GroupId;
import org.apache.commons.jcs3.engine.stats.StatElement;
import org.apache.commons.jcs3.engine.stats.Stats;
import org.apache.commons.jcs3.engine.stats.behavior.IStatElement;
import org.apache.commons.jcs3.engine.stats.behavior.IStats;
import org.apache.commons.jcs3.log.Log;
import org.apache.commons.jcs3.log.LogManager;
import org.apache.commons.jcs3.utils.serialization.StandardSerializer;

/**
 * A disk cache that stores elements in a single data file made up of fixed-size
 * blocks. Serialized elements occupy one or more blocks, and a separate key store
 * maps each key to the block numbers that hold its value. There is one
 * BlockDiskCache per cache region.
 */
public class BlockDiskCache<K, V>
    extends AbstractDiskCache<K, V>
    implements IRequireScheduler
{
    /** The logger. */
    private static final Log log = LogManager.getLog( BlockDiskCache.class );

    /** The prefix for log messages, including the region name. */
    private final String logCacheName;

    /** The base name of the data file, derived from the cache name. */
    private final String fileName;

    /** The file holding the cached data, organized as fixed-size blocks. */
    private BlockDisk dataFile;

    /** The configuration attributes for this block disk cache. */
    private final BlockDiskCacheAttributes blockDiskCacheAttributes;

    /** The root directory containing the data file. */
    private final File rootDirectory;

    /** Maps each key to the block numbers in the data file that hold its value. */
    private BlockDiskKeyStore<K> keyStore;

    /**
     * Synchronizes access to the underlying storage: the write lock is held for
     * updates, removals, and resets; the read lock for gets and key snapshots.
     */
    private final ReentrantReadWriteLock storageLock = new ReentrantReadWriteLock();

    /** Handle for the scheduled key-persistence task, so it can be cancelled on dispose. */
    private ScheduledFuture<?> future;

    /**
     * Constructs the cache using the supplied region settings and the default
     * {@link StandardSerializer}.
     *
     * @param cacheAttributes the configuration for this region
     */
    public BlockDiskCache( final BlockDiskCacheAttributes cacheAttributes )
    {
        this( cacheAttributes, new StandardSerializer() );
    }

    /**
     * Constructs the cache using the supplied region settings and a custom element
     * serializer.
     *
     * @param cacheAttributes the configuration for this region
     * @param elementSerializer used to serialize and deserialize cache elements
     */
    public BlockDiskCache( final BlockDiskCacheAttributes cacheAttributes, final IElementSerializer elementSerializer )
    {
        super( cacheAttributes );
        setElementSerializer( elementSerializer );

        this.blockDiskCacheAttributes = cacheAttributes;
        this.logCacheName = "Region [" + getCacheName() + "] ";

        log.info("{0}: Constructing BlockDiskCache with attributes {1}", logCacheName, cacheAttributes );

        // Make a file-system-safe name from the region name.
        this.fileName = getCacheName().replaceAll("[^a-zA-Z0-9-_\\.]", "_");
        this.rootDirectory = cacheAttributes.getDiskPath();

        log.info("{0}: Cache file root directory: [{1}]", logCacheName, rootDirectory);

        try
        {
            if ( this.blockDiskCacheAttributes.getBlockSizeBytes() > 0 )
            {
                this.dataFile = new BlockDisk( new File( rootDirectory, fileName + ".data" ),
                                               this.blockDiskCacheAttributes.getBlockSizeBytes(),
                                               getElementSerializer() );
            }
            else
            {
                this.dataFile = new BlockDisk( new File( rootDirectory, fileName + ".data" ),
                                               getElementSerializer() );
            }

            keyStore = new BlockDiskKeyStore<>( this.blockDiskCacheAttributes, this );

            final boolean alright = verifyDisk();

            if ( keyStore.isEmpty() || !alright )
            {
                this.reset();
            }

            // Initialization finished successfully.
            setAlive(true);
            log.info("{0}: Block Disk Cache is alive.", logCacheName);
        }
        catch ( final IOException e )
        {
            log.error("{0}: Failure initializing for fileName: {1} and root directory: {2}",
                      logCacheName, fileName, rootDirectory, e);
        }
    }
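
    /*
     * A minimal usage sketch, for illustration only. The setter names below are
     * assumptions inferred from the getters this class calls (getDiskPath(),
     * getBlockSizeBytes()); verify them against BlockDiskCacheAttributes:
     *
     *   BlockDiskCacheAttributes attrs = new BlockDiskCacheAttributes();
     *   attrs.setCacheName("testRegion");
     *   attrs.setDiskPath("target/block-disk-cache"); // root directory for the .data file
     *   attrs.setBlockSizeBytes(4096);                // size of each block in the data file
     *   BlockDiskCache<String, String> cache = new BlockDiskCache<>(attrs);
     */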

    /**
     * Schedules periodic persistence of the key store if a positive interval is
     * configured.
     *
     * @see IRequireScheduler#setScheduledExecutorService(ScheduledExecutorService)
     */
    @Override
    public void setScheduledExecutorService(final ScheduledExecutorService scheduledExecutor)
    {
        // Persist the keys periodically so they survive an unclean shutdown.
        if ( this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds() > 0 )
        {
            future = scheduledExecutor.scheduleAtFixedRate(keyStore::saveKeys,
                    this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds(),
                    this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds(),
                    TimeUnit.SECONDS);
        }
    }
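
    /*
     * Illustrative: with a key persistence interval of 300 seconds (the setter
     * name mirroring getKeyPersistenceIntervalSeconds() is assumed), the key
     * store is flushed every five minutes until disposeInternal() cancels the
     * scheduled task via the saved future.
     */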

    /**
     * Performs a sanity check by sampling a handful of entries from the key store
     * and reading each one back from the data file.
     *
     * @return true if every sampled entry could be read; false otherwise
     */
    protected boolean verifyDisk()
    {
        boolean alright = false;

        // Simply try to read a sample of entries. If the reads succeed, the data
        // file is probably intact; any failure is treated as corruption.
        storageLock.readLock().lock();

        try
        {
            this.keyStore.entrySet().stream()
                .limit(100)
                .forEach(entry -> {
                    try
                    {
                        final Object data = this.dataFile.read(entry.getValue());
                        if ( data == null )
                        {
                            throw new IOException("Data is null");
                        }
                    }
                    catch (final IOException | ClassNotFoundException e)
                    {
                        throw new RuntimeException(logCacheName
                            + " Couldn't find data for key [" + entry.getKey() + "]", e);
                    }
                });
            alright = true;
        }
        catch ( final Exception e )
        {
            log.warn("{0}: Problem verifying disk.", logCacheName, e);
            alright = false;
        }
        finally
        {
            storageLock.readLock().unlock();
        }

        return alright;
    }

    /**
     * Returns a snapshot of the keys currently known to the key store.
     *
     * @return a new set containing the keys
     * @throws IOException if the keys cannot be gathered
     */
    @Override
    public Set<K> getKeySet() throws IOException
    {
        final HashSet<K> keys = new HashSet<>();

        storageLock.readLock().lock();

        try
        {
            keys.addAll(this.keyStore.keySet());
        }
        finally
        {
            storageLock.readLock().unlock();
        }

        return keys;
    }

    /**
     * Gets all elements whose keys match the given pattern. A snapshot of the key
     * set is taken under the read lock; each matching key is then fetched, and
     * misses are dropped from the result.
     *
     * @param pattern the pattern the key matcher applies to the string form of each key
     * @return a map of key to element for the matching keys; empty if none match
     */
    @Override
    public Map<K, ICacheElement<K, V>> processGetMatching( final String pattern )
    {
        Set<K> keyArray = null;
        storageLock.readLock().lock();
        try
        {
            keyArray = new HashSet<>(keyStore.keySet());
        }
        finally
        {
            storageLock.readLock().unlock();
        }

        final Set<K> matchingKeys = getKeyMatcher().getMatchingKeysFromArray( pattern, keyArray );

        // Collectors.toMap() rejects null values, so drop misses before collecting
        // rather than collecting first and filtering afterwards.
        return matchingKeys.stream()
            .map(this::processGet)
            .filter(Objects::nonNull)
            .collect(Collectors.toMap(ICacheElement::getKey, element -> element));
    }
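
    /*
     * Illustrative (keys and pattern hypothetical): with keys "user.1", "user.2"
     * and "order.1" present, the pattern "user\..+" would return the two "user"
     * entries, since the key matcher applies the pattern to the string form of
     * each key.
     */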

    /**
     * Returns the number of keys in the key store.
     *
     * @return the current number of cached items
     */
    @Override
    public int getSize()
    {
        return this.keyStore.size();
    }

    /**
     * Gets the element for the given key if it is on disk. The flow is:
     * <ol>
     * <li>Return null if the cache is no longer alive.</li>
     * <li>Acquire the read lock.</li>
     * <li>Look the key up in the key store.</li>
     * <li>If found, have the data file read the object from those blocks.</li>
     * <li>Release the lock.</li>
     * </ol>
     *
     * @param key the key to look up
     * @return the cached element, or null if not found
     */
    @Override
    protected ICacheElement<K, V> processGet( final K key )
    {
        if ( !isAlive() )
        {
            log.debug("{0}: No longer alive so returning null for key = {1}", logCacheName, key );
            return null;
        }

        log.debug("{0}: Trying to get from disk: {1}", logCacheName, key );

        ICacheElement<K, V> object = null;

        try
        {
            storageLock.readLock().lock();
            try
            {
                final int[] ded = this.keyStore.get( key );
                if ( ded != null )
                {
                    object = this.dataFile.read( ded );
                }
            }
            finally
            {
                storageLock.readLock().unlock();
            }
        }
        catch ( final IOException ioe )
        {
            log.error("{0}: Failure getting from disk--IOException, key = {1}", logCacheName, key, ioe );
            reset();
        }
        catch ( final Exception e )
        {
            log.error("{0}: Failure getting from disk, key = {1}", logCacheName, key, e );
        }
        return object;
    }

    /**
     * Writes an element to disk. The flow is:
     * <ol>
     * <li>Acquire the write lock.</li>
     * <li>If the key already has blocks, return them to the empty-block pool.</li>
     * <li>Have the data file write the element and report the blocks it used.</li>
     * <li>Record the new blocks against the key in the key store.</li>
     * <li>Release the write lock.</li>
     * </ol>
     *
     * @param element the element to write
     */
    @Override
    protected void processUpdate( final ICacheElement<K, V> element )
    {
        if ( !isAlive() )
        {
            log.debug("{0}: No longer alive; aborting put of key = {1}",
                    () -> logCacheName, element::getKey);
            return;
        }

        int[] old = null;

        // Only one thread may touch the data file and key store at a time.
        storageLock.writeLock().lock();

        try
        {
            old = this.keyStore.get( element.getKey() );

            if ( old != null )
            {
                this.dataFile.freeBlocks( old );
            }

            final int[] blocks = this.dataFile.write( element );

            this.keyStore.put( element.getKey(), blocks );

            log.debug("{0}: Put to file [{1}] key [{2}]", () -> logCacheName,
                    () -> fileName, element::getKey);
        }
        catch ( final IOException e )
        {
            log.error("{0}: Failure updating element, key: {1} old: {2}",
                    logCacheName, element.getKey(), Arrays.toString(old), e);
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        log.debug("{0}: Stored element on disk, key: {1}", () -> logCacheName,
                element::getKey);
    }
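
    /*
     * Illustrative block accounting for an update (block numbers hypothetical):
     * if key "k" previously occupied blocks {3, 7} and its new value needs three
     * blocks, 3 and 7 are freed first, the write might then be assigned
     * {3, 7, 9}, and that array replaces the old entry for "k" in the key store.
     */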

    /**
     * Removes the element for the given key. String keys ending with the name
     * component delimiter trigger a hierarchical (prefix) removal, and group keys
     * with a null attribute name remove the whole group; otherwise a single entry
     * is removed. Any failure resets the cache.
     *
     * @param key the key to remove
     * @return true if anything was removed
     */
    @Override
    protected boolean processRemove( final K key )
    {
        if ( !isAlive() )
        {
            log.debug("{0}: No longer alive so returning false for key = {1}", logCacheName, key );
            return false;
        }

        boolean reset = false;
        boolean removed = false;

        storageLock.writeLock().lock();

        try
        {
            if (key instanceof String && key.toString().endsWith(NAME_COMPONENT_DELIMITER))
            {
                removed = performPartialKeyRemoval((String) key);
            }
            else if (key instanceof GroupAttrName && ((GroupAttrName<?>) key).attrName == null)
            {
                removed = performGroupRemoval(((GroupAttrName<?>) key).groupId);
            }
            else
            {
                removed = performSingleKeyRemoval(key);
            }
        }
        catch ( final Exception e )
        {
            log.error("{0}: Problem removing element.", logCacheName, e );
            reset = true;
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        if ( reset )
        {
            reset();
        }

        return removed;
    }
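
    /*
     * Illustrative dispatch (assuming ":" is the name component delimiter): a
     * String key "session:" removes every key beginning with "session:"; a
     * GroupAttrName with a null attrName removes every member of that group; any
     * other key removes exactly one entry.
     */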

    /**
     * Removes all elements that belong to the given group. Builds a list of the
     * group's keys and removes them one by one. This runs under the write lock
     * acquired in processRemove().
     *
     * @param key the id of the group to remove
     * @return true if at least one element was removed
     */
    private boolean performGroupRemoval(final GroupId key)
    {
        // Collect all keys that belong to the group.
        final List<K> itemsToRemove = keyStore.keySet()
                .stream()
                .filter(k -> k instanceof GroupAttrName && ((GroupAttrName<?>) k).groupId.equals(key))
                .collect(Collectors.toList());

        // Remove the collected items one by one.
        itemsToRemove.forEach(this::performSingleKeyRemoval);

        return !itemsToRemove.isEmpty();
    }

    /**
     * Removes all elements whose string keys start with the given key. Builds a
     * list of the matching keys and removes them one by one. This runs under the
     * write lock acquired in processRemove().
     *
     * @param key the key prefix
     * @return true if at least one element was removed
     */
    private boolean performPartialKeyRemoval(final String key)
    {
        // Collect all keys in the same name hierarchy.
        final List<K> itemsToRemove = keyStore.keySet()
                .stream()
                .filter(k -> k instanceof String && k.toString().startsWith(key))
                .collect(Collectors.toList());

        // Remove the collected items one by one.
        itemsToRemove.forEach(this::performSingleKeyRemoval);

        return !itemsToRemove.isEmpty();
    }

    /**
     * Removes a single key: drops it from the key store and returns its blocks to
     * the data file's empty-block pool.
     *
     * @param key the key to remove
     * @return true if the key was present
     */
    private boolean performSingleKeyRemoval(final K key)
    {
        final int[] ded = this.keyStore.remove( key );
        final boolean removed = ded != null;
        if ( removed )
        {
            this.dataFile.freeBlocks( ded );
        }

        log.debug("{0}: Disk removal: Removed from key hash, key [{1}] removed = {2}",
                logCacheName, key, removed);
        return removed;
    }

    /**
     * Removes all elements by resetting the key store and the data file.
     */
    @Override
    protected void processRemoveAll()
    {
        reset();
    }

    /**
     * Disposes of the cache in a background thread, joining against that thread
     * for up to sixty seconds to cap the disposal time.
     */
    @Override
    public void processDispose()
    {
        final Thread t = new Thread(this::disposeInternal, "BlockDiskCache-DisposalThread" );
        t.start();

        try
        {
            t.join( 60 * 1000 );
        }
        catch ( final InterruptedException ex )
        {
            log.error("{0}: Interrupted while waiting for disposal thread to finish.",
                    logCacheName, ex );
        }
    }

    /**
     * Saves the keys, cancels the key-persistence task, and closes the data file.
     */
    protected void disposeInternal()
    {
        if ( !isAlive() )
        {
            log.error("{0}: Not alive and dispose was called, filename: {1}", logCacheName, fileName);
            return;
        }
        storageLock.writeLock().lock();
        try
        {
            // Prevent any further interaction with the cache while shutting down.
            setAlive(false);
            this.keyStore.saveKeys();

            if (future != null)
            {
                future.cancel(true);
            }

            try
            {
                log.debug("{0}: Closing files, base filename: {1}", logCacheName, fileName );
                dataFile.close();
            }
            catch ( final IOException e )
            {
                log.error("{0}: Failure closing files in dispose, filename: {1}",
                        logCacheName, fileName, e );
            }
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        log.info("{0}: Shutdown complete.", logCacheName);
    }

    /**
     * Returns the attributes this cache was configured with.
     *
     * @return the block disk cache attributes
     */
    @Override
    public AuxiliaryCacheAttributes getAuxiliaryCacheAttributes()
    {
        return this.blockDiskCacheAttributes;
    }

    /**
     * Discards all cached data by resetting both the key store and the data file.
     */
    private void reset()
    {
        log.info("{0}: Resetting cache", logCacheName);

        // Acquire the lock before the try so the finally block never unlocks a
        // lock that was not acquired.
        storageLock.writeLock().lock();

        try
        {
            this.keyStore.reset();

            if ( dataFile != null )
            {
                dataFile.reset();
            }
        }
        catch ( final IOException e )
        {
            log.error("{0}: Failure resetting state", logCacheName, e );
        }
        finally
        {
            storageLock.writeLock().unlock();
        }
    }

    /**
     * Returns the given blocks to the data file's empty-block pool, making them
     * available for reuse.
     *
     * @param blocksToFree the block numbers to recycle
     */
    protected void freeBlocks( final int[] blocksToFree )
    {
        this.dataFile.freeBlocks( blocksToFree );
    }

    /**
     * Gathers statistics about this cache and its data file.
     *
     * @return the statistics
     */
    @Override
    public IStats getStatistics()
    {
        final IStats stats = new Stats();
        stats.setTypeName( "Block Disk Cache" );

        final ArrayList<IStatElement<?>> elems = new ArrayList<>();

        elems.add(new StatElement<>( "Is Alive", Boolean.valueOf(isAlive()) ) );
        elems.add(new StatElement<>( "Key Map Size", Integer.valueOf(this.keyStore.size()) ) );

        if (this.dataFile != null)
        {
            try
            {
                elems.add(new StatElement<>( "Data File Length", Long.valueOf(this.dataFile.length()) ) );
            }
            catch ( final IOException e )
            {
                log.error( e );
            }

            elems.add(new StatElement<>( "Block Size Bytes",
                    Integer.valueOf(this.dataFile.getBlockSizeBytes()) ) );
            elems.add(new StatElement<>( "Number Of Blocks",
                    Integer.valueOf(this.dataFile.getNumberOfBlocks()) ) );
            elems.add(new StatElement<>( "Average Put Size Bytes",
                    Long.valueOf(this.dataFile.getAveragePutSizeBytes()) ) );
            elems.add(new StatElement<>( "Empty Blocks",
                    Integer.valueOf(this.dataFile.getEmptyBlocks()) ) );
        }

        // Add the statistics from the super class.
        final IStats sStats = super.getStatistics();
        elems.addAll(sStats.getStatElements());

        stats.setStatElements( elems );

        return stats;
    }

    /**
     * Returns the path of the data file; used for event logging.
     *
     * @return the disk location
     */
    @Override
    protected String getDiskLocation()
    {
        return dataFile.getFilePath();
    }
}