1 package org.apache.commons.jcs.auxiliary.disk.indexed;
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22 import java.io.File;
23 import java.io.IOException;
24 import java.io.Serializable;
25 import java.util.ArrayList;
26 import java.util.Arrays;
27 import java.util.Comparator;
28 import java.util.HashMap;
29 import java.util.HashSet;
30 import java.util.Iterator;
31 import java.util.LinkedList;
32 import java.util.List;
33 import java.util.Map;
34 import java.util.Set;
35 import java.util.concurrent.ConcurrentSkipListSet;
36 import java.util.concurrent.atomic.AtomicInteger;
37 import java.util.concurrent.atomic.AtomicLong;
38 import java.util.concurrent.locks.ReentrantReadWriteLock;
39
40 import org.apache.commons.jcs.auxiliary.AuxiliaryCacheAttributes;
41 import org.apache.commons.jcs.auxiliary.disk.AbstractDiskCache;
42 import org.apache.commons.jcs.auxiliary.disk.behavior.IDiskCacheAttributes.DiskLimitType;
43 import org.apache.commons.jcs.engine.CacheConstants;
44 import org.apache.commons.jcs.engine.behavior.ICacheElement;
45 import org.apache.commons.jcs.engine.behavior.IElementSerializer;
46 import org.apache.commons.jcs.engine.control.group.GroupAttrName;
47 import org.apache.commons.jcs.engine.control.group.GroupId;
48 import org.apache.commons.jcs.engine.logging.behavior.ICacheEvent;
49 import org.apache.commons.jcs.engine.logging.behavior.ICacheEventLogger;
50 import org.apache.commons.jcs.engine.stats.StatElement;
51 import org.apache.commons.jcs.engine.stats.Stats;
52 import org.apache.commons.jcs.engine.stats.behavior.IStatElement;
53 import org.apache.commons.jcs.engine.stats.behavior.IStats;
54 import org.apache.commons.jcs.utils.struct.AbstractLRUMap;
55 import org.apache.commons.jcs.utils.struct.LRUMap;
56 import org.apache.commons.jcs.utils.timing.ElapsedTimer;
57 import org.apache.commons.logging.Log;
58 import org.apache.commons.logging.LogFactory;
59
60
61
62
63
64
65 public class IndexedDiskCache<K, V> extends AbstractDiskCache<K, V>
66 {
67
    /** The logger. */
    private static final Log log = LogFactory.getLog(IndexedDiskCache.class);

    /** Region name prefix used on every log message from this cache. */
    protected final String logCacheName;

    /** Base file name (sanitized region name) for the .data and .key files. */
    private final String fileName;

    /** File holding the serialized cache elements. */
    private IndexedDisk dataFile;

    /** File holding the persisted key-to-descriptor index. */
    private IndexedDisk keyFile;

    /** In-memory map from cache key to the element's position/length on disk. */
    private Map<K, IndexedDiskElementDescriptor> keyHash;

    /** Key map limit (count or size depending on diskLimitType); negative means unlimited. */
    private final int maxKeySize;

    /** Directory that holds the data and key files. */
    private File rafDir;

    /** When true, freed regions of the data file are reused for new writes. */
    private boolean doRecycle = true;

    /** True when optimizeAtRemoveCount is positive; enables background defragmentation. */
    private boolean isRealTimeOptimizationEnabled = true;

    /** When true, the data file is defragmented during dispose if space can be reclaimed. */
    private boolean isShutdownOptimizationEnabled = true;

    /** True while an optimization pass is running. */
    private boolean isOptimizing = false;

    /** Number of optimization passes started (reported in stats). */
    private int timesOptimized = 0;

    /** The thread currently running optimizeFile(), or null when none is active. */
    private volatile Thread currentOptimizationThread;

    /** Removals since the last optimization; compared against optimizeAtRemoveCount. */
    private int removeCount = 0;

    /** When true (only during an optimization pass), new descriptors are also queued for defrag. */
    private boolean queueInput = false;

    /** Descriptors written while an optimization pass runs, ordered by file position. */
    private final ConcurrentSkipListSet<IndexedDiskElementDescriptor> queuedPutList =
        new ConcurrentSkipListSet<IndexedDiskElementDescriptor>(new PositionComparator());

    /** Recycle bin: descriptors of freed data-file regions available for reuse. */
    private ConcurrentSkipListSet<IndexedDiskElementDescriptor> recycle;

    /** Configuration attributes for this disk cache region. */
    private final IndexedDiskCacheAttributes cattr;

    /** How many times a freed region was reused for a new write (reported in stats). */
    private int recycleCnt = 0;

    /** Number of keys loaded from disk at startup (reported in stats). */
    private int startupSize = 0;

    /** Bytes in the data file that belong to removed or replaced elements. */
    private AtomicLong bytesFree = new AtomicLong(0);

    /** Whether maxKeySize limits the key count or the total element size. */
    private DiskLimitType diskLimitType = DiskLimitType.COUNT;

    /** Number of successful disk gets (reported in stats). */
    private AtomicInteger hitCount = new AtomicInteger(0);

    /**
     * Guards keyHash and the data/key files: read lock for gets, write lock for
     * updates, removals, resets, and optimization bookkeeping.
     */
    protected ReentrantReadWriteLock storageLock = new ReentrantReadWriteLock();
144
145
146
147
148
149
150
    /**
     * Constructs the disk cache with a null serializer (presumably the
     * superclass default then applies — confirm setElementSerializer(null)).
     *
     * @param cacheAttributes configuration for this region
     */
    public IndexedDiskCache(IndexedDiskCacheAttributes cacheAttributes)
    {
        this(cacheAttributes, null);
    }
155
156
157
158
159
160
161
162
163
    /**
     * Constructs the disk cache: configures limits from the attributes, opens
     * (or resets) the data and key files, and marks the cache alive. An
     * initialization failure is logged and leaves the cache not alive.
     *
     * @param cattr region configuration
     * @param elementSerializer serializer for elements (may be null)
     */
    public IndexedDiskCache(IndexedDiskCacheAttributes cattr, IElementSerializer elementSerializer)
    {
        super(cattr);

        setElementSerializer(elementSerializer);

        this.cattr = cattr;
        this.maxKeySize = cattr.getMaxKeySize();
        this.isRealTimeOptimizationEnabled = cattr.getOptimizeAtRemoveCount() > 0;
        this.isShutdownOptimizationEnabled = cattr.isOptimizeOnShutdown();
        this.logCacheName = "Region [" + getCacheName() + "] ";
        this.diskLimitType = cattr.getDiskLimitType();

        // Sanitize the region name so it is safe to use as a file name.
        this.fileName = getCacheName().replaceAll("[^a-zA-Z0-9-_\\.]", "_");

        try
        {
            initializeFileSystem(cattr);

            initializeKeysAndData(cattr);

            initializeRecycleBin();

            setAlive(true);
            if (log.isInfoEnabled())
            {
                log.info(logCacheName + "Indexed Disk Cache is alive.");
            }

            // Keys loaded from a previous run imply the data file may be
            // fragmented; schedule an optimization pass right away.
            if (isRealTimeOptimizationEnabled && keyHash.size() > 0)
            {
                doOptimizeRealTime();
            }
        }
        catch (IOException e)
        {
            log.error(
                logCacheName + "Failure initializing for fileName: " + fileName + " and directory: "
                    + this.rafDir.getAbsolutePath(), e);
        }
    }
208
209
210
211
212
213
214
215 private void initializeFileSystem(IndexedDiskCacheAttributes cattr)
216 {
217 this.rafDir = cattr.getDiskPath();
218 if (log.isInfoEnabled())
219 {
220 log.info(logCacheName + "Cache file root directory: " + rafDir);
221 }
222 }
223
224
225
226
227
228
229
230
231
232
233 private void initializeKeysAndData(IndexedDiskCacheAttributes cattr) throws IOException
234 {
235 this.dataFile = new IndexedDisk(new File(rafDir, fileName + ".data"), getElementSerializer());
236 this.keyFile = new IndexedDisk(new File(rafDir, fileName + ".key"), getElementSerializer());
237
238 if (cattr.isClearDiskOnStartup())
239 {
240 if (log.isInfoEnabled())
241 {
242 log.info(logCacheName + "ClearDiskOnStartup is set to true. Ingnoring any persisted data.");
243 }
244 initializeEmptyStore();
245 }
246 else if (keyFile.length() > 0)
247 {
248
249
250 initializeStoreFromPersistedData();
251 }
252 else
253 {
254
255
256 initializeEmptyStore();
257 }
258 }
259
260
261
262
263
264
265
    /**
     * Sets up a fresh, empty key map and truncates any stale contents of the
     * data file.
     *
     * @throws IOException on problems resetting the data file
     */
    private void initializeEmptyStore() throws IOException
    {
        initializeKeyMap();

        if (dataFile.length() > 0)
        {
            // No keys reference this data, so it is unreachable; discard it.
            dataFile.reset();
        }
    }
275
276
277
278
279
280
281
282
283 private void initializeStoreFromPersistedData() throws IOException
284 {
285 loadKeys();
286
287 if (keyHash.isEmpty())
288 {
289 dataFile.reset();
290 }
291 else
292 {
293 boolean isOk = checkKeyDataConsistency(false);
294 if (!isOk)
295 {
296 keyHash.clear();
297 keyFile.reset();
298 dataFile.reset();
299 log.warn(logCacheName + "Corruption detected. Reseting data and keys files.");
300 }
301 else
302 {
303 synchronized (this)
304 {
305 startupSize = keyHash.size();
306 }
307 }
308 }
309 }
310
311
312
313
314
    /**
     * Reads the persisted key index from the key file into a freshly created
     * key map. The entire key file payload is deserialized as one HashMap.
     */
    protected void loadKeys()
    {
        if (log.isDebugEnabled())
        {
            log.debug(logCacheName + "Loading keys for " + keyFile.toString());
        }

        storageLock.writeLock().lock();

        try
        {
            // Start from an empty map of the configured type/limit.
            initializeKeyMap();

            // The whole file (minus the record header) is one serialized map.
            HashMap<K, IndexedDiskElementDescriptor> keys = keyFile.readObject(
                new IndexedDiskElementDescriptor(0, (int) keyFile.length() - IndexedDisk.HEADER_SIZE_BYTES));

            if (keys != null)
            {
                if (log.isDebugEnabled())
                {
                    log.debug(logCacheName + "Found " + keys.size() + " in keys file.");
                }

                // Copying through putAll lets an LRU-limited map enforce its cap.
                keyHash.putAll(keys);

                if (log.isInfoEnabled())
                {
                    log.info(logCacheName + "Loaded keys from [" + fileName + "], key count: " + keyHash.size() + "; up to "
                        + maxKeySize + " will be available.");
                }
            }

            if (log.isDebugEnabled())
            {
                dump(false);
            }
        }
        catch (Exception e)
        {
            log.error(logCacheName + "Problem loading keys for file " + fileName, e);
        }
        finally
        {
            storageLock.writeLock().unlock();
        }
    }
362
363
364
365
366
367
368
369
370
371
372
373
374 private boolean checkKeyDataConsistency(boolean checkForDedOverlaps)
375 {
376 ElapsedTimer timer = new ElapsedTimer();
377 log.debug(logCacheName + "Performing inital consistency check");
378
379 boolean isOk = true;
380 long fileLength = 0;
381 try
382 {
383 fileLength = dataFile.length();
384
385 for (Map.Entry<K, IndexedDiskElementDescriptor> e : keyHash.entrySet())
386 {
387 IndexedDiskElementDescriptor ded = e.getValue();
388
389 isOk = ded.pos + IndexedDisk.HEADER_SIZE_BYTES + ded.len <= fileLength;
390
391 if (!isOk)
392 {
393 log.warn(logCacheName + "The dataFile is corrupted!" + "\n raf.length() = " + fileLength + "\n ded.pos = "
394 + ded.pos);
395 break;
396 }
397 }
398
399 if (isOk && checkForDedOverlaps)
400 {
401 isOk = checkForDedOverlaps(createPositionSortedDescriptorList());
402 }
403 }
404 catch (IOException e)
405 {
406 log.error(e);
407 isOk = false;
408 }
409
410 if (log.isInfoEnabled())
411 {
412 log.info(logCacheName + "Finished inital consistency check, isOk = " + isOk + " in " + timer.getElapsedTimeString());
413 }
414
415 return isOk;
416 }
417
418
419
420
421
422
423
424
425
426
427 protected boolean checkForDedOverlaps(IndexedDiskElementDescriptor[] sortedDescriptors)
428 {
429 long start = System.currentTimeMillis();
430 boolean isOk = true;
431 long expectedNextPos = 0;
432 for (int i = 0; i < sortedDescriptors.length; i++)
433 {
434 IndexedDiskElementDescriptor ded = sortedDescriptors[i];
435 if (expectedNextPos > ded.pos)
436 {
437 log.error(logCacheName + "Corrupt file: overlapping deds " + ded);
438 isOk = false;
439 break;
440 }
441 else
442 {
443 expectedNextPos = ded.pos + IndexedDisk.HEADER_SIZE_BYTES + ded.len;
444 }
445 }
446 long end = System.currentTimeMillis();
447 if (log.isDebugEnabled())
448 {
449 log.debug(logCacheName + "Check for DED overlaps took " + (end - start) + " ms.");
450 }
451
452 return isOk;
453 }
454
455
456
457
458 protected void saveKeys()
459 {
460 try
461 {
462 if (log.isInfoEnabled())
463 {
464 log.info(logCacheName + "Saving keys to: " + fileName + ", key count: " + keyHash.size());
465 }
466
467 keyFile.reset();
468
469 HashMap<K, IndexedDiskElementDescriptor> keys = new HashMap<K, IndexedDiskElementDescriptor>();
470 keys.putAll(keyHash);
471
472 if (keys.size() > 0)
473 {
474 keyFile.writeObject(keys, 0);
475 }
476
477 if (log.isInfoEnabled())
478 {
479 log.info(logCacheName + "Finished saving keys.");
480 }
481 }
482 catch (IOException e)
483 {
484 log.error(logCacheName + "Problem storing keys.", e);
485 }
486 }
487
488
489
490
491
492
493
494
495
    /**
     * Serializes the element and writes it to the data file, reusing either the
     * element's previous slot (when the new data fits) or a recycled free slot
     * when one is available; otherwise the record is appended at the end of the
     * file.
     *
     * @param ce the element to store
     */
    @Override
    protected void processUpdate(ICacheElement<K, V> ce)
    {
        if (!isAlive())
        {
            log.error(logCacheName + "No longer alive; aborting put of key = " + ce.getKey());
            return;
        }

        if (log.isDebugEnabled())
        {
            log.debug(logCacheName + "Storing element on disk, key: " + ce.getKey());
        }

        IndexedDiskElementDescriptor ded = null;

        // Descriptor previously stored under this key, if any.
        IndexedDiskElementDescriptor old = null;

        try
        {
            // Serialize outside the lock; only the file/index updates are guarded.
            byte[] data = getElementSerializer().serialize(ce);

            storageLock.writeLock().lock();
            try
            {
                old = keyHash.get(ce.getKey());

                // If the new data fits in the old slot, overwrite it in place.
                if (old != null && data.length <= old.len)
                {
                    ded = old;
                    ded.len = data.length;
                }
                else
                {
                    // Provisionally plan to append at the current end of file.
                    ded = new IndexedDiskElementDescriptor(dataFile.length(), data.length);

                    if (doRecycle)
                    {
                        // Look for a reusable freed slot via the set's natural
                        // ordering (presumably by size — confirm against
                        // IndexedDiskElementDescriptor.compareTo).
                        IndexedDiskElementDescriptor rep = recycle.ceiling(ded);
                        if (rep != null)
                        {
                            recycle.remove(rep);
                            ded = rep;
                            ded.len = data.length;
                            recycleCnt++;
                            // The reused bytes are no longer free.
                            this.adjustBytesFree(ded, false);
                            if (log.isDebugEnabled())
                            {
                                log.debug(logCacheName + "using recycled ded " + ded.pos + " rep.len = " + rep.len + " ded.len = "
                                    + ded.len);
                            }
                        }
                    }

                    keyHash.put(ce.getKey(), ded);

                    // During an optimization pass, record the new slot so the
                    // optimizer can defragment it in its second phase.
                    if (queueInput)
                    {
                        queuedPutList.add(ded);
                        if (log.isDebugEnabled())
                        {
                            log.debug(logCacheName + "added to queued put list." + queuedPutList.size());
                        }
                    }

                    // The old (too small) slot is now free for reuse.
                    if (old != null)
                    {
                        addToRecycleBin(old);
                    }
                }

                dataFile.write(ded, data);
            }
            finally
            {
                storageLock.writeLock().unlock();
            }

            if (log.isDebugEnabled())
            {
                log.debug(logCacheName + "Put to file: " + fileName + ", key: " + ce.getKey() + ", position: " + ded.pos
                    + ", size: " + ded.len);
            }
        }
        catch (IOException e)
        {
            log.error(logCacheName + "Failure updating element, key: " + ce.getKey() + " old: " + old, e);
        }
    }
595
596
597
598
599
600
601
602
603
604 @Override
605 protected ICacheElement<K, V> processGet(K key)
606 {
607 if (!isAlive())
608 {
609 log.error(logCacheName + "No longer alive so returning null for key = " + key);
610 return null;
611 }
612
613 if (log.isDebugEnabled())
614 {
615 log.debug(logCacheName + "Trying to get from disk: " + key);
616 }
617
618 ICacheElement<K, V> object = null;
619 try
620 {
621 storageLock.readLock().lock();
622 try
623 {
624 object = readElement(key);
625 }
626 finally
627 {
628 storageLock.readLock().unlock();
629 }
630
631 if (object != null)
632 {
633 hitCount.incrementAndGet();
634 }
635 }
636 catch (IOException ioe)
637 {
638 log.error(logCacheName + "Failure getting from disk, key = " + key, ioe);
639 reset();
640 }
641 return object;
642 }
643
644
645
646
647
648
649
650
651
652 @Override
653 public Map<K, ICacheElement<K, V>> processGetMatching(String pattern)
654 {
655 Map<K, ICacheElement<K, V>> elements = new HashMap<K, ICacheElement<K, V>>();
656 Set<K> keyArray = null;
657 storageLock.readLock().lock();
658 try
659 {
660 keyArray = new HashSet<K>(keyHash.keySet());
661 }
662 finally
663 {
664 storageLock.readLock().unlock();
665 }
666
667 Set<K> matchingKeys = getKeyMatcher().getMatchingKeysFromArray(pattern, keyArray);
668
669 for (K key : matchingKeys)
670 {
671 ICacheElement<K, V> element = processGet(key);
672 if (element != null)
673 {
674 elements.put(key, element);
675 }
676 }
677 return elements;
678 }
679
680
681
682
683
684
685
686
687
688 private ICacheElement<K, V> readElement(K key) throws IOException
689 {
690 ICacheElement<K, V> object = null;
691
692 IndexedDiskElementDescriptor ded = keyHash.get(key);
693
694 if (ded != null)
695 {
696 if (log.isDebugEnabled())
697 {
698 log.debug(logCacheName + "Found on disk, key: " + key);
699 }
700 try
701 {
702 ICacheElement<K, V> readObject = dataFile.readObject(ded);
703 object = readObject;
704
705 }
706 catch (IOException e)
707 {
708 log.error(logCacheName + "IO Exception, Problem reading object from file", e);
709 throw e;
710 }
711 catch (Exception e)
712 {
713 log.error(logCacheName + "Exception, Problem reading object from file", e);
714 throw new IOException(logCacheName + "Problem reading object from disk. " + e.getMessage());
715 }
716 }
717
718 return object;
719 }
720
721
722
723
724
725
726
727 @Override
728 public Set<K> getKeySet() throws IOException
729 {
730 HashSet<K> keys = new HashSet<K>();
731
732 storageLock.readLock().lock();
733
734 try
735 {
736 keys.addAll(this.keyHash.keySet());
737 }
738 finally
739 {
740 storageLock.readLock().unlock();
741 }
742
743 return keys;
744 }
745
746
747
748
749
750
751
752
753
754 @Override
755 protected boolean processRemove(K key)
756 {
757 if (!isAlive())
758 {
759 log.error(logCacheName + "No longer alive so returning false for key = " + key);
760 return false;
761 }
762
763 if (key == null)
764 {
765 return false;
766 }
767
768 boolean reset = false;
769 boolean removed = false;
770 try
771 {
772 storageLock.writeLock().lock();
773
774 if (key instanceof String && key.toString().endsWith(CacheConstants.NAME_COMPONENT_DELIMITER))
775 {
776 removed = performPartialKeyRemoval((String) key);
777 }
778 else if (key instanceof GroupAttrName && ((GroupAttrName<?>) key).attrName == null)
779 {
780 removed = performGroupRemoval(((GroupAttrName<?>) key).groupId);
781 }
782 else
783 {
784 removed = performSingleKeyRemoval(key);
785 }
786 }
787 finally
788 {
789 storageLock.writeLock().unlock();
790 }
791
792 if (reset)
793 {
794 reset();
795 }
796
797
798
799 if (removed)
800 {
801 doOptimizeRealTime();
802 }
803
804 return removed;
805 }
806
807
808
809
810
811
812
813
814
815
816
817 private boolean performPartialKeyRemoval(String key)
818 {
819 boolean removed = false;
820
821
822 List<K> itemsToRemove = new LinkedList<K>();
823
824 for (K k : keyHash.keySet())
825 {
826 if (k instanceof String && k.toString().startsWith(key))
827 {
828 itemsToRemove.add(k);
829 }
830 }
831
832
833 for (K fullKey : itemsToRemove)
834 {
835
836
837 performSingleKeyRemoval(fullKey);
838 removed = true;
839
840 }
841
842 return removed;
843 }
844
845
846
847
848
849
850
851
852
853
854
855 private boolean performGroupRemoval(GroupId key)
856 {
857 boolean removed = false;
858
859
860 List<K> itemsToRemove = new LinkedList<K>();
861
862
863 for (K k : keyHash.keySet())
864 {
865 if (k instanceof GroupAttrName && ((GroupAttrName<?>) k).groupId.equals(key))
866 {
867 itemsToRemove.add(k);
868 }
869 }
870
871
872 for (K fullKey : itemsToRemove)
873 {
874
875
876 performSingleKeyRemoval(fullKey);
877 removed = true;
878
879 }
880
881 return removed;
882 }
883
884
885
886
887
888
889
890
891
892
893 private boolean performSingleKeyRemoval(K key)
894 {
895 boolean removed;
896
897 IndexedDiskElementDescriptor ded = keyHash.remove(key);
898 removed = ded != null;
899 addToRecycleBin(ded);
900
901 if (log.isDebugEnabled())
902 {
903 log.debug(logCacheName + "Disk removal: Removed from key hash, key [" + key + "] removed = " + removed);
904 }
905 return removed;
906 }
907
908
909
910
    /**
     * Removes all elements by resetting the cache (deleting and recreating the
     * underlying files). Logs a REMOVEALL cache event around the operation.
     */
    @Override
    public void processRemoveAll()
    {
        ICacheEvent<String> cacheEvent = createICacheEvent(getCacheName(), "all", ICacheEventLogger.REMOVEALL_EVENT);
        try
        {
            reset();
        }
        finally
        {
            logICacheEvent(cacheEvent);
        }
    }
924
925
926
927
928
929
    /**
     * Wipes the cache: closes and deletes both files, recreates them empty, and
     * reinitializes the recycle bin and key map. Called on removeAll and after
     * a read failure that suggests corruption.
     */
    private void reset()
    {
        if (log.isWarnEnabled())
        {
            log.warn(logCacheName + "Resetting cache");
        }

        try
        {
            storageLock.writeLock().lock();

            if (dataFile != null)
            {
                dataFile.close();
            }
            // Deletion failure is only logged: the files are recreated below.
            File dataFileTemp = new File(rafDir, fileName + ".data");
            boolean result = dataFileTemp.delete();
            if (!result && log.isDebugEnabled())
            {
                log.debug("Could not delete file " + dataFileTemp);
            }

            if (keyFile != null)
            {
                keyFile.close();
            }
            File keyFileTemp = new File(rafDir, fileName + ".key");
            result = keyFileTemp.delete();
            if (!result && log.isDebugEnabled())
            {
                log.debug("Could not delete file " + keyFileTemp);
            }

            dataFile = new IndexedDisk(new File(rafDir, fileName + ".data"), getElementSerializer());
            keyFile = new IndexedDisk(new File(rafDir, fileName + ".key"), getElementSerializer());

            initializeRecycleBin();

            initializeKeyMap();
        }
        catch (IOException e)
        {
            log.error(logCacheName + "Failure reseting state", e);
        }
        finally
        {
            storageLock.writeLock().unlock();
        }
    }
979
980
981
982
983
    /**
     * Creates an empty recycle bin, discarding any previously tracked free
     * slots.
     */
    private void initializeRecycleBin()
    {
        recycle = new ConcurrentSkipListSet<IndexedDiskElementDescriptor>();
    }
988
989
990
991
992 private void initializeKeyMap()
993 {
994 keyHash = null;
995 if (maxKeySize >= 0)
996 {
997 if (this.diskLimitType == DiskLimitType.COUNT)
998 {
999 keyHash = new LRUMapCountLimited(maxKeySize);
1000 }
1001 else
1002 {
1003 keyHash = new LRUMapSizeLimited(maxKeySize);
1004 }
1005
1006 if (log.isInfoEnabled())
1007 {
1008 log.info(logCacheName + "Set maxKeySize to: '" + maxKeySize + "'");
1009 }
1010 }
1011 else
1012 {
1013
1014 keyHash = new HashMap<K, IndexedDiskElementDescriptor>();
1015
1016 if (log.isInfoEnabled())
1017 {
1018 log.info(logCacheName + "Set maxKeySize to unlimited'");
1019 }
1020 }
1021 }
1022
1023
1024
1025
1026
1027
1028
    /**
     * Disposes of the cache from a dedicated thread, waiting up to 60 seconds
     * for the disposal (which may include a shutdown optimization) to finish.
     * Logs a DISPOSE cache event around the operation.
     */
    @Override
    public void processDispose()
    {
        ICacheEvent<String> cacheEvent = createICacheEvent(getCacheName(), "none", ICacheEventLogger.DISPOSE_EVENT);
        try
        {
            Runnable disR = new Runnable()
            {
                @Override
                public void run()
                {
                    disposeInternal();
                }
            };
            Thread t = new Thread(disR, "IndexedDiskCache-DisposalThread");
            t.start();

            try
            {
                // Bound the wait so a hung disposal cannot block shutdown forever.
                t.join(60 * 1000);
            }
            catch (InterruptedException ex)
            {
                log.error(logCacheName + "Interrupted while waiting for disposal thread to finish.", ex);
            }
        }
        finally
        {
            logICacheEvent(cacheEvent);
        }
    }
1060
1061
1062
1063
    /**
     * Performs the actual shutdown: marks the cache dead, waits for (or runs) a
     * final optimization, persists the key index, and closes both files.
     */
    protected void disposeInternal()
    {
        if (!isAlive())
        {
            log.error(logCacheName + "Not alive and dispose was called, filename: " + fileName);
            return;
        }

        // Stop accepting new work before touching the files.
        setAlive(false);

        Thread optimizationThread = currentOptimizationThread;
        if (isRealTimeOptimizationEnabled && optimizationThread != null)
        {
            // An optimization pass is running; let it finish first.
            if (log.isDebugEnabled())
            {
                log.debug(logCacheName + "In dispose, optimization already " + "in progress; waiting for completion.");
            }
            try
            {
                optimizationThread.join();
            }
            catch (InterruptedException e)
            {
                log.error(logCacheName + "Unable to join current optimization thread.", e);
            }
        }
        else if (isShutdownOptimizationEnabled && this.getBytesFree() > 0)
        {
            // Reclaim dead space once before persisting the index.
            optimizeFile();
        }

        saveKeys();

        try
        {
            if (log.isDebugEnabled())
            {
                log.debug(logCacheName + "Closing files, base filename: " + fileName);
            }
            dataFile.close();
            dataFile = null;
            keyFile.close();
            keyFile = null;
        }
        catch (IOException e)
        {
            log.error(logCacheName + "Failure closing files in dispose, filename: " + fileName, e);
        }

        if (log.isInfoEnabled())
        {
            log.info(logCacheName + "Shutdown complete.");
        }
    }
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
    /**
     * Marks a slot in the data file as free: its bytes count toward bytesFree
     * and, when recycling is on, the descriptor is offered for reuse by later
     * writes.
     * <p>
     * Only the read lock is taken: the recycle set and bytesFree are themselves
     * thread-safe, and ReentrantReadWriteLock permits acquiring the read lock
     * while already holding the write lock (several callers do).
     *
     * @param ded the freed descriptor; null is ignored
     */
    protected void addToRecycleBin(IndexedDiskElementDescriptor ded)
    {
        if (ded != null)
        {
            storageLock.readLock().lock();

            try
            {
                this.adjustBytesFree(ded, true);

                if (doRecycle)
                {
                    recycle.add(ded);
                    if (log.isDebugEnabled())
                    {
                        log.debug(logCacheName + "recycled ded" + ded);
                    }

                }
            }
            finally
            {
                storageLock.readLock().unlock();
            }
        }
    }
1159
1160
1161
1162
    /**
     * Kicks off a background optimization pass once the number of removals
     * since the last pass reaches the configured threshold. The write lock plus
     * the volatile currentOptimizationThread field form a double-checked guard
     * so that at most one optimization thread exists at a time.
     * <p>
     * NOTE(review): if two callers race past the outer null check, the loser
     * can reach start() on a thread the winner already started, which would
     * throw IllegalThreadStateException — confirm whether callers always hold
     * the write lock here.
     */
    protected void doOptimizeRealTime()
    {
        if (isRealTimeOptimizationEnabled && !isOptimizing && removeCount++ >= cattr.getOptimizeAtRemoveCount())
        {
            isOptimizing = true;

            if (log.isInfoEnabled())
            {
                log.info(logCacheName + "Optimizing file. removeCount [" + removeCount + "] OptimizeAtRemoveCount ["
                    + cattr.getOptimizeAtRemoveCount() + "]");
            }

            if (currentOptimizationThread == null)
            {
                storageLock.writeLock().lock();

                try
                {
                    // Re-check under the lock; another caller may have won.
                    if (currentOptimizationThread == null)
                    {
                        currentOptimizationThread = new Thread(new Runnable()
                        {
                            @Override
                            public void run()
                            {
                                optimizeFile();

                                // Allow future passes to spawn a new thread.
                                currentOptimizationThread = null;
                            }
                        }, "IndexedDiskCache-OptimizationThread");
                    }
                }
                finally
                {
                    storageLock.writeLock().unlock();
                }

                if (currentOptimizationThread != null)
                {
                    currentOptimizationThread.start();
                }
            }
        }
    }
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
    /**
     * Defragments the data file: moves all live records to the front of the
     * file in position order, replays records written while the pass was
     * running, truncates the tail, and clears the free-space bookkeeping.
     * <p>
     * The bulk of the work runs without holding the write lock; puts that
     * arrive meanwhile are captured via queueInput/queuedPutList and
     * defragmented in a second, locked phase.
     */
    protected void optimizeFile()
    {
        ElapsedTimer timer = new ElapsedTimer();
        timesOptimized++;
        if (log.isInfoEnabled())
        {
            log.info(logCacheName + "Beginning Optimization #" + timesOptimized);
        }

        // Descriptors of all live records, sorted by file position.
        IndexedDiskElementDescriptor[] defragList = null;

        storageLock.writeLock().lock();

        try
        {
            // From here on, concurrent puts are also queued for phase two.
            queueInput = true;
            // Stop handing out recycled slots while the file is rearranged.
            doRecycle = false;
            defragList = createPositionSortedDescriptorList();
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        // Phase one: compact the snapshot (lock is taken per record inside).
        long expectedNextPos = defragFile(defragList, 0);

        storageLock.writeLock().lock();

        try
        {
            try
            {
                // Phase two: compact anything written during phase one.
                if (!queuedPutList.isEmpty())
                {
                    defragList = queuedPutList.toArray(new IndexedDiskElementDescriptor[queuedPutList.size()]);

                    expectedNextPos = defragFile(defragList, expectedNextPos);
                }

                // Drop the now-unused tail of the file.
                dataFile.truncate(expectedNextPos);
            }
            catch (IOException e)
            {
                log.error(logCacheName + "Error optimizing queued puts.", e);
            }

            // Reset bookkeeping: the file has no free holes any more.
            removeCount = 0;
            resetBytesFree();
            initializeRecycleBin();
            queuedPutList.clear();
            queueInput = false;

            doRecycle = true;
            isOptimizing = false;
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        if (log.isInfoEnabled())
        {
            log.info(logCacheName + "Finished #" + timesOptimized + " Optimization took " + timer.getElapsedTimeString());
        }
    }
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
    /**
     * Slides each record in the given position-sorted list down so the records
     * become contiguous, starting at startingPos. The write lock is taken and
     * released per record so other operations can interleave.
     *
     * @param defragList descriptors sorted by ascending file position
     * @param startingPos file offset at which the first record should start
     * @return the offset just past the last moved record (the new logical end
     *         of file), or 0 if an I/O error occurred
     */
    private long defragFile(IndexedDiskElementDescriptor[] defragList, long startingPos)
    {
        ElapsedTimer timer = new ElapsedTimer();
        long preFileSize = 0;
        long postFileSize = 0;
        long expectedNextPos = 0;
        try
        {
            preFileSize = this.dataFile.length();

            expectedNextPos = startingPos;
            for (int i = 0; i < defragList.length; i++)
            {
                storageLock.writeLock().lock();
                try
                {
                    if (expectedNextPos != defragList[i].pos)
                    {
                        // Presumably move() updates the descriptor's pos as it
                        // copies — the next line relies on that; TODO confirm.
                        dataFile.move(defragList[i], expectedNextPos);
                    }
                    expectedNextPos = defragList[i].pos + IndexedDisk.HEADER_SIZE_BYTES + defragList[i].len;
                }
                finally
                {
                    storageLock.writeLock().unlock();
                }
            }

            postFileSize = this.dataFile.length();

            return expectedNextPos;
        }
        catch (IOException e)
        {
            log.error(logCacheName + "Error occurred during defragmentation.", e);
        }
        finally
        {
            if (log.isInfoEnabled())
            {
                log.info(logCacheName + "Defragmentation took " + timer.getElapsedTimeString() + ". File Size (before="
                    + preFileSize + ") (after=" + postFileSize + ") (truncating to " + expectedNextPos + ")");
            }
        }

        // NOTE(review): returning 0 after an I/O error makes optimizeFile()
        // truncate the data file to offset 0, discarding all records. Looks
        // like expectedNextPos would be safer — confirm intended behavior.
        return 0;
    }
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372 private IndexedDiskElementDescriptor[] createPositionSortedDescriptorList()
1373 {
1374 IndexedDiskElementDescriptor[] defragList = new IndexedDiskElementDescriptor[keyHash.size()];
1375 Iterator<Map.Entry<K, IndexedDiskElementDescriptor>> iterator = keyHash.entrySet().iterator();
1376 for (int i = 0; iterator.hasNext(); i++)
1377 {
1378 Map.Entry<K, IndexedDiskElementDescriptor> next = iterator.next();
1379 defragList[i] = next.getValue();
1380 }
1381
1382 Arrays.sort(defragList, new PositionComparator());
1383
1384 return defragList;
1385 }
1386
1387
1388
1389
1390
1391
1392
    /**
     * @return the number of keys currently in the index
     */
    @Override
    public int getSize()
    {
        return keyHash.size();
    }

    /**
     * @return the number of free slots currently tracked in the recycle bin
     */
    protected int getRecyleBinSize()
    {
        return this.recycle.size();
    }

    /**
     * @return how many times a recycled slot has been reused for a write
     */
    protected int getRecyleCount()
    {
        return this.recycleCnt;
    }

    /**
     * @return the number of bytes in the data file occupied by removed or
     *         replaced elements
     */
    protected long getBytesFree()
    {
        return this.bytesFree.get();
    }

    /**
     * Resets the free-byte counter to zero (called after defragmentation).
     */
    private void resetBytesFree()
    {
        this.bytesFree.set(0);
    }
1440
1441
1442
1443
1444
1445
1446
1447
1448 private void adjustBytesFree(IndexedDiskElementDescriptor ded, boolean add)
1449 {
1450 if (ded != null)
1451 {
1452 int amount = ded.len + IndexedDisk.HEADER_SIZE_BYTES;
1453
1454 if (add)
1455 {
1456 this.bytesFree.addAndGet(amount);
1457 }
1458 else
1459 {
1460 this.bytesFree.addAndGet(-amount);
1461 }
1462 }
1463 }
1464
1465
1466
1467
1468
1469
1470
1471
1472 protected long getDataFileSize() throws IOException
1473 {
1474 long size = 0;
1475
1476 storageLock.readLock().lock();
1477
1478 try
1479 {
1480 if (dataFile != null)
1481 {
1482 size = dataFile.length();
1483 }
1484 }
1485 finally
1486 {
1487 storageLock.readLock().unlock();
1488 }
1489
1490 return size;
1491 }
1492
1493
1494
1495
    /**
     * Dumps all keys, their descriptors, and their values to the debug log.
     */
    public void dump()
    {
        dump(true);
    }

    /**
     * Dumps each key and its descriptor (position/length) to the debug log,
     * optionally reading and including each element's value.
     *
     * @param dumpValues when true, also fetch and log each value via get()
     */
    public void dump(boolean dumpValues)
    {
        if (log.isDebugEnabled())
        {
            log.debug(logCacheName + "[dump] Number of keys: " + keyHash.size());

            for (Map.Entry<K, IndexedDiskElementDescriptor> e : keyHash.entrySet())
            {
                K key = e.getKey();
                IndexedDiskElementDescriptor ded = e.getValue();

                log.debug(logCacheName + "[dump] Disk element, key: " + key + ", pos: " + ded.pos + ", ded.len" + ded.len
                    + (dumpValues ? ", val: " + get(key) : ""));
            }
        }
    }
1524
1525
1526
1527
    /**
     * @return the configuration attributes this cache was built with
     */
    @Override
    public AuxiliaryCacheAttributes getAuxiliaryCacheAttributes()
    {
        return this.cattr;
    }

    /**
     * Collects a snapshot of runtime statistics (map size, file length, hit
     * count, free bytes, optimization counters) plus the superclass stats.
     *
     * @return the statistics for this region's disk cache
     */
    @Override
    public synchronized IStats getStatistics()
    {
        IStats stats = new Stats();
        stats.setTypeName("Indexed Disk Cache");

        ArrayList<IStatElement<?>> elems = new ArrayList<IStatElement<?>>();

        elems.add(new StatElement<Boolean>("Is Alive", Boolean.valueOf(isAlive())));
        elems.add(new StatElement<Integer>("Key Map Size", Integer.valueOf(this.keyHash != null ? this.keyHash.size() : -1)));
        try
        {
            elems
                .add(new StatElement<Long>("Data File Length", Long.valueOf(this.dataFile != null ? this.dataFile.length() : -1L)));
        }
        catch (IOException e)
        {
            log.error(e);
        }
        elems.add(new StatElement<Integer>("Max Key Size", this.maxKeySize));
        elems.add(new StatElement<AtomicInteger>("Hit Count", this.hitCount));
        elems.add(new StatElement<AtomicLong>("Bytes Free", this.bytesFree));
        elems.add(new StatElement<Integer>("Optimize Operation Count", Integer.valueOf(this.removeCount)));
        elems.add(new StatElement<Integer>("Times Optimized", Integer.valueOf(this.timesOptimized)));
        elems.add(new StatElement<Integer>("Recycle Count", Integer.valueOf(this.recycleCnt)));
        elems.add(new StatElement<Integer>("Recycle Bin Size", Integer.valueOf(this.recycle.size())));
        elems.add(new StatElement<Integer>("Startup Size", Integer.valueOf(this.startupSize)));

        // Include the common disk-cache statistics from the superclass.
        IStats sStats = super.getStatistics();
        elems.addAll(sStats.getStatElements());

        stats.setStatElements(elems);

        return stats;
    }
1576
1577
1578
1579
1580
1581
1582
    /**
     * @return how many optimization passes have run (exposed for tests/stats)
     */
    protected int getTimesOptimized()
    {
        return timesOptimized;
    }

    /**
     * @return the file system path of the data file
     */
    @Override
    protected String getDiskLocation()
    {
        return dataFile.getFilePath();
    }
1599
1600
1601
1602
1603
1604 protected static final class PositionComparator implements Comparator<IndexedDiskElementDescriptor>, Serializable
1605 {
1606
1607 private static final long serialVersionUID = -8387365338590814113L;
1608
1609
1610
1611
1612
1613
1614
1615 @Override
1616 public int compare(IndexedDiskElementDescriptor ded1, IndexedDiskElementDescriptor ded2)
1617 {
1618 if (ded1.pos < ded2.pos)
1619 {
1620 return -1;
1621 }
1622 else if (ded1.pos == ded2.pos)
1623 {
1624 return 0;
1625 }
1626 else
1627 {
1628 return 1;
1629 }
1630 }
1631 }
1632
1633
1634
1635
1636
    /**
     * Key map limited by the accumulated size of the referenced elements (in
     * rounded-up KB chunks) rather than by entry count. When the total exceeds
     * maxSize, LRU entries are evicted and their disk slots recycled.
     */
    public class LRUMapSizeLimited extends AbstractLRUMap<K, IndexedDiskElementDescriptor>
    {
        /** Tag for this map implementation. */
        public static final String TAG = "orig";

        // Running total of (len + header) in rounded-up KB chunks.
        private AtomicInteger contentSize;
        // Maximum allowed contentSize; <= 0 disables eviction.
        private int maxSize;

        /**
         * Creates a map with no size limit.
         */
        public LRUMapSizeLimited()
        {
            this(-1);
        }

        /**
         * @param maxKeySize maximum total content size in KB chunks; <= 0 for
         *            unlimited
         */
        public LRUMapSizeLimited(int maxKeySize)
        {
            super();
            this.maxSize = maxKeySize;
            this.contentSize = new AtomicInteger(0);
        }

        // Subtracts the entry's rounded-up KB size from the running total.
        // (x / -1024 - 1 is the exact negation of x / 1024 + 1 for x >= 0.)
        private void subLengthFromCacheSize(IndexedDiskElementDescriptor value)
        {
            contentSize.addAndGet((value.len + IndexedDisk.HEADER_SIZE_BYTES) / -1024 - 1);
        }

        // Adds the entry's size in KB, rounded up, to the running total.
        private void addLengthToCacheSize(IndexedDiskElementDescriptor value)
        {
            contentSize.addAndGet((value.len + IndexedDisk.HEADER_SIZE_BYTES) / 1024 + 1);
        }

        @Override
        public IndexedDiskElementDescriptor put(K key, IndexedDiskElementDescriptor value)
        {
            IndexedDiskElementDescriptor oldValue = null;

            try
            {
                oldValue = super.put(key, value);
            }
            finally
            {
                // Keep the size total consistent even if super.put throws.
                if (value != null)
                {
                    addLengthToCacheSize(value);
                }
                if (oldValue != null)
                {
                    subLengthFromCacheSize(oldValue);
                }
            }

            return oldValue;
        }

        @Override
        public IndexedDiskElementDescriptor remove(Object key)
        {
            IndexedDiskElementDescriptor value = null;

            try
            {
                value = super.remove(key);
                return value;
            }
            finally
            {
                if (value != null)
                {
                    subLengthFromCacheSize(value);
                }
            }
        }

        /**
         * Called by the LRU machinery when an entry is evicted: updates the
         * size total, recycles the element's disk slot, and may trigger a
         * real-time optimization.
         *
         * @param key the evicted key
         * @param value the evicted descriptor (may be null)
         */
        @Override
        protected void processRemovedLRU(K key, IndexedDiskElementDescriptor value)
        {
            if (value != null)
            {
                subLengthFromCacheSize(value);
            }

            addToRecycleBin(value);

            if (log.isDebugEnabled())
            {
                log.debug(logCacheName + "Removing key: [" + key + "] from key store.");
                log.debug(logCacheName + "Key store size: [" + this.size() + "].");
            }

            doOptimizeRealTime();
        }

        @Override
        protected boolean shouldRemove()
        {
            return maxSize > 0 && contentSize.get() > maxSize && this.size() > 0;
        }
    }
1755
1756
1757
1758
1759
1760
    /**
     * Key map limited by entry count: holds at most maxKeySize keys, evicting
     * the least recently used entry and recycling its disk slot when full.
     */
    public class LRUMapCountLimited extends LRUMap<K, IndexedDiskElementDescriptor>

    {
        /**
         * @param maxKeySize maximum number of keys to keep
         */
        public LRUMapCountLimited(int maxKeySize)
        {
            super(maxKeySize);
        }

        /**
         * Called by the LRU machinery when an entry is evicted: recycles the
         * element's disk slot and may trigger a real-time optimization.
         *
         * @param key the evicted key
         * @param value the evicted descriptor (may be null)
         */
        @Override
        protected void processRemovedLRU(K key, IndexedDiskElementDescriptor value)
        {
            addToRecycleBin(value);
            if (log.isDebugEnabled())
            {
                log.debug(logCacheName + "Removing key: [" + key + "] from key store.");
                log.debug(logCacheName + "Key store size: [" + this.size() + "].");
            }

            doOptimizeRealTime();
        }
    }
1790 }