package org.apache.commons.jcs3.auxiliary.disk.indexed;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.commons.jcs3.auxiliary.AuxiliaryCacheAttributes;
import org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache;
import org.apache.commons.jcs3.auxiliary.disk.behavior.IDiskCacheAttributes.DiskLimitType;
import org.apache.commons.jcs3.engine.behavior.ICacheElement;
import org.apache.commons.jcs3.engine.behavior.IElementSerializer;
import org.apache.commons.jcs3.engine.control.group.GroupAttrName;
import org.apache.commons.jcs3.engine.control.group.GroupId;
import org.apache.commons.jcs3.engine.logging.behavior.ICacheEvent;
import org.apache.commons.jcs3.engine.logging.behavior.ICacheEventLogger;
import org.apache.commons.jcs3.engine.stats.StatElement;
import org.apache.commons.jcs3.engine.stats.Stats;
import org.apache.commons.jcs3.engine.stats.behavior.IStatElement;
import org.apache.commons.jcs3.engine.stats.behavior.IStats;
import org.apache.commons.jcs3.log.Log;
import org.apache.commons.jcs3.log.LogManager;
import org.apache.commons.jcs3.utils.struct.AbstractLRUMap;
import org.apache.commons.jcs3.utils.struct.LRUMap;
import org.apache.commons.jcs3.utils.timing.ElapsedTimer;

/**
 * Disk cache that uses a RandomAccessFile with keys stored in memory. The maximum number of keys
 * stored in memory is configurable. The disk cache tries to recycle spots on disk to limit file
 * expansion.
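 * <p>
 * A minimal construction sketch (a hedged example: the setter names below are the usual
 * JavaBean accessors assumed for {@link IndexedDiskCacheAttributes}; in practice the cache
 * is normally built by the auxiliary cache factory from configuration):
 * <pre>{@code
 * IndexedDiskCacheAttributes attrs = new IndexedDiskCacheAttributes();
 * attrs.setCacheName("testRegion");        // becomes the base name of the .data/.key files
 * attrs.setDiskPath("target/disk-cache");  // root directory for the cache files
 * attrs.setMaxKeySize(10000);              // maximum number of keys kept in memory
 * IndexedDiskCache<String, String> cache = new IndexedDiskCache<>(attrs);
 * }</pre>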
 */
public class IndexedDiskCache<K, V> extends AbstractDiskCache<K, V>
{
    /** The logger */
    private static final Log log = LogManager.getLog(IndexedDiskCache.class);

    /** Cache name used in log messages */
    protected final String logCacheName;

    /** The name of the file where the data is stored */
    private final String fileName;

    /** The IndexedDisk manages reads and writes to the data file. */
    private IndexedDisk dataFile;

    /** The IndexedDisk manages reads and writes to the key file. */
    private IndexedDisk keyFile;

    /** Map containing the keys and disk offsets. */
    private final Map<K, IndexedDiskElementDescriptor> keyHash;

    /** The maximum number of keys that we will keep in memory. */
    private final int maxKeySize;

    /** The directory where the cache data and key files are stored. */
    private File rafDir;

    /** Should we keep adding to the recycle bin. False during optimization. */
    private boolean doRecycle = true;

    /** Should we optimize in real time? */
    private boolean isRealTimeOptimizationEnabled = true;

    /** Should we optimize on shutdown. */
    private boolean isShutdownOptimizationEnabled = true;

    /** Are we currently optimizing the files? */
    private boolean isOptimizing;

    /** The number of times the file has been optimized. */
    private int timesOptimized;

    /** The thread optimizing the file. */
    private volatile Thread currentOptimizationThread;

    /** Counts removes; used to decide when to optimize in real time. */
    private int removeCount;

    /** Should we queue puts. True when optimizing. We write the queue post optimization. */
    private boolean queueInput;

    /** List where puts made during optimization are queued. */
    private final ConcurrentSkipListSet<IndexedDiskElementDescriptor> queuedPutList;

    /** RECYCLE BIN -- set of empty spots */
    private final ConcurrentSkipListSet<IndexedDiskElementDescriptor> recycle;

    /** User configurable parameters */
    private final IndexedDiskCacheAttributes cattr;

    /** How many slots have we recycled. */
    private int recycleCnt;

    /** How many items were there on startup. */
    private int startupSize;

    /** the number of bytes free on disk. */
    private final AtomicLong bytesFree = new AtomicLong();

    /** Mode we are working on (size or count limited). */
    private DiskLimitType diskLimitType = DiskLimitType.COUNT;

    /** simple stat */
    private final AtomicInteger hitCount = new AtomicInteger(0);

    /**
     * Use this lock to synchronize reads and writes to the underlying storage mechanism.
     */
    protected ReentrantReadWriteLock storageLock = new ReentrantReadWriteLock();

    /**
     * Constructor for the DiskCache object.
     * <p>
     *
     * @param cacheAttributes
     */
    public IndexedDiskCache(final IndexedDiskCacheAttributes cacheAttributes)
    {
        this(cacheAttributes, null);
    }

    /**
     * Constructor for the DiskCache object.
     * <p>
     *
     * @param cattr
     * @param elementSerializer
     *            used if supplied; a null value will not override the default serializer
     */
    public IndexedDiskCache(final IndexedDiskCacheAttributes cattr, final IElementSerializer elementSerializer)
    {
        super(cattr);

        setElementSerializer(elementSerializer);

        this.cattr = cattr;
        this.maxKeySize = cattr.getMaxKeySize();
        this.isRealTimeOptimizationEnabled = cattr.getOptimizeAtRemoveCount() > 0;
        this.isShutdownOptimizationEnabled = cattr.isOptimizeOnShutdown();
        this.logCacheName = "Region [" + getCacheName() + "] ";
        this.diskLimitType = cattr.getDiskLimitType();
        // Make a clean file name
        this.fileName = getCacheName().replaceAll("[^a-zA-Z0-9-_\\.]", "_");
        this.keyHash = createInitialKeyMap();
        this.queuedPutList = new ConcurrentSkipListSet<>(new PositionComparator());
        this.recycle = new ConcurrentSkipListSet<>();

        try
        {
            initializeFileSystem(cattr);
            initializeKeysAndData(cattr);

            // Initialization finished successfully, so set alive to true.
            setAlive(true);
            log.info("{0}: Indexed Disk Cache is alive.", logCacheName);

            // TODO: Should we improve detection of whether or not the file should be optimized.
            if (isRealTimeOptimizationEnabled && !keyHash.isEmpty())
            {
                // Kick off a real time optimization, in case we didn't do a final optimization.
                doOptimizeRealTime();
            }
        }
        catch (final IOException e)
        {
            log.error("{0}: Failure initializing for fileName: {1} and directory: {2}",
                    logCacheName, fileName, this.rafDir.getAbsolutePath(), e);
        }
    }

    /**
     * Records the root directory in which the cache files will be stored.
     * <p>
     *
     * @param cattr
     */
    private void initializeFileSystem(final IndexedDiskCacheAttributes cattr)
    {
        this.rafDir = cattr.getDiskPath();
        log.info("{0}: Cache file root directory: {1}", logCacheName, rafDir);
    }

    /**
     * Creates the key and data disk caches.
     * <p>
     * Loads any keys if they are present and ClearDiskOnStartup is false.
     * <p>
     *
     * @param cattr
     * @throws IOException
     */
    private void initializeKeysAndData(final IndexedDiskCacheAttributes cattr) throws IOException
    {
        this.dataFile = new IndexedDisk(new File(rafDir, fileName + ".data"), getElementSerializer());
        this.keyFile = new IndexedDisk(new File(rafDir, fileName + ".key"), getElementSerializer());

        if (cattr.isClearDiskOnStartup())
        {
            log.info("{0}: ClearDiskOnStartup is set to true. Ignoring any persisted data.",
                    logCacheName);
            initializeEmptyStore();
        }
        else if (!keyFile.isEmpty())
        {
            // If the key file has contents, try to initialize the keys
            // from it. If no keys are loaded, reset the data file.
            initializeStoreFromPersistedData();
        }
        else
        {
            // Otherwise start with a new empty map for the keys, and reset
            // the data file if it has contents.
            initializeEmptyStore();
        }
    }

    /**
     * Initializes an empty disk cache.
     * <p>
     *
     * @throws IOException
     */
    private void initializeEmptyStore() throws IOException
    {
        this.keyHash.clear();

        if (!dataFile.isEmpty())
        {
            dataFile.reset();
        }
    }

    /**
     * Loads any persisted data and checks for consistency. If there is a consistency issue, the
     * files are cleared.
     * <p>
     *
     * @throws IOException
     */
    private void initializeStoreFromPersistedData() throws IOException
    {
        loadKeys();

        if (keyHash.isEmpty())
        {
            dataFile.reset();
        }
        else
        {
            final boolean isOk = checkKeyDataConsistency(false);
            if (!isOk)
            {
                keyHash.clear();
                keyFile.reset();
                dataFile.reset();
                log.warn("{0}: Corruption detected. Resetting data and keys files.", logCacheName);
            }
            else
            {
                synchronized (this)
                {
                    startupSize = keyHash.size();
                }
            }
        }
    }

    /**
     * Loads the keys from the .key file. The keys are stored in a HashMap on disk. This is
     * converted into an LRUMap.
     */
    protected void loadKeys()
    {
        log.debug("{0}: Loading keys for {1}", () -> logCacheName, () -> keyFile.toString());

        storageLock.writeLock().lock();

        try
        {
            // clear the key map before use.
            keyHash.clear();

            final HashMap<K, IndexedDiskElementDescriptor> keys = keyFile.readObject(
                new IndexedDiskElementDescriptor(0, (int) keyFile.length() - IndexedDisk.HEADER_SIZE_BYTES));

            if (keys != null)
            {
                log.debug("{0}: Found {1} keys in the keys file.", logCacheName, keys.size());

                keyHash.putAll(keys);

                log.info("{0}: Loaded keys from [{1}], key count: {2}; up to {3} will be available.",
                        () -> logCacheName, () -> fileName, keyHash::size, () -> maxKeySize);
            }

            if (log.isTraceEnabled())
            {
                dump(false);
            }
        }
        catch (final Exception e)
        {
            log.error("{0}: Problem loading keys for file {1}", logCacheName, fileName, e);
        }
        finally
        {
            storageLock.writeLock().unlock();
        }
    }

    /**
     * Check for minimal consistency between the keys and the datafile. Makes sure no starting
     * positions in the keys exceed the file length.
     * <p>
     * The caller should take the appropriate action if the keys and data are not consistent.
     *
     * @param checkForDedOverlaps
     *            if {@code true}, do a more thorough check by checking for
     *            data overlap
     * @return {@code true} if the test passes
     */
    private boolean checkKeyDataConsistency(final boolean checkForDedOverlaps)
    {
        final ElapsedTimer timer = new ElapsedTimer();
        log.debug("{0}: Performing initial consistency check", logCacheName);

        boolean isOk = true;
        try
        {
            final long fileLength = dataFile.length();

            final IndexedDiskElementDescriptor corruptDed = keyHash.values().stream()
                .filter(ded -> ded.pos + IndexedDisk.HEADER_SIZE_BYTES + ded.len > fileLength)
                .findFirst()
                .orElse(null);

            if (corruptDed != null)
            {
                isOk = false;
                log.warn("{0}: The dataFile is corrupted!\n raf.length() = {1}\n ded.pos = {2}",
                        logCacheName, fileLength, corruptDed.pos);
            }
            else if (checkForDedOverlaps)
            {
                isOk = checkForDedOverlaps(createPositionSortedDescriptorList());
            }
        }
        catch (final IOException e)
        {
            log.error(e);
            isOk = false;
        }

        log.info("{0}: Finished initial consistency check, isOk = {1} in {2}",
                logCacheName, isOk, timer.getElapsedTimeString());

        return isOk;
    }

    /**
     * Detects any overlapping elements. This expects a sorted list.
     * <p>
     * The total length of an item is IndexedDisk.HEADER_SIZE_BYTES + ded.len.
     * <p>
     *
     * @param sortedDescriptors
     * @return {@code true} if there are no overlaps, {@code false} otherwise.
     */
    protected boolean checkForDedOverlaps(final IndexedDiskElementDescriptor[] sortedDescriptors)
    {
        final ElapsedTimer timer = new ElapsedTimer();
        boolean isOk = true;
        long expectedNextPos = 0;
        for (final IndexedDiskElementDescriptor ded : sortedDescriptors)
        {
            if (expectedNextPos > ded.pos)
            {
                log.error("{0}: Corrupt file: overlapping deds {1}", logCacheName, ded);
                isOk = false;
                break;
            }
            expectedNextPos = ded.pos + IndexedDisk.HEADER_SIZE_BYTES + ded.len;
        }
        log.debug("{0}: Check for DED overlaps took {1} ms.", () -> logCacheName,
                timer::getElapsedTime);

        return isOk;
    }

    /**
     * Saves key file to disk. This converts the LRUMap to a HashMap for serialization.
     */
    protected void saveKeys()
    {
        try
        {
            log.info("{0}: Saving keys to: {1}, key count: {2}",
                    () -> logCacheName, () -> fileName, keyHash::size);

            keyFile.reset();

            final HashMap<K, IndexedDiskElementDescriptor> keys = new HashMap<>(keyHash);
            if (!keys.isEmpty())
            {
                keyFile.writeObject(keys, 0);
            }

            log.info("{0}: Finished saving keys.", logCacheName);
        }
        catch (final IOException e)
        {
            log.error("{0}: Problem storing keys.", logCacheName, e);
        }
    }

    /**
     * Update the disk cache. Called from the queue. Makes sure the item has not been retrieved
     * from purgatory while in the queue for disk. Removes items from purgatory when they go to disk.
     * <p>
     *
     * @param ce
     *            The ICacheElement&lt;K, V&gt; to put to disk.
     */
    @Override
    protected void processUpdate(final ICacheElement<K, V> ce)
    {
        if (!isAlive())
        {
            log.error("{0}: No longer alive; aborting put of key = {1}",
                    () -> logCacheName, ce::getKey);
            return;
        }

        log.debug("{0}: Storing element on disk, key: {1}",
                () -> logCacheName, ce::getKey);

        // old element with same key
        IndexedDiskElementDescriptor old = null;

        try
        {
            IndexedDiskElementDescriptor ded = null;
            final byte[] data = getElementSerializer().serialize(ce);

            // make sure this only locks for one particular cache region
            storageLock.writeLock().lock();
            try
            {
                old = keyHash.get(ce.getKey());

                // Item with the same key already exists in file.
                // Try to reuse the location if possible.
                if (old != null && data.length <= old.len)
                {
                    // Reuse the old ded. The defrag relies on ded updates by reference, not
                    // replacement.
                    ded = old;
                    ded.len = data.length;
                }
                else
                {
                    // we need this to compare in the recycle bin
                    ded = new IndexedDiskElementDescriptor(dataFile.length(), data.length);

                    if (doRecycle)
                    {
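                        // ceiling() returns the smallest descriptor in the bin that is
                        // at least as large as the new one in the bin's ordering, i.e.
                        // the tightest-fitting free slot that can hold the new data.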
                        final IndexedDiskElementDescriptor rep = recycle.ceiling(ded);
                        if (rep != null)
                        {
                            // remove element from recycle bin
                            recycle.remove(rep);
                            ded = rep;
                            ded.len = data.length;
                            recycleCnt++;
                            this.adjustBytesFree(ded, false);
                            log.debug("{0}: using recycled ded {1} rep.len = {2} ded.len = {3}",
                                    logCacheName, ded.pos, rep.len, ded.len);
                        }
                    }

                    // Put it in the map
                    keyHash.put(ce.getKey(), ded);

                    if (queueInput)
                    {
                        queuedPutList.add(ded);
                        log.debug("{0}: added to queued put list. {1}",
                                () -> logCacheName, queuedPutList::size);
                    }

                    // add the old slot to the recycle bin
                    if (old != null)
                    {
                        addToRecycleBin(old);
                    }
                }

                dataFile.write(ded, data);
            }
            finally
            {
                storageLock.writeLock().unlock();
            }

            log.debug("{0}: Put to file: {1}, key: {2}, position: {3}, size: {4}",
                    logCacheName, fileName, ce.getKey(), ded.pos, ded.len);
        }
        catch (final IOException e)
        {
            log.error("{0}: Failure updating element, key: {1} old: {2}",
                    logCacheName, ce.getKey(), old, e);
        }
    }

    /**
     * Looks up the descriptor for the key, then goes to disk to get the object.
     * <p>
     *
     * @param key
     * @return ICacheElement&lt;K, V&gt; or null
     * @see AbstractDiskCache#doGet
     */
    @Override
    protected ICacheElement<K, V> processGet(final K key)
    {
        if (!isAlive())
        {
            log.error("{0}: No longer alive so returning null for key = {1}",
                    logCacheName, key);
            return null;
        }

        log.debug("{0}: Trying to get from disk: {1}", logCacheName, key);

        ICacheElement<K, V> object = null;
        try
        {
            storageLock.readLock().lock();
            try
            {
                object = readElement(key);
            }
            finally
            {
                storageLock.readLock().unlock();
            }

            if (object != null)
            {
                hitCount.incrementAndGet();
            }
        }
        catch (final IOException ioe)
        {
            log.error("{0}: Failure getting from disk, key = {1}", logCacheName, key, ioe);
            reset();
        }
        return object;
    }

    /**
     * Gets matching items from the cache.
     * <p>
     *
     * @param pattern
     * @return a map of K key to ICacheElement&lt;K, V&gt; element, or an empty map if there is no
     *         data in the cache matching the pattern
     */
    @Override
    public Map<K, ICacheElement<K, V>> processGetMatching(final String pattern)
    {
        final Map<K, ICacheElement<K, V>> elements = new HashMap<>();
        Set<K> keyArray = null;
        storageLock.readLock().lock();
        try
        {
            keyArray = new HashSet<>(keyHash.keySet());
        }
        finally
        {
            storageLock.readLock().unlock();
        }

        final Set<K> matchingKeys = getKeyMatcher().getMatchingKeysFromArray(pattern, keyArray);

        for (final K key : matchingKeys)
        {
            final ICacheElement<K, V> element = processGet(key);
            if (element != null)
            {
                elements.put(key, element);
            }
        }
        return elements;
    }

    /**
     * Reads the item from disk.
     * <p>
     *
     * @param key
     * @return ICacheElement
     * @throws IOException
     */
    private ICacheElement<K, V> readElement(final K key) throws IOException
    {
        final IndexedDiskElementDescriptor ded = keyHash.get(key);

        if (ded != null)
        {
            log.debug("{0}: Found on disk, key: {1}", logCacheName, key);

            try
            {
                return dataFile.readObject(ded);
                // TODO consider checking key equality and throwing if there is a failure
            }
            catch (final IOException e)
            {
                log.error("{0}: IO Exception, Problem reading object from file", logCacheName, e);
                throw e;
            }
            catch (final Exception e)
            {
                log.error("{0}: Exception, Problem reading object from file", logCacheName, e);
                throw new IOException(logCacheName + "Problem reading object from disk.", e);
            }
        }

        return null;
    }

    /**
     * Return the keys in this cache.
     * <p>
     *
     * @see org.apache.commons.jcs3.auxiliary.disk.AbstractDiskCache#getKeySet()
     */
    @Override
    public Set<K> getKeySet() throws IOException
    {
        final HashSet<K> keys = new HashSet<>();

        storageLock.readLock().lock();

        try
        {
            keys.addAll(this.keyHash.keySet());
        }
        finally
        {
            storageLock.readLock().unlock();
        }

        return keys;
    }

    /**
     * Returns true if the removal was successful, or false if there is nothing to remove. The
     * current implementation always results in a disk orphan.
     * <p>
     *
     * @param key
     * @return true if at least one item was removed.
     */
    @Override
    protected boolean processRemove(final K key)
    {
        if (!isAlive())
        {
            log.error("{0}: No longer alive so returning false for key = {1}", logCacheName, key);
            return false;
        }

        if (key == null)
        {
            return false;
        }

        boolean removed = false;
        try
        {
            storageLock.writeLock().lock();

            if (key instanceof String && key.toString().endsWith(NAME_COMPONENT_DELIMITER))
            {
                removed = performPartialKeyRemoval((String) key);
            }
            else if (key instanceof GroupAttrName && ((GroupAttrName<?>) key).attrName == null)
            {
                removed = performGroupRemoval(((GroupAttrName<?>) key).groupId);
            }
            else
            {
                removed = performSingleKeyRemoval(key);
            }
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        // this increments the remove count.
        // there is no reason to call this if an item was not removed.
        if (removed)
        {
            doOptimizeRealTime();
        }

        return removed;
    }

    /**
     * Iterates over the keyset. Builds a list of matches. Removes all the keys in the list. Does
     * not remove via the iterator, since the map impl may not support it.
     * <p>
     * This operates under a lock obtained in doRemove().
     * <p>
     *
     * @param key
     * @return true if there was a match
     */
    private boolean performPartialKeyRemoval(final String key)
    {
        boolean removed = false;

        // remove all keys of the same name hierarchy.
        final List<K> itemsToRemove = new LinkedList<>();

        for (final K k : keyHash.keySet())
        {
            if (k instanceof String && k.toString().startsWith(key))
            {
                itemsToRemove.add(k);
            }
        }

        // remove matches.
        for (final K fullKey : itemsToRemove)
        {
            // Don't add to recycle bin here
            // https://issues.apache.org/jira/browse/JCS-67
            performSingleKeyRemoval(fullKey);
            removed = true;
            // TODO this needs to update the remove count separately
        }

        return removed;
    }

    /**
     * Remove all elements from the group. This does not use the iterator to remove. It builds a
     * list of group elements and then removes them one by one.
     * <p>
     * This operates under a lock obtained in doRemove().
     * <p>
     *
     * @param key
     * @return true if an element was removed
     */
    private boolean performGroupRemoval(final GroupId key)
    {
        boolean removed = false;

        // remove all keys of the same name group.
        final List<K> itemsToRemove = new LinkedList<>();

        for (final K k : keyHash.keySet())
        {
            if (k instanceof GroupAttrName && ((GroupAttrName<?>) k).groupId.equals(key))
            {
                itemsToRemove.add(k);
            }
        }

        // remove matches.
        for (final K fullKey : itemsToRemove)
        {
            // Don't add to recycle bin here
            // https://issues.apache.org/jira/browse/JCS-67
            performSingleKeyRemoval(fullKey);
            removed = true;
            // TODO this needs to update the remove count separately
        }

        return removed;
    }

    /**
     * Removes an individual key from the cache.
     * <p>
     * This operates under a lock obtained in doRemove().
     * <p>
     *
     * @param key
     * @return true if an item was removed.
     */
    private boolean performSingleKeyRemoval(final K key)
    {
        final boolean removed;
        // remove single item.
        final IndexedDiskElementDescriptor ded = keyHash.remove(key);
        removed = ded != null;
        addToRecycleBin(ded);

        log.debug("{0}: Disk removal: Removed from key hash, key [{1}] removed = {2}",
                logCacheName, key, removed);
        return removed;
    }

    /**
     * Remove all the items from the disk cache by resetting everything.
     */
    @Override
    public void processRemoveAll()
    {
        final ICacheEvent<String> cacheEvent =
                createICacheEvent(getCacheName(), "all", ICacheEventLogger.REMOVEALL_EVENT);
        try
        {
            reset();
        }
        finally
        {
            logICacheEvent(cacheEvent);
        }
    }

    /**
     * Reset effectively clears the disk cache, creating new files, recycle bins, and keymaps.
     * <p>
     * It can be used as a last resort to handle errors, to force a content update, or to remove all.
     */
    private void reset()
    {
        log.info("{0}: Resetting cache", logCacheName);

        try
        {
            storageLock.writeLock().lock();

            if (dataFile != null)
            {
                dataFile.close();
            }

            final File dataFileTemp = new File(rafDir, fileName + ".data");
            Files.delete(dataFileTemp.toPath());

            if (keyFile != null)
            {
                keyFile.close();
            }
            final File keyFileTemp = new File(rafDir, fileName + ".key");
            Files.delete(keyFileTemp.toPath());

            dataFile = new IndexedDisk(dataFileTemp, getElementSerializer());
            keyFile = new IndexedDisk(keyFileTemp, getElementSerializer());

            this.recycle.clear();
            this.keyHash.clear();
        }
        catch (final IOException e)
        {
            log.error("{0}: Failure resetting state", logCacheName, e);
        }
        finally
        {
            storageLock.writeLock().unlock();
        }
    }

    /**
     * Creates the map of keys to the descriptors that hold their index position on disk.
     *
     * @return a new empty Map for keys and IndexedDiskElementDescriptors
     */
    private Map<K, IndexedDiskElementDescriptor> createInitialKeyMap()
    {
        Map<K, IndexedDiskElementDescriptor> keyMap = null;
        if (maxKeySize >= 0)
        {
            if (this.diskLimitType == DiskLimitType.COUNT)
            {
                keyMap = new LRUMapCountLimited(maxKeySize);
            }
            else
            {
                keyMap = new LRUMapSizeLimited(maxKeySize);
            }

            log.info("{0}: Set maxKeySize to: \"{1}\"", logCacheName, maxKeySize);
        }
        else
        {
            // If no max size, use a plain map for memory and processing efficiency.
            keyMap = new HashMap<>();
            // keyHash = Collections.synchronizedMap( new HashMap() );
            log.info("{0}: Set maxKeySize to unlimited", logCacheName);
        }

        return keyMap;
    }

    /**
     * Dispose of the disk cache in a background thread. Joins against this thread to put a cap on
     * the disposal time.
     * <p>
     * TODO make dispose window configurable.
     */
    @Override
    public void processDispose()
    {
        final ICacheEvent<String> cacheEvent = createICacheEvent(getCacheName(), "none", ICacheEventLogger.DISPOSE_EVENT);
        try
        {
            final Thread t = new Thread(this::disposeInternal, "IndexedDiskCache-DisposalThread");
            t.start();
            // wait up to 60 seconds for dispose and then quit if not done.
            try
            {
                t.join(60 * 1000);
            }
            catch (final InterruptedException ex)
            {
                log.error("{0}: Interrupted while waiting for disposal thread to finish.",
                        logCacheName, ex);
            }
        }
        finally
        {
            logICacheEvent(cacheEvent);
        }
    }

    /**
     * Internal method that handles the disposal.
     */
    protected void disposeInternal()
    {
        if (!isAlive())
        {
            log.error("{0}: Not alive and dispose was called, filename: {1}",
                    logCacheName, fileName);
            return;
        }

        // Prevents any interaction with the cache while we're shutting down.
        setAlive(false);

        final Thread optimizationThread = currentOptimizationThread;
        if (isRealTimeOptimizationEnabled && optimizationThread != null)
        {
            // Join with the current optimization thread.
            log.debug("{0}: In dispose, optimization already in progress; waiting for completion.",
                    logCacheName);

            try
            {
                optimizationThread.join();
            }
            catch (final InterruptedException e)
            {
                log.error("{0}: Unable to join current optimization thread.",
                        logCacheName, e);
            }
        }
        else if (isShutdownOptimizationEnabled && this.getBytesFree() > 0)
        {
            optimizeFile();
        }

        saveKeys();

        try
        {
            log.debug("{0}: Closing files, base filename: {1}", logCacheName,
                    fileName);
            dataFile.close();
            dataFile = null;
            keyFile.close();
            keyFile = null;
        }
        catch (final IOException e)
        {
            log.error("{0}: Failure closing files in dispose, filename: {1}",
                    logCacheName, fileName, e);
        }

        log.info("{0}: Shutdown complete.", logCacheName);
    }

    /**
     * Add descriptor to recycle bin if it is not null. Adds the length of the item to the bytes
     * free.
     * <p>
     * This is called in three places: (1) When an item is removed. All item removals funnel down to the
     * performSingleKeyRemoval method. (2) When an item on disk is updated with a value that will not fit
     * in the previous slot. (3) When the max key size is reached, the freed slot will be added.
     * <p>
     *
     * @param ded
     */
    protected void addToRecycleBin(final IndexedDiskElementDescriptor ded)
    {
        // reuse the spot
        if (ded != null)
        {
            storageLock.readLock().lock();

            try
            {
                adjustBytesFree(ded, true);

                if (doRecycle)
                {
                    recycle.add(ded);
                    log.debug("{0}: recycled ded {1}", logCacheName, ded);
                }
            }
            finally
            {
                storageLock.readLock().unlock();
            }
        }
    }

    /**
     * Checks whether optimization is required and, if so, performs it.
     */
    protected void doOptimizeRealTime()
    {
        if (isRealTimeOptimizationEnabled && !isOptimizing
            && removeCount++ >= cattr.getOptimizeAtRemoveCount())
        {
            isOptimizing = true;

            log.info("{0}: Optimizing file. removeCount [{1}] OptimizeAtRemoveCount [{2}]",
                    logCacheName, removeCount, cattr.getOptimizeAtRemoveCount());

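            // Double-check under the storage write lock below, so that at most
            // one optimization thread is ever created at a time.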
            if (currentOptimizationThread == null)
            {
                storageLock.writeLock().lock();

                try
                {
                    if (currentOptimizationThread == null)
                    {
                        currentOptimizationThread = new Thread(() -> {
                            optimizeFile();
                            currentOptimizationThread = null;
                        }, "IndexedDiskCache-OptimizationThread");
                    }
                }
                finally
                {
                    storageLock.writeLock().unlock();
                }

                if (currentOptimizationThread != null)
                {
                    currentOptimizationThread.start();
                }
            }
        }
    }

    /**
     * File optimization is handled by this method. It works as follows:
     * <ol>
     * <li>Shut down recycling and turn on queuing of puts.</li>
     * <li>Take a snapshot of the current descriptors. If there are any removes, ignore them, as they will be compacted during the
     * next optimization.</li>
     * <li>Optimize the snapshot. For each descriptor:
     * <ol>
     * <li>Obtain the write-lock.</li>
     * <li>Shift the element on the disk, in order to compact out the free space.</li>
     * <li>Release the write-lock. This allows elements to still be accessible during optimization.</li>
     * </ol>
     * </li>
     * <li>Obtain the write-lock.</li>
     * <li>All queued puts are made at the end of the file. Optimize these under a single write-lock.</li>
     * <li>Truncate the file.</li>
     * <li>Release the write-lock.</li>
     * <li>Restore system to standard operation.</li>
     * </ol>
     */
    protected void optimizeFile()
    {
        final ElapsedTimer timer = new ElapsedTimer();
        timesOptimized++;
        log.info("{0}: Beginning Optimization #{1}", logCacheName, timesOptimized);

        // CREATE SNAPSHOT
        IndexedDiskElementDescriptor[] defragList = null;

        storageLock.writeLock().lock();

        try
        {
            queueInput = true;
            // shut off recycle while we're optimizing,
            doRecycle = false;
            defragList = createPositionSortedDescriptorList();
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        // Defrag the file outside of the write lock. This allows a move to be made,
        // and yet have the element still accessible for reading or writing.
        long expectedNextPos = defragFile(defragList, 0);

        // ADD THE QUEUED ITEMS to the end and then truncate
        storageLock.writeLock().lock();

        try
        {
            try
            {
                if (!queuedPutList.isEmpty())
                {
                    defragList = queuedPutList.toArray(new IndexedDiskElementDescriptor[0]);

                    // pack them at the end
                    expectedNextPos = defragFile(defragList, expectedNextPos);
                }
                // TRUNCATE THE FILE
                dataFile.truncate(expectedNextPos);
            }
            catch (final IOException e)
            {
                log.error("{0}: Error optimizing queued puts.", logCacheName, e);
            }

            // RESTORE NORMAL OPERATION
            removeCount = 0;
            resetBytesFree();
            this.recycle.clear();
            queuedPutList.clear();
            queueInput = false;
            // turn recycle back on.
            doRecycle = true;
            isOptimizing = false;
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        log.info("{0}: Finished #{1}, Optimization took {2}",
                logCacheName, timesOptimized, timer.getElapsedTimeString());
    }

    /**
     * Defragments the file in place by compacting out the free space (i.e., moving records
     * forward). If there were no gaps the resulting file would be the same size as the previous
     * file. This must be supplied an ordered defragList.
     * <p>
     *
     * @param defragList
     *            sorted list of descriptors for optimization
     * @param startingPos
     *            the start position in the file
     * @return the potential new file end
     */
    private long defragFile(final IndexedDiskElementDescriptor[] defragList, final long startingPos)
    {
        final ElapsedTimer timer = new ElapsedTimer();
        long preFileSize = 0;
        long postFileSize = 0;
        long expectedNextPos = 0;
        try
        {
            preFileSize = this.dataFile.length();
            // find the first gap in the disk and start defragging.
            expectedNextPos = startingPos;
            for (final IndexedDiskElementDescriptor element : defragList)
            {
                storageLock.writeLock().lock();
                try
                {
                    if (expectedNextPos != element.pos)
                    {
                        dataFile.move(element, expectedNextPos);
                    }
                    expectedNextPos = element.pos + IndexedDisk.HEADER_SIZE_BYTES + element.len;
                }
                finally
                {
                    storageLock.writeLock().unlock();
                }
            }

            postFileSize = this.dataFile.length();

            // this is the potential new file end
            return expectedNextPos;
        }
        catch (final IOException e)
        {
            log.error("{0}: Error occurred during defragmentation.", logCacheName, e);
        }
        finally
        {
            log.info("{0}: Defragmentation took {1}. File Size (before={2}) (after={3}) (truncating to {4})",
                    logCacheName, timer.getElapsedTimeString(), preFileSize, postFileSize, expectedNextPos);
        }

        return 0;
    }

    /**
     * Creates a snapshot of the IndexedDiskElementDescriptors in the keyHash and returns them
     * sorted by position in the dataFile.
     * <p>
     *
     * @return IndexedDiskElementDescriptor[]
     */
    private IndexedDiskElementDescriptor[] createPositionSortedDescriptorList()
    {
        final List<IndexedDiskElementDescriptor> defragList = new ArrayList<>(keyHash.values());
        Collections.sort(defragList, (ded1, ded2) -> Long.compare(ded1.pos, ded2.pos));

        return defragList.toArray(new IndexedDiskElementDescriptor[0]);
    }

    /**
     * Returns the current cache size.
     * <p>
     *
     * @return The size value
     */
    @Override
    public int getSize()
    {
        return keyHash.size();
    }

    /**
     * Returns the size of the recycle bin in number of elements.
     * <p>
     *
     * @return The number of items in the bin.
     */
    protected int getRecyleBinSize()
    {
        return this.recycle.size();
    }

    /**
     * Returns the number of times we have used spots from the recycle bin.
     * <p>
     *
     * @return The number of spots used.
     */
    protected int getRecyleCount()
    {
        return this.recycleCnt;
    }

    /**
     * Returns the number of bytes that are free. When an item is removed, its length is recorded.
     * When a spot from the recycle bin is reused, the length of the item stored is subtracted.
     * <p>
     *
     * @return The number of bytes free on the disk file.
     */
    protected long getBytesFree()
    {
        return this.bytesFree.get();
    }

    /**
     * Resets the number of bytes that are free.
     */
    private void resetBytesFree()
    {
        this.bytesFree.set(0);
    }

    /**
     * Adjusts the bytes-free count by the length of the descriptor. To subtract, pass in false
     * for add.
     * <p>
     *
     * @param ded
     * @param add
     */
    private void adjustBytesFree(final IndexedDiskElementDescriptor ded, final boolean add)
    {
        if (ded != null)
        {
            final int amount = ded.len + IndexedDisk.HEADER_SIZE_BYTES;

            if (add)
            {
                this.bytesFree.addAndGet(amount);
            }
            else
            {
                this.bytesFree.addAndGet(-amount);
            }
        }
    }

    /**
     * This is for debugging and testing.
     * <p>
     *
     * @return the length of the data file.
     * @throws IOException
     */
    protected long getDataFileSize() throws IOException
    {
        long size = 0;

        storageLock.readLock().lock();

        try
        {
            if (dataFile != null)
            {
                size = dataFile.length();
            }
        }
        finally
        {
            storageLock.readLock().unlock();
        }

        return size;
    }

    /**
     * For debugging. This dumps the values by default.
     */
    public void dump()
    {
        dump(true);
    }

    /**
     * For debugging.
     * <p>
     *
     * @param dumpValues
     *            A boolean indicating if values should be dumped.
     */
    public void dump(final boolean dumpValues)
    {
        if (log.isTraceEnabled())
        {
            log.trace("{0}: [dump] Number of keys: {1}", logCacheName, keyHash.size());

            for (final Map.Entry<K, IndexedDiskElementDescriptor> e : keyHash.entrySet())
            {
                final K key = e.getKey();
                final IndexedDiskElementDescriptor ded = e.getValue();

                log.trace("{0}: [dump] Disk element, key: {1}, pos: {2}, len: {3}" +
                        (dumpValues ? ", val: " + get(key) : ""),
                        logCacheName, key, ded.pos, ded.len);
            }
        }
    }

    /**
     * @return Returns the AuxiliaryCacheAttributes.
     */
    @Override
    public AuxiliaryCacheAttributes getAuxiliaryCacheAttributes()
    {
        return this.cattr;
    }

    /**
     * Returns info about the disk cache.
     * <p>
     *
     * @see org.apache.commons.jcs3.auxiliary.AuxiliaryCache#getStatistics()
     */
    @Override
    public synchronized IStats getStatistics()
    {
        final IStats stats = new Stats();
        stats.setTypeName("Indexed Disk Cache");

        final ArrayList<IStatElement<?>> elems = new ArrayList<>();

        elems.add(new StatElement<>("Is Alive", Boolean.valueOf(isAlive())));
        elems.add(new StatElement<>("Key Map Size", Integer.valueOf(this.keyHash != null ? this.keyHash.size() : -1)));
        try
        {
            elems.add(
                    new StatElement<>("Data File Length", Long.valueOf(this.dataFile != null ? this.dataFile.length() : -1L)));
        }
        catch (final IOException e)
        {
            log.error(e);
        }
        elems.add(new StatElement<>("Max Key Size", this.maxKeySize));
        elems.add(new StatElement<>("Hit Count", this.hitCount));
        elems.add(new StatElement<>("Bytes Free", this.bytesFree));
        elems.add(new StatElement<>("Optimize Operation Count", Integer.valueOf(this.removeCount)));
        elems.add(new StatElement<>("Times Optimized", Integer.valueOf(this.timesOptimized)));
        elems.add(new StatElement<>("Recycle Count", Integer.valueOf(this.recycleCnt)));
        elems.add(new StatElement<>("Recycle Bin Size", Integer.valueOf(this.recycle.size())));
        elems.add(new StatElement<>("Startup Size", Integer.valueOf(this.startupSize)));

        // get the stats from the super too
        final IStats sStats = super.getStatistics();
        elems.addAll(sStats.getStatElements());

        stats.setStatElements(elems);

        return stats;
    }

    /**
     * This is exposed for testing.
     * <p>
     *
     * @return Returns the timesOptimized.
     */
    protected int getTimesOptimized()
    {
        return timesOptimized;
    }

    /**
     * This is used by the event logging.
     * <p>
     *
     * @return the location of the disk, either path or ip.
     */
    @Override
    protected String getDiskLocation()
    {
        return dataFile.getFilePath();
    }

    /**
     * Compares IndexedDiskElementDescriptors based on their position.
     * <p>
     * @deprecated Use a lambda instead
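     *             (for example {@code (ded1, ded2) -> Long.compare(ded1.pos, ded2.pos)},
     *             as done in createPositionSortedDescriptorList)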
     */
    @Deprecated
    protected static final class PositionComparator implements Comparator<IndexedDiskElementDescriptor>, Serializable
    {
        /** serialVersionUID */
        private static final long serialVersionUID = -8387365338590814113L;

        /**
         * Compares two descriptors based on position.
         * <p>
         *
         * @see java.util.Comparator#compare(Object, Object)
         */
        @Override
        public int compare(final IndexedDiskElementDescriptor ded1, final IndexedDiskElementDescriptor ded2)
        {
            return Long.compare(ded1.pos, ded2.pos);
        }
    }

    /**
     * Class for recycling and LRU. This implements the LRU overflow callback, so we can add items
     * to the recycle bin. This class tracks the content size to decide when to evict an element.
     */
    public class LRUMapSizeLimited extends AbstractLRUMap<K, IndexedDiskElementDescriptor>
    {
        /**
         * <code>tag</code> tells us which map we are working on.
         */
        public static final String TAG = "orig";

        // size of the content in kB
        private final AtomicInteger contentSize;
        private final int maxSize;

        /**
         * Default
         */
        public LRUMapSizeLimited()
        {
            this(-1);
        }

        /**
         * @param maxKeySize
         */
        public LRUMapSizeLimited(final int maxKeySize)
        {
            this.maxSize = maxKeySize;
            this.contentSize = new AtomicInteger(0);
        }

        // keep the content size in kB, so 2^31 kB is a reasonable value
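        // Java integer division truncates toward zero, so the "/ -1024 - 1" form
        // below subtracts exactly what addLengthToCacheSize adds for the same descriptor.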
        private void subLengthFromCacheSize(final IndexedDiskElementDescriptor value)
        {
            contentSize.addAndGet((value.len + IndexedDisk.HEADER_SIZE_BYTES) / -1024 - 1);
        }

        // keep the content size in kB, so 2^31 kB is a reasonable value
        private void addLengthToCacheSize(final IndexedDiskElementDescriptor value)
        {
            contentSize.addAndGet((value.len + IndexedDisk.HEADER_SIZE_BYTES) / 1024 + 1);
        }

        @Override
        public IndexedDiskElementDescriptor put(final K key, final IndexedDiskElementDescriptor value)
        {
            IndexedDiskElementDescriptor oldValue = null;

            try
            {
                oldValue = super.put(key, value);
            }
            finally
            {
                // keep the content size in kB, so 2^31 kB is a reasonable value
                if (value != null)
                {
                    addLengthToCacheSize(value);
                }
                if (oldValue != null)
                {
                    subLengthFromCacheSize(oldValue);
                }
            }

            return oldValue;
        }

        @Override
        public IndexedDiskElementDescriptor remove(final Object key)
        {
            IndexedDiskElementDescriptor value = null;

            try
            {
                value = super.remove(key);
                return value;
            }
            finally
            {
                if (value != null)
                {
                    subLengthFromCacheSize(value);
                }
            }
        }

        /**
         * This is called when the max key size is reached. The least recently used item will be
         * passed here. We will store the position and size of the spot on disk in the recycle bin.
         * <p>
         *
         * @param key
         * @param value
         */
        @Override
        protected void processRemovedLRU(final K key, final IndexedDiskElementDescriptor value)
        {
            if (value != null)
            {
                subLengthFromCacheSize(value);
            }

            addToRecycleBin(value);

            log.debug("{0}: Removing key: [{1}] from key store.", logCacheName, key);
            log.debug("{0}: Key store size: [{1}].", logCacheName, this.size());

            doOptimizeRealTime();
        }

        @Override
        protected boolean shouldRemove()
        {
            return maxSize > 0 && contentSize.get() > maxSize && !this.isEmpty();
        }
    }

    /**
     * Class for recycling and LRU. This implements the LRU overflow callback, so we can add items
     * to the recycle bin. This class counts the elements to decide when to evict an element.
     */
    public class LRUMapCountLimited extends LRUMap<K, IndexedDiskElementDescriptor>
    {
        /**
         * @param maxKeySize
         *            maximum number of keys to keep in memory
         */
        public LRUMapCountLimited(final int maxKeySize)
        {
            super(maxKeySize);
        }

        /**
         * This is called when the max key size is reached. The least recently used item will be
         * passed here. We will store the position and size of the spot on disk in the recycle bin.
         * <p>
         *
         * @param key
         * @param value
         */
        @Override
        protected void processRemovedLRU(final K key, final IndexedDiskElementDescriptor value)
        {
            addToRecycleBin(value);
            log.debug("{0}: Removing key: [{1}] from key store.", logCacheName, key);
            log.debug("{0}: Key store size: [{1}].", logCacheName, this.size());

            doOptimizeRealTime();
        }
    }
}