package org.apache.commons.jcs.auxiliary.disk.indexed;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.commons.jcs.auxiliary.AuxiliaryCacheAttributes;
import org.apache.commons.jcs.auxiliary.disk.AbstractDiskCache;
import org.apache.commons.jcs.auxiliary.disk.behavior.IDiskCacheAttributes.DiskLimitType;
import org.apache.commons.jcs.engine.CacheConstants;
import org.apache.commons.jcs.engine.behavior.ICacheElement;
import org.apache.commons.jcs.engine.behavior.IElementSerializer;
import org.apache.commons.jcs.engine.control.group.GroupAttrName;
import org.apache.commons.jcs.engine.control.group.GroupId;
import org.apache.commons.jcs.engine.logging.behavior.ICacheEvent;
import org.apache.commons.jcs.engine.logging.behavior.ICacheEventLogger;
import org.apache.commons.jcs.engine.stats.StatElement;
import org.apache.commons.jcs.engine.stats.Stats;
import org.apache.commons.jcs.engine.stats.behavior.IStatElement;
import org.apache.commons.jcs.engine.stats.behavior.IStats;
import org.apache.commons.jcs.utils.struct.AbstractLRUMap;
import org.apache.commons.jcs.utils.struct.LRUMap;
import org.apache.commons.jcs.utils.timing.ElapsedTimer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * Disk cache that uses a RandomAccessFile with keys stored in memory. The maximum number of keys
 * stored in memory is configurable. The disk cache tries to recycle spots on disk to limit file
 * expansion.
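 * <p>
 * A minimal configuration sketch (illustrative only; the setters shown exist on the attributes
 * class and its parents, but the region name, path, and sizes are assumptions):
 * <pre>
 * IndexedDiskCacheAttributes attrs = new IndexedDiskCacheAttributes();
 * attrs.setCacheName("myRegion");            // region name, used to derive the file names
 * attrs.setDiskPath("target/indexed-disk");  // root directory for the .data and .key files
 * attrs.setMaxKeySize(10000);                // maximum number of keys kept in memory
 * IndexedDiskCache&lt;String, String&gt; cache =
 *     new IndexedDiskCache&lt;String, String&gt;(attrs);
 * </pre>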
 */
public class IndexedDiskCache<K, V> extends AbstractDiskCache<K, V>
{
    /** The logger */
    private static final Log log = LogFactory.getLog(IndexedDiskCache.class);

    /** Cache name used in log messages */
    protected final String logCacheName;

    /** The name of the file where the data is stored */
    private final String fileName;

    /** The IndexedDisk manages reads and writes to the data file. */
    private IndexedDisk dataFile;

    /** The IndexedDisk manages reads and writes to the key file. */
    private IndexedDisk keyFile;

    /** Map containing the keys and disk offsets. */
    private Map<K, IndexedDiskElementDescriptor> keyHash;

    /** The maximum number of keys that we will keep in memory. */
    private final int maxKeySize;

    /** The directory where the cache files are stored. */
    private File rafDir;

    /** Should we keep adding to the recycle bin. False during optimization. */
    private boolean doRecycle = true;

    /** Should we optimize in real time. */
    private boolean isRealTimeOptimizationEnabled = true;

    /** Should we optimize on shutdown. */
    private boolean isShutdownOptimizationEnabled = true;

    /** Are we currently optimizing the files? */
    private boolean isOptimizing = false;

    /** The number of times the file has been optimized. */
    private int timesOptimized = 0;

    /** The thread optimizing the file. */
    private volatile Thread currentOptimizationThread;

    /** Used to count the number of removes since the last optimization. */
    private int removeCount = 0;

    /** Should we queue puts. True when optimizing. We write the queue post optimization. */
    private boolean queueInput = false;

    /** Set of descriptors for puts made during optimization. */
    private final ConcurrentSkipListSet<IndexedDiskElementDescriptor> queuedPutList =
            new ConcurrentSkipListSet<IndexedDiskElementDescriptor>(new PositionComparator());

    /** RECYCLE BIN -- sorted set of empty spots */
    private ConcurrentSkipListSet<IndexedDiskElementDescriptor> recycle;

    /** User configurable parameters */
    private final IndexedDiskCacheAttributes cattr;

    /** How many slots have we recycled. */
    private int recycleCnt = 0;

    /** How many items were there on startup. */
    private int startupSize = 0;

    /** The number of bytes free on disk. */
    private AtomicLong bytesFree = new AtomicLong(0);

    /** Mode we are working in (size or count limited). */
    private DiskLimitType diskLimitType = DiskLimitType.COUNT;

    /** simple stat */
    private AtomicInteger hitCount = new AtomicInteger(0);

    /**
     * Use this lock to synchronize reads and writes to the underlying storage mechanism.
     */
    protected ReentrantReadWriteLock storageLock = new ReentrantReadWriteLock();

    /**
     * Constructor for the DiskCache object.
     * <p>
     *
     * @param cacheAttributes
     */
    public IndexedDiskCache(IndexedDiskCacheAttributes cacheAttributes)
    {
        this(cacheAttributes, null);
    }

    /**
     * Constructor for the DiskCache object.
     * <p>
     *
     * @param cattr
     * @param elementSerializer
     *            used if supplied; a null value will not replace the default serializer
     */
    public IndexedDiskCache(IndexedDiskCacheAttributes cattr, IElementSerializer elementSerializer)
    {
        super(cattr);

        setElementSerializer(elementSerializer);

        this.cattr = cattr;
        this.maxKeySize = cattr.getMaxKeySize();
        this.isRealTimeOptimizationEnabled = cattr.getOptimizeAtRemoveCount() > 0;
        this.isShutdownOptimizationEnabled = cattr.isOptimizeOnShutdown();
        this.logCacheName = "Region [" + getCacheName() + "] ";
        this.diskLimitType = cattr.getDiskLimitType();
        // Make a clean file name
        this.fileName = getCacheName().replaceAll("[^a-zA-Z0-9-_\\.]", "_");

        try
        {
            initializeFileSystem(cattr);

            initializeKeysAndData(cattr);

            initializeRecycleBin();

            // Initialization finished successfully, so set alive to true.
            setAlive(true);
            if (log.isInfoEnabled())
            {
                log.info(logCacheName + "Indexed Disk Cache is alive.");
            }

            // TODO: Should we improve detection of whether or not the file should be optimized.
            if (isRealTimeOptimizationEnabled && keyHash.size() > 0)
            {
                // Kick off a real time optimization, in case we didn't do a final optimization.
                doOptimizeRealTime();
            }
        }
        catch (IOException e)
        {
            log.error(
                logCacheName + "Failure initializing for fileName: " + fileName + " and directory: "
                    + this.rafDir.getAbsolutePath(), e);
        }
    }

    /**
     * Sets the root directory in which the cache files will be stored.
     * <p>
     *
     * @param cattr
     */
    private void initializeFileSystem(IndexedDiskCacheAttributes cattr)
    {
        this.rafDir = cattr.getDiskPath();
        if (log.isInfoEnabled())
        {
            log.info(logCacheName + "Cache file root directory: " + rafDir);
        }
    }

    /**
     * Creates the key and data disk caches.
     * <p>
     * Loads any keys if they are present and ClearDiskOnStartup is false.
     * <p>
     *
     * @param cattr
     * @throws IOException
     */
    private void initializeKeysAndData(IndexedDiskCacheAttributes cattr) throws IOException
    {
        this.dataFile = new IndexedDisk(new File(rafDir, fileName + ".data"), getElementSerializer());
        this.keyFile = new IndexedDisk(new File(rafDir, fileName + ".key"), getElementSerializer());

        if (cattr.isClearDiskOnStartup())
        {
            if (log.isInfoEnabled())
            {
                log.info(logCacheName + "ClearDiskOnStartup is set to true.  Ignoring any persisted data.");
            }
            initializeEmptyStore();
        }
        else if (keyFile.length() > 0)
        {
            // If the key file has contents, try to initialize the keys
            // from it. If no keys are loaded, reset the data file.
            initializeStoreFromPersistedData();
        }
        else
        {
            // Otherwise start with a new empty map for the keys, and reset
            // the data file if it has contents.
            initializeEmptyStore();
        }
    }

    /**
     * Initializes an empty disk cache.
     * <p>
     *
     * @throws IOException
     */
    private void initializeEmptyStore() throws IOException
    {
        initializeKeyMap();

        if (dataFile.length() > 0)
        {
            dataFile.reset();
        }
    }

    /**
     * Loads any persisted data and checks for consistency. If there is a consistency issue, the
     * files are cleared.
     * <p>
     *
     * @throws IOException
     */
    private void initializeStoreFromPersistedData() throws IOException
    {
        loadKeys();

        if (keyHash.isEmpty())
        {
            dataFile.reset();
        }
        else
        {
            boolean isOk = checkKeyDataConsistency(false);
            if (!isOk)
            {
                keyHash.clear();
                keyFile.reset();
                dataFile.reset();
                log.warn(logCacheName + "Corruption detected.  Resetting data and key files.");
            }
            else
            {
                synchronized (this)
                {
                    startupSize = keyHash.size();
                }
            }
        }
    }

    /**
     * Loads the keys from the .key file. The keys are stored in a HashMap on disk. This is
     * converted into an LRUMap.
     */
    protected void loadKeys()
    {
        if (log.isDebugEnabled())
        {
            log.debug(logCacheName + "Loading keys for " + keyFile.toString());
        }

        storageLock.writeLock().lock();

        try
        {
            // create a key map to use.
            initializeKeyMap();

            HashMap<K, IndexedDiskElementDescriptor> keys = keyFile.readObject(
                new IndexedDiskElementDescriptor(0, (int) keyFile.length() - IndexedDisk.HEADER_SIZE_BYTES));

            if (keys != null)
            {
                if (log.isDebugEnabled())
                {
                    log.debug(logCacheName + "Found " + keys.size() + " keys in the key file.");
                }

                keyHash.putAll(keys);

                if (log.isInfoEnabled())
                {
                    log.info(logCacheName + "Loaded keys from [" + fileName + "], key count: " + keyHash.size() + "; up to "
                        + maxKeySize + " will be available.");
                }
            }

            if (log.isDebugEnabled())
            {
                dump(false);
            }
        }
        catch (Exception e)
        {
            log.error(logCacheName + "Problem loading keys for file " + fileName, e);
        }
        finally
        {
            storageLock.writeLock().unlock();
        }
    }

    /**
     * Check for minimal consistency between the keys and the datafile. Makes sure no starting
     * positions in the keys exceed the file length.
     * <p>
     * The caller should take the appropriate action if the keys and data are not consistent.
     *
     * @param checkForDedOverlaps
     *            if <code>true</code>, do a more thorough check by checking for
     *            data overlap
     * @return <code>true</code> if the test passes
     */
    private boolean checkKeyDataConsistency(boolean checkForDedOverlaps)
    {
        ElapsedTimer timer = new ElapsedTimer();
        log.debug(logCacheName + "Performing initial consistency check");

        boolean isOk = true;
        long fileLength = 0;
        try
        {
            fileLength = dataFile.length();

            for (Map.Entry<K, IndexedDiskElementDescriptor> e : keyHash.entrySet())
            {
                IndexedDiskElementDescriptor ded = e.getValue();

                isOk = ded.pos + IndexedDisk.HEADER_SIZE_BYTES + ded.len <= fileLength;

                if (!isOk)
                {
                    log.warn(logCacheName + "The dataFile is corrupted!" + "\n raf.length() = " + fileLength + "\n ded.pos = "
                        + ded.pos);
                    break;
                }
            }

            if (isOk && checkForDedOverlaps)
            {
                isOk = checkForDedOverlaps(createPositionSortedDescriptorList());
            }
        }
        catch (IOException e)
        {
            log.error(e);
            isOk = false;
        }

        if (log.isInfoEnabled())
        {
            log.info(logCacheName + "Finished initial consistency check, isOk = " + isOk + " in " + timer.getElapsedTimeString());
        }

        return isOk;
    }

    /**
     * Detects any overlapping elements. This expects a sorted list.
     * <p>
     * The total length of an item is IndexedDisk.HEADER_SIZE_BYTES + ded.len.
     * <p>
     *
     * @param sortedDescriptors
     * @return false if there are overlaps.
     */
    protected boolean checkForDedOverlaps(IndexedDiskElementDescriptor[] sortedDescriptors)
    {
        long start = System.currentTimeMillis();
        boolean isOk = true;
        long expectedNextPos = 0;
        for (int i = 0; i < sortedDescriptors.length; i++)
        {
            IndexedDiskElementDescriptor ded = sortedDescriptors[i];
            if (expectedNextPos > ded.pos)
            {
                log.error(logCacheName + "Corrupt file: overlapping deds " + ded);
                isOk = false;
                break;
            }
            else
            {
                expectedNextPos = ded.pos + IndexedDisk.HEADER_SIZE_BYTES + ded.len;
            }
        }
        long end = System.currentTimeMillis();
        if (log.isDebugEnabled())
        {
            log.debug(logCacheName + "Check for DED overlaps took " + (end - start) + " ms.");
        }

        return isOk;
    }
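
    /*
     * Worked example (a sketch, assuming a 4-byte record header as suggested by
     * IndexedDisk.HEADER_SIZE_BYTES): a descriptor at pos=0 with len=100 occupies
     * bytes [0, 104), so the next descriptor must start at pos >= 104. A second
     * descriptor at pos=100 would overlap the first and fail the check above.
     */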

    /**
     * Saves key file to disk. This converts the LRUMap to a HashMap for serialization.
     */
    protected void saveKeys()
    {
        try
        {
            if (log.isInfoEnabled())
            {
                log.info(logCacheName + "Saving keys to: " + fileName + ", key count: " + keyHash.size());
            }

            keyFile.reset();

            HashMap<K, IndexedDiskElementDescriptor> keys = new HashMap<K, IndexedDiskElementDescriptor>();
            keys.putAll(keyHash);

            if (keys.size() > 0)
            {
                keyFile.writeObject(keys, 0);
            }

            if (log.isInfoEnabled())
            {
                log.info(logCacheName + "Finished saving keys.");
            }
        }
        catch (IOException e)
        {
            log.error(logCacheName + "Problem storing keys.", e);
        }
    }

    /**
     * Update the disk cache. Called from the Queue. Makes sure the Item has not been retrieved from
     * purgatory while in queue for disk. Remove items from purgatory when they go to disk.
     * <p>
     *
     * @param ce
     *            The ICacheElement&lt;K, V&gt; to put to disk.
     */
    @Override
    protected void processUpdate(ICacheElement<K, V> ce)
    {
        if (!isAlive())
        {
            log.error(logCacheName + "No longer alive; aborting put of key = " + ce.getKey());
            return;
        }

        if (log.isDebugEnabled())
        {
            log.debug(logCacheName + "Storing element on disk, key: " + ce.getKey());
        }

        IndexedDiskElementDescriptor ded = null;

        // old element with same key
        IndexedDiskElementDescriptor old = null;

        try
        {
            byte[] data = getElementSerializer().serialize(ce);

            // make sure this only locks for one particular cache region
            storageLock.writeLock().lock();
            try
            {
                old = keyHash.get(ce.getKey());

                // Item with the same key already exists in file.
                // Try to reuse the location if possible.
                if (old != null && data.length <= old.len)
                {
                    // Reuse the old ded. The defrag relies on ded updates by reference, not
                    // replacement.
                    ded = old;
                    ded.len = data.length;
                }
                else
                {
                    // we need this to compare in the recycle bin
                    ded = new IndexedDiskElementDescriptor(dataFile.length(), data.length);

                    if (doRecycle)
                    {
                        IndexedDiskElementDescriptor rep = recycle.ceiling(ded);
                        if (rep != null)
                        {
                            // remove element from recycle bin
                            recycle.remove(rep);
                            ded = rep;
                            ded.len = data.length;
                            recycleCnt++;
                            this.adjustBytesFree(ded, false);
                            if (log.isDebugEnabled())
                            {
                                log.debug(logCacheName + "using recycled ded " + ded.pos + " rep.len = " + rep.len + " ded.len = "
                                    + ded.len);
                            }
                        }
                    }

                    // Put it in the map
                    keyHash.put(ce.getKey(), ded);

                    if (queueInput)
                    {
                        queuedPutList.add(ded);
                        if (log.isDebugEnabled())
                        {
                            log.debug(logCacheName + "added to queued put list. Size: " + queuedPutList.size());
                        }
                    }

                    // add the old slot to the recycle bin
                    if (old != null)
                    {
                        addToRecycleBin(old);
                    }
                }

                dataFile.write(ded, data);
            }
            finally
            {
                storageLock.writeLock().unlock();
            }

            if (log.isDebugEnabled())
            {
                log.debug(logCacheName + "Put to file: " + fileName + ", key: " + ce.getKey() + ", position: " + ded.pos
                    + ", size: " + ded.len);
            }
        }
        catch (IOException e)
        {
            log.error(logCacheName + "Failure updating element, key: " + ce.getKey() + " old: " + old, e);
        }
    }
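
    /*
     * Note on slot reuse above: descriptors in the recycle bin sort primarily by
     * length (see the ordering defined by IndexedDiskElementDescriptor), so
     * recycle.ceiling(ded) is a best-fit lookup: it returns the smallest free slot
     * whose length is at least ded.len. A rough sketch (hypothetical values):
     *
     *   free slots (by len): [50, 120, 300]
     *   new data len = 100  ->  ceiling returns the 120-byte slot, which is then
     *                           removed from the bin and rewritten in place.
     */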

    /**
     * Gets the key, then goes to disk to get the object.
     * <p>
     *
     * @param key
     * @return ICacheElement&lt;K, V&gt; or null
     * @see AbstractDiskCache#doGet
     */
    @Override
    protected ICacheElement<K, V> processGet(K key)
    {
        if (!isAlive())
        {
            log.error(logCacheName + "No longer alive so returning null for key = " + key);
            return null;
        }

        if (log.isDebugEnabled())
        {
            log.debug(logCacheName + "Trying to get from disk: " + key);
        }

        ICacheElement<K, V> object = null;
        try
        {
            storageLock.readLock().lock();
            try
            {
                object = readElement(key);
            }
            finally
            {
                storageLock.readLock().unlock();
            }

            if (object != null)
            {
                hitCount.incrementAndGet();
            }
        }
        catch (IOException ioe)
        {
            log.error(logCacheName + "Failure getting from disk, key = " + key, ioe);
            reset();
        }
        return object;
    }

    /**
     * Gets matching items from the cache.
     * <p>
     *
     * @param pattern
     * @return a map of K key to ICacheElement&lt;K, V&gt; element, or an empty map if there is no
     *         data in cache matching keys
     */
    @Override
    public Map<K, ICacheElement<K, V>> processGetMatching(String pattern)
    {
        Map<K, ICacheElement<K, V>> elements = new HashMap<K, ICacheElement<K, V>>();
        Set<K> keyArray = null;
        storageLock.readLock().lock();
        try
        {
            keyArray = new HashSet<K>(keyHash.keySet());
        }
        finally
        {
            storageLock.readLock().unlock();
        }

        Set<K> matchingKeys = getKeyMatcher().getMatchingKeysFromArray(pattern, keyArray);

        for (K key : matchingKeys)
        {
            ICacheElement<K, V> element = processGet(key);
            if (element != null)
            {
                elements.put(key, element);
            }
        }
        return elements;
    }
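
    /*
     * Usage sketch (an assumption about the caller, not part of this class): the
     * pattern is handed to the configured IKeyMatcher, which by default treats it
     * as a regular expression, e.g.
     *
     *   Map<K, ICacheElement<K, V>> hits = cache.processGetMatching("user\\..+");
     *
     * would return every element whose key matches the pattern.
     */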

    /**
     * Reads the item from disk.
     * <p>
     *
     * @param key
     * @return ICacheElement
     * @throws IOException
     */
    private ICacheElement<K, V> readElement(K key) throws IOException
    {
        ICacheElement<K, V> object = null;

        IndexedDiskElementDescriptor ded = keyHash.get(key);

        if (ded != null)
        {
            if (log.isDebugEnabled())
            {
                log.debug(logCacheName + "Found on disk, key: " + key);
            }
            try
            {
                ICacheElement<K, V> readObject = dataFile.readObject(ded);
                object = readObject;
                // TODO consider checking key equality and throwing if there is a failure
            }
            catch (IOException e)
            {
                log.error(logCacheName + "IO Exception, Problem reading object from file", e);
                throw e;
            }
            catch (Exception e)
            {
                log.error(logCacheName + "Exception, Problem reading object from file", e);
                throw new IOException(logCacheName + "Problem reading object from disk. " + e.getMessage());
            }
        }

        return object;
    }

    /**
     * Return the keys in this cache.
     * <p>
     *
     * @see org.apache.commons.jcs.auxiliary.disk.AbstractDiskCache#getKeySet()
     */
    @Override
    public Set<K> getKeySet() throws IOException
    {
        HashSet<K> keys = new HashSet<K>();

        storageLock.readLock().lock();

        try
        {
            keys.addAll(this.keyHash.keySet());
        }
        finally
        {
            storageLock.readLock().unlock();
        }

        return keys;
    }

    /**
     * Returns true if the removal was successful; or false if there is nothing to remove. The
     * current implementation always results in a disk orphan.
     * <p>
     *
     * @return true if at least one item was removed.
     * @param key
     */
    @Override
    protected boolean processRemove(K key)
    {
        if (!isAlive())
        {
            log.error(logCacheName + "No longer alive so returning false for key = " + key);
            return false;
        }

        if (key == null)
        {
            return false;
        }

        boolean reset = false;
        boolean removed = false;
        try
        {
            storageLock.writeLock().lock();

            if (key instanceof String && key.toString().endsWith(CacheConstants.NAME_COMPONENT_DELIMITER))
            {
                removed = performPartialKeyRemoval((String) key);
            }
            else if (key instanceof GroupAttrName && ((GroupAttrName<?>) key).attrName == null)
            {
                removed = performGroupRemoval(((GroupAttrName<?>) key).groupId);
            }
            else
            {
                removed = performSingleKeyRemoval(key);
            }
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        if (reset)
        {
            reset();
        }

        // this increments the remove count.
        // there is no reason to call this if an item was not removed.
        if (removed)
        {
            doOptimizeRealTime();
        }

        return removed;
    }

    /**
     * Iterates over the keyset. Builds a list of matches. Removes all the keys in the list. Does
     * not remove via the iterator, since the map impl may not support it.
     * <p>
     * This operates under a lock obtained in doRemove().
     * <p>
     *
     * @param key
     * @return true if there was a match
     */
    private boolean performPartialKeyRemoval(String key)
    {
        boolean removed = false;

        // remove all keys of the same name hierarchy.
        List<K> itemsToRemove = new LinkedList<K>();

        for (K k : keyHash.keySet())
        {
            if (k instanceof String && k.toString().startsWith(key))
            {
                itemsToRemove.add(k);
            }
        }

        // remove matches.
        for (K fullKey : itemsToRemove)
        {
            // Don't add to recycle bin here
            // https://issues.apache.org/jira/browse/JCS-67
            performSingleKeyRemoval(fullKey);
            removed = true;
            // TODO this needs to update the remove count separately
        }

        return removed;
    }

    /**
     * Remove all elements from the group. This does not use the iterator to remove. It builds a
     * list of group elements and then removes them one by one.
     * <p>
     * This operates under a lock obtained in doRemove().
     * <p>
     *
     * @param key
     * @return true if an element was removed
     */
    private boolean performGroupRemoval(GroupId key)
    {
        boolean removed = false;

        // remove all keys of the same name group.
        List<K> itemsToRemove = new LinkedList<K>();

        // remove all keys of the same name hierarchy.
        for (K k : keyHash.keySet())
        {
            if (k instanceof GroupAttrName && ((GroupAttrName<?>) k).groupId.equals(key))
            {
                itemsToRemove.add(k);
            }
        }

        // remove matches.
        for (K fullKey : itemsToRemove)
        {
            // Don't add to recycle bin here
            // https://issues.apache.org/jira/browse/JCS-67
            performSingleKeyRemoval(fullKey);
            removed = true;
            // TODO this needs to update the remove count separately
        }

        return removed;
    }

    /**
     * Removes an individual key from the cache.
     * <p>
     * This operates under a lock obtained in doRemove().
     * <p>
     *
     * @param key
     * @return true if an item was removed.
     */
    private boolean performSingleKeyRemoval(K key)
    {
        boolean removed;
        // remove single item.
        IndexedDiskElementDescriptor ded = keyHash.remove(key);
        removed = ded != null;
        addToRecycleBin(ded);

        if (log.isDebugEnabled())
        {
            log.debug(logCacheName + "Disk removal: Removed from key hash, key [" + key + "] removed = " + removed);
        }
        return removed;
    }

    /**
     * Remove all the items from the disk cache by resetting everything.
     */
    @Override
    public void processRemoveAll()
    {
        ICacheEvent<String> cacheEvent = createICacheEvent(getCacheName(), "all", ICacheEventLogger.REMOVEALL_EVENT);
        try
        {
            reset();
        }
        finally
        {
            logICacheEvent(cacheEvent);
        }
    }

    /**
     * Reset effectively clears the disk cache, creating new files, recycle bins, and key maps.
     * <p>
     * It can be used as a last resort to handle errors, to force a content update, or for removeAll.
     */
    private void reset()
    {
        if (log.isWarnEnabled())
        {
            log.warn(logCacheName + "Resetting cache");
        }

        try
        {
            storageLock.writeLock().lock();

            if (dataFile != null)
            {
                dataFile.close();
            }
            File dataFileTemp = new File(rafDir, fileName + ".data");
            boolean result = dataFileTemp.delete();
            if (!result && log.isDebugEnabled())
            {
                log.debug("Could not delete file " + dataFileTemp);
            }

            if (keyFile != null)
            {
                keyFile.close();
            }
            File keyFileTemp = new File(rafDir, fileName + ".key");
            result = keyFileTemp.delete();
            if (!result && log.isDebugEnabled())
            {
                log.debug("Could not delete file " + keyFileTemp);
            }

            dataFile = new IndexedDisk(new File(rafDir, fileName + ".data"), getElementSerializer());
            keyFile = new IndexedDisk(new File(rafDir, fileName + ".key"), getElementSerializer());

            initializeRecycleBin();

            initializeKeyMap();
        }
        catch (IOException e)
        {
            log.error(logCacheName + "Failure resetting state", e);
        }
        finally
        {
            storageLock.writeLock().unlock();
        }
    }

    /**
     * Initializes the recycle bin: an empty sorted set used to track reusable spots in the data
     * file.
     */
    private void initializeRecycleBin()
    {
        recycle = new ConcurrentSkipListSet<IndexedDiskElementDescriptor>();
    }

    /**
     * Create the map for keys that contain the index position on disk.
     */
    private void initializeKeyMap()
    {
        keyHash = null;
        if (maxKeySize >= 0)
        {
            if (this.diskLimitType == DiskLimitType.COUNT)
            {
                keyHash = new LRUMapCountLimited(maxKeySize);
            }
            else
            {
                keyHash = new LRUMapSizeLimited(maxKeySize);
            }

            if (log.isInfoEnabled())
            {
                log.info(logCacheName + "Set maxKeySize to: '" + maxKeySize + "'");
            }
        }
        else
        {
            // If no max size, use a plain map for memory and processing efficiency.
            keyHash = new HashMap<K, IndexedDiskElementDescriptor>();
            // keyHash = Collections.synchronizedMap( new HashMap() );
            if (log.isInfoEnabled())
            {
                log.info(logCacheName + "Set maxKeySize to unlimited");
            }
        }
    }

    /**
     * Dispose of the disk cache in a background thread. Joins against this thread to put a cap on
     * the disposal time.
     * <p>
     * TODO make dispose window configurable.
     */
    @Override
    public void processDispose()
    {
        ICacheEvent<String> cacheEvent = createICacheEvent(getCacheName(), "none", ICacheEventLogger.DISPOSE_EVENT);
        try
        {
            Runnable disR = new Runnable()
            {
                @Override
                public void run()
                {
                    disposeInternal();
                }
            };
            Thread t = new Thread(disR, "IndexedDiskCache-DisposalThread");
            t.start();
            // wait up to 60 seconds for dispose and then quit if not done.
            try
            {
                t.join(60 * 1000);
            }
            catch (InterruptedException ex)
            {
                log.error(logCacheName + "Interrupted while waiting for disposal thread to finish.", ex);
            }
        }
        finally
        {
            logICacheEvent(cacheEvent);
        }
    }

    /**
     * Internal method that handles the disposal.
     */
    protected void disposeInternal()
    {
        if (!isAlive())
        {
            log.error(logCacheName + "Not alive and dispose was called, filename: " + fileName);
            return;
        }

        // Prevents any interaction with the cache while we're shutting down.
        setAlive(false);

        Thread optimizationThread = currentOptimizationThread;
        if (isRealTimeOptimizationEnabled && optimizationThread != null)
        {
            // Join with the current optimization thread.
            if (log.isDebugEnabled())
            {
                log.debug(logCacheName + "In dispose, optimization already " + "in progress; waiting for completion.");
            }
            try
            {
                optimizationThread.join();
            }
            catch (InterruptedException e)
            {
                log.error(logCacheName + "Unable to join current optimization thread.", e);
            }
        }
        else if (isShutdownOptimizationEnabled && this.getBytesFree() > 0)
        {
            optimizeFile();
        }

        saveKeys();

        try
        {
            if (log.isDebugEnabled())
            {
                log.debug(logCacheName + "Closing files, base filename: " + fileName);
            }
            dataFile.close();
            dataFile = null;
            keyFile.close();
            keyFile = null;
        }
        catch (IOException e)
        {
            log.error(logCacheName + "Failure closing files in dispose, filename: " + fileName, e);
        }

        if (log.isInfoEnabled())
        {
            log.info(logCacheName + "Shutdown complete.");
        }
    }

    /**
     * Add descriptor to recycle bin if it is not null. Adds the length of the item to the bytes
     * free.
     * <p>
     * This is called in three places: (1) When an item is removed. All item removals funnel down to the performSingleKeyRemoval
     * method. (2) When an item on disk is updated with a value that will not fit in the previous slot. (3) When the max key size
     * is reached, the freed slot will be added.
     * <p>
     *
     * @param ded
     */
    protected void addToRecycleBin(IndexedDiskElementDescriptor ded)
    {
        // reuse the spot
        if (ded != null)
        {
            storageLock.readLock().lock();

            try
            {
                this.adjustBytesFree(ded, true);

                if (doRecycle)
                {
                    recycle.add(ded);
                    if (log.isDebugEnabled())
                    {
                        log.debug(logCacheName + "recycled ded " + ded);
                    }
                }
            }
            finally
            {
                storageLock.readLock().unlock();
            }
        }
    }

    /**
     * Performs the check for optimization, and if it is required, do it.
     */
    protected void doOptimizeRealTime()
    {
        if (isRealTimeOptimizationEnabled && !isOptimizing && removeCount++ >= cattr.getOptimizeAtRemoveCount())
        {
            isOptimizing = true;

            if (log.isInfoEnabled())
            {
                log.info(logCacheName + "Optimizing file. removeCount [" + removeCount + "] OptimizeAtRemoveCount ["
                    + cattr.getOptimizeAtRemoveCount() + "]");
            }

            if (currentOptimizationThread == null)
            {
                storageLock.writeLock().lock();

                try
                {
                    if (currentOptimizationThread == null)
                    {
                        currentOptimizationThread = new Thread(new Runnable()
                        {
                            @Override
                            public void run()
                            {
                                optimizeFile();

                                currentOptimizationThread = null;
                            }
                        }, "IndexedDiskCache-OptimizationThread");
                    }
                }
                finally
                {
                    storageLock.writeLock().unlock();
                }

                if (currentOptimizationThread != null)
                {
                    currentOptimizationThread.start();
                }
            }
        }
    }

    /**
     * File optimization is handled by this method. It works as follows:
     * <ol>
     * <li>Shut down recycling and turn on queuing of puts.</li>
     * <li>Take a snapshot of the current descriptors. If there are any removes, ignore them, as they will be compacted during the
     * next optimization.</li>
     * <li>Optimize the snapshot. For each descriptor:
     * <ol>
     * <li>Obtain the write-lock.</li>
     * <li>Shift the element on the disk, in order to compact out the free space.</li>
     * <li>Release the write-lock. This allows elements to still be accessible during optimization.</li>
     * </ol>
     * </li>
     * <li>Obtain the write-lock.</li>
     * <li>All queued puts are made at the end of the file. Optimize these under a single write-lock.</li>
     * <li>Truncate the file.</li>
     * <li>Release the write-lock.</li>
     * <li>Restore the system to standard operation.</li>
     * </ol>
     */
    protected void optimizeFile()
    {
        ElapsedTimer timer = new ElapsedTimer();
        timesOptimized++;
        if (log.isInfoEnabled())
        {
            log.info(logCacheName + "Beginning Optimization #" + timesOptimized);
        }

        // CREATE SNAPSHOT
        IndexedDiskElementDescriptor[] defragList = null;

        storageLock.writeLock().lock();

        try
        {
            queueInput = true;
            // shut off recycle while we're optimizing,
            doRecycle = false;
            defragList = createPositionSortedDescriptorList();
        }
        finally
        {
            // Release if I acquired.
            storageLock.writeLock().unlock();
        }

        // Defrag the file outside of the write lock. This allows a move to be made,
        // and yet have the element still accessible for reading or writing.
        long expectedNextPos = defragFile(defragList, 0);

        // ADD THE QUEUED ITEMS to the end and then truncate
        storageLock.writeLock().lock();

        try
        {
            try
            {
                if (!queuedPutList.isEmpty())
                {
                    defragList = queuedPutList.toArray(new IndexedDiskElementDescriptor[queuedPutList.size()]);

                    // pack them at the end
                    expectedNextPos = defragFile(defragList, expectedNextPos);
                }
                // TRUNCATE THE FILE
                dataFile.truncate(expectedNextPos);
            }
            catch (IOException e)
            {
                log.error(logCacheName + "Error optimizing queued puts.", e);
            }

            // RESTORE NORMAL OPERATION
            removeCount = 0;
            resetBytesFree();
            initializeRecycleBin();
            queuedPutList.clear();
            queueInput = false;
            // turn recycle back on.
            doRecycle = true;
            isOptimizing = false;
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        if (log.isInfoEnabled())
        {
            log.info(logCacheName + "Finished #" + timesOptimized + ". Optimization took " + timer.getElapsedTimeString());
        }
    }

    /**
     * Defragments the file in place by compacting out the free space (i.e., moving records
     * forward). If there were no gaps the resulting file would be the same size as the previous
     * file. This must be supplied an ordered defragList.
     * <p>
     *
     * @param defragList
     *            sorted list of descriptors for optimization
     * @param startingPos
     *            the start position in the file
     * @return this is the potential new file end
     */
    private long defragFile(IndexedDiskElementDescriptor[] defragList, long startingPos)
    {
        ElapsedTimer timer = new ElapsedTimer();
        long preFileSize = 0;
        long postFileSize = 0;
        long expectedNextPos = 0;
        try
        {
            preFileSize = this.dataFile.length();
            // find the first gap in the disk and start defragging.
            expectedNextPos = startingPos;
            for (int i = 0; i < defragList.length; i++)
            {
                storageLock.writeLock().lock();
                try
                {
                    if (expectedNextPos != defragList[i].pos)
                    {
                        dataFile.move(defragList[i], expectedNextPos);
                    }
                    expectedNextPos = defragList[i].pos + IndexedDisk.HEADER_SIZE_BYTES + defragList[i].len;
                }
                finally
                {
                    storageLock.writeLock().unlock();
                }
            }

            postFileSize = this.dataFile.length();

            // this is the potential new file end
            return expectedNextPos;
        }
        catch (IOException e)
        {
            log.error(logCacheName + "Error occurred during defragmentation.", e);
        }
        finally
        {
            if (log.isInfoEnabled())
            {
                log.info(logCacheName + "Defragmentation took " + timer.getElapsedTimeString() + ". File Size (before="
                    + preFileSize + ") (after=" + postFileSize + ") (truncating to " + expectedNextPos + ")");
            }
        }

        return 0;
    }
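
    /*
     * Worked example of the compaction arithmetic (hypothetical sizes, assuming a
     * 4-byte record header): given sorted descriptors at pos 0 (len 96) and pos 200
     * (len 50), the first record ends at 0 + 4 + 96 = 100, so expectedNextPos = 100.
     * The second record starts at 200 != 100, so it is moved to pos 100 and the new
     * expectedNextPos becomes 100 + 4 + 50 = 154, the potential new end of file.
     */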

    /**
     * Creates a snapshot of the IndexedDiskElementDescriptors in the keyHash and returns them
     * sorted by position in the dataFile.
     * <p>
     * TODO fix values() method on the LRU map.
     * <p>
     *
     * @return IndexedDiskElementDescriptor[]
     */
    private IndexedDiskElementDescriptor[] createPositionSortedDescriptorList()
    {
        IndexedDiskElementDescriptor[] defragList = new IndexedDiskElementDescriptor[keyHash.size()];
        Iterator<Map.Entry<K, IndexedDiskElementDescriptor>> iterator = keyHash.entrySet().iterator();
        for (int i = 0; iterator.hasNext(); i++)
        {
            Map.Entry<K, IndexedDiskElementDescriptor> next = iterator.next();
            defragList[i] = next.getValue();
        }

        Arrays.sort(defragList, new PositionComparator());

        return defragList;
    }

    /**
     * Returns the current cache size.
     * <p>
     *
     * @return The size value
     */
    @Override
    public int getSize()
    {
        return keyHash.size();
    }

    /**
     * Returns the size of the recycle bin in number of elements.
     * <p>
     *
     * @return The number of items in the bin.
     */
    protected int getRecyleBinSize()
    {
        return this.recycle.size();
    }

    /**
     * Returns the number of times we have used spots from the recycle bin.
     * <p>
     *
     * @return The number of spots used.
     */
    protected int getRecyleCount()
    {
        return this.recycleCnt;
    }

    /**
     * Returns the number of bytes that are free. When an item is removed, its length is recorded.
     * When a spot is used from the recycle bin, the length of the item stored is recorded.
     * <p>
     *
     * @return The number of bytes free on the disk file.
     */
    protected long getBytesFree()
    {
        return this.bytesFree.get();
    }

    /**
     * Resets the number of bytes that are free.
     */
    private void resetBytesFree()
    {
        this.bytesFree.set(0);
    }

    /**
     * To subtract, pass <code>false</code> for add.
     * <p>
     *
     * @param ded
     * @param add
     */
    private void adjustBytesFree(IndexedDiskElementDescriptor ded, boolean add)
    {
        if (ded != null)
        {
            int amount = ded.len + IndexedDisk.HEADER_SIZE_BYTES;

            if (add)
            {
                this.bytesFree.addAndGet(amount);
            }
            else
            {
                this.bytesFree.addAndGet(-amount);
            }
        }
    }

    /**
     * This is for debugging and testing.
     * <p>
     *
     * @return the length of the data file.
     * @throws IOException
     */
    protected long getDataFileSize() throws IOException
    {
        long size = 0;

        storageLock.readLock().lock();

        try
        {
            if (dataFile != null)
            {
                size = dataFile.length();
            }
        }
        finally
        {
            storageLock.readLock().unlock();
        }

        return size;
    }

    /**
     * For debugging. This dumps the values by default.
     */
    public void dump()
    {
        dump(true);
    }

    /**
     * For debugging.
     * <p>
     *
     * @param dumpValues
     *            A boolean indicating if values should be dumped.
     */
    public void dump(boolean dumpValues)
    {
        if (log.isDebugEnabled())
        {
            log.debug(logCacheName + "[dump] Number of keys: " + keyHash.size());

            for (Map.Entry<K, IndexedDiskElementDescriptor> e : keyHash.entrySet())
            {
                K key = e.getKey();
                IndexedDiskElementDescriptor ded = e.getValue();

                log.debug(logCacheName + "[dump] Disk element, key: " + key + ", pos: " + ded.pos + ", len: " + ded.len
                    + (dumpValues ? ", val: " + get(key) : ""));
            }
        }
    }

    /**
     * @return Returns the AuxiliaryCacheAttributes.
     */
    @Override
    public AuxiliaryCacheAttributes getAuxiliaryCacheAttributes()
    {
        return this.cattr;
    }

    /**
     * Returns info about the disk cache.
     * <p>
     *
     * @see org.apache.commons.jcs.auxiliary.AuxiliaryCache#getStatistics()
     */
    @Override
    public synchronized IStats getStatistics()
    {
        IStats stats = new Stats();
        stats.setTypeName("Indexed Disk Cache");

        ArrayList<IStatElement<?>> elems = new ArrayList<IStatElement<?>>();

        elems.add(new StatElement<Boolean>("Is Alive", Boolean.valueOf(isAlive())));
        elems.add(new StatElement<Integer>("Key Map Size", Integer.valueOf(this.keyHash != null ? this.keyHash.size() : -1)));
        try
        {
            elems.add(new StatElement<Long>("Data File Length",
                Long.valueOf(this.dataFile != null ? this.dataFile.length() : -1L)));
        }
        catch (IOException e)
        {
            log.error(e);
        }
        elems.add(new StatElement<Integer>("Max Key Size", Integer.valueOf(this.maxKeySize)));
        elems.add(new StatElement<AtomicInteger>("Hit Count", this.hitCount));
        elems.add(new StatElement<AtomicLong>("Bytes Free", this.bytesFree));
        elems.add(new StatElement<Integer>("Optimize Operation Count", Integer.valueOf(this.removeCount)));
        elems.add(new StatElement<Integer>("Times Optimized", Integer.valueOf(this.timesOptimized)));
        elems.add(new StatElement<Integer>("Recycle Count", Integer.valueOf(this.recycleCnt)));
        elems.add(new StatElement<Integer>("Recycle Bin Size", Integer.valueOf(this.recycle.size())));
        elems.add(new StatElement<Integer>("Startup Size", Integer.valueOf(this.startupSize)));

        // get the stats from the super too
        IStats sStats = super.getStatistics();
        elems.addAll(sStats.getStatElements());

        stats.setStatElements(elems);

        return stats;
    }

    /**
     * This is exposed for testing.
     * <p>
     *
     * @return Returns the timesOptimized.
     */
    protected int getTimesOptimized()
    {
        return timesOptimized;
    }

    /**
     * This is used by the event logging.
     * <p>
     *
     * @return the location of the disk, either path or ip.
     */
    @Override
    protected String getDiskLocation()
    {
        return dataFile.getFilePath();
    }

    /**
     * Compares IndexedDiskElementDescriptors based on their position.
     */
    protected static final class PositionComparator implements Comparator<IndexedDiskElementDescriptor>, Serializable
    {
        /** serialVersionUID */
        private static final long serialVersionUID = -8387365338590814113L;

        /**
         * Compares two descriptors based on position.
         * <p>
         *
         * @see java.util.Comparator#compare(java.lang.Object, java.lang.Object)
         */
        @Override
        public int compare(IndexedDiskElementDescriptor ded1, IndexedDiskElementDescriptor ded2)
        {
            if (ded1.pos < ded2.pos)
            {
                return -1;
            }
            else if (ded1.pos == ded2.pos)
            {
                return 0;
            }
            else
            {
                return 1;
            }
        }
    }

    /**
     * Class for recycling and lru. This implements the LRU overflow callback, so we can add items
     * to the recycle bin. This class tracks the content size to decide when to evict an element.
     */
    public class LRUMapSizeLimited extends AbstractLRUMap<K, IndexedDiskElementDescriptor>
    {
        /**
         * <code>tag</code> tells us which map we are working on.
         */
        public static final String TAG = "orig";

        // size of the content in kB
        private AtomicInteger contentSize;
        private int maxSize;

        /**
         * Default
         */
        public LRUMapSizeLimited()
        {
            this(-1);
        }

        /**
         * @param maxKeySize
         */
        public LRUMapSizeLimited(int maxKeySize)
        {
            super();
            this.maxSize = maxKeySize;
            this.contentSize = new AtomicInteger(0);
        }

        // Keep the content size in kB, so 2^31 kB is a reasonable limit.
        private void subLengthFromCacheSize(IndexedDiskElementDescriptor value)
        {
            contentSize.addAndGet((value.len + IndexedDisk.HEADER_SIZE_BYTES) / -1024 - 1);
        }

        // Keep the content size in kB, so 2^31 kB is a reasonable limit.
        private void addLengthToCacheSize(IndexedDiskElementDescriptor value)
        {
            contentSize.addAndGet((value.len + IndexedDisk.HEADER_SIZE_BYTES) / 1024 + 1);
        }
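
        /*
         * Worked example (hypothetical lengths, assuming a 4-byte header): for
         * value.len = 2044, (2044 + 4) / 1024 + 1 = 3, so the entry is counted as
         * 3 kB. The trailing "+ 1" always rounds up, deliberately over-counting so
         * that contentSize never under-estimates the real footprint on disk.
         */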

        @Override
        public IndexedDiskElementDescriptor put(K key, IndexedDiskElementDescriptor value)
        {
            IndexedDiskElementDescriptor oldValue = null;

            try
            {
                oldValue = super.put(key, value);
            }
            finally
            {
                // keep the content size in kB, so 2^31 kB is a reasonable limit
                if (value != null)
                {
                    addLengthToCacheSize(value);
                }
                if (oldValue != null)
                {
                    subLengthFromCacheSize(oldValue);
                }
            }

            return oldValue;
        }

        @Override
        public IndexedDiskElementDescriptor remove(Object key)
        {
            IndexedDiskElementDescriptor value = null;

            try
            {
                value = super.remove(key);
                return value;
            }
            finally
            {
                if (value != null)
                {
                    subLengthFromCacheSize(value);
                }
            }
        }

        /**
         * This is called when the max key size is reached. The least recently used item will be
         * passed here. We will store the position and size of the spot on disk in the recycle bin.
         * <p>
         *
         * @param key
         * @param value
         */
        @Override
        protected void processRemovedLRU(K key, IndexedDiskElementDescriptor value)
        {
            if (value != null)
            {
                subLengthFromCacheSize(value);
            }

            addToRecycleBin(value);

            if (log.isDebugEnabled())
            {
                log.debug(logCacheName + "Removing key: [" + key + "] from key store.");
                log.debug(logCacheName + "Key store size: [" + this.size() + "].");
            }

            doOptimizeRealTime();
        }

        @Override
        protected boolean shouldRemove()
        {
            return maxSize > 0 && contentSize.get() > maxSize && this.size() > 0;
        }
    }

    /**
     * Class for recycling and lru. This implements the LRU overflow callback, so we can add items
     * to the recycle bin. This class counts the elements to decide when to evict an element.
     */
    public class LRUMapCountLimited extends LRUMap<K, IndexedDiskElementDescriptor>
    // implements Serializable
    {
        /**
         * @param maxKeySize the maximum number of keys to hold
         */
        public LRUMapCountLimited(int maxKeySize)
        {
            super(maxKeySize);
        }

        /**
         * This is called when the max key size is reached. The least recently used item will be
         * passed here. We will store the position and size of the spot on disk in the recycle bin.
         * <p>
         *
         * @param key
         * @param value
         */
        @Override
        protected void processRemovedLRU(K key, IndexedDiskElementDescriptor value)
        {
            addToRecycleBin(value);
            if (log.isDebugEnabled())
            {
                log.debug(logCacheName + "Removing key: [" + key + "] from key store.");
                log.debug(logCacheName + "Key store size: [" + this.size() + "].");
            }

            doOptimizeRealTime();
        }
    }
}