package org.apache.commons.jcs.auxiliary.disk.block;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.commons.jcs.auxiliary.AuxiliaryCacheAttributes;
import org.apache.commons.jcs.auxiliary.disk.AbstractDiskCache;
import org.apache.commons.jcs.engine.CacheConstants;
import org.apache.commons.jcs.engine.behavior.ICacheElement;
import org.apache.commons.jcs.engine.behavior.IElementSerializer;
import org.apache.commons.jcs.engine.behavior.IRequireScheduler;
import org.apache.commons.jcs.engine.control.group.GroupAttrName;
import org.apache.commons.jcs.engine.control.group.GroupId;
import org.apache.commons.jcs.engine.stats.StatElement;
import org.apache.commons.jcs.engine.stats.Stats;
import org.apache.commons.jcs.engine.stats.behavior.IStatElement;
import org.apache.commons.jcs.engine.stats.behavior.IStats;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * There is one BlockDiskCache per region. It manages the key and data store.
 * <p>
 * @author Aaron Smuts
 */
public class BlockDiskCache<K, V>
    extends AbstractDiskCache<K, V>
    implements IRequireScheduler
{
    /** The logger. */
    private static final Log log = LogFactory.getLog( BlockDiskCache.class );

    /** The name to prefix all log messages with. */
    private final String logCacheName;

    /** The name of the file used to store the data. */
    private final String fileName;

    /** The data access object. */
    private BlockDisk dataFile;

    /** Attributes governing the behavior of the block disk cache. */
    private final BlockDiskCacheAttributes blockDiskCacheAttributes;

    /** The root directory for keys and data. */
    private final File rootDirectory;

    /** Stores, loads, and persists the keys. */
    private BlockDiskKeyStore<K> keyStore;

    /**
     * Use this lock to synchronize reads and writes to the underlying storage mechanism. We don't
     * need a reentrant lock, since we only lock one level.
     */
    private final ReentrantReadWriteLock storageLock = new ReentrantReadWriteLock();

    /** Future for the periodic key-persistence task, kept so it can be cancelled on dispose. */
    private ScheduledFuture<?> future;

    /**
     * Constructs the BlockDisk after setting up the root directory.
     * <p>
     * @param cacheAttributes
     */
    public BlockDiskCache( BlockDiskCacheAttributes cacheAttributes )
    {
        this( cacheAttributes, null );
    }

    /**
     * Constructs the BlockDisk after setting up the root directory.
     * <p>
     * @param cacheAttributes
     * @param elementSerializer used if supplied; a null value will not replace the default
     */
    public BlockDiskCache( BlockDiskCacheAttributes cacheAttributes, IElementSerializer elementSerializer )
    {
        super( cacheAttributes );
        setElementSerializer( elementSerializer );

        this.blockDiskCacheAttributes = cacheAttributes;
        this.logCacheName = "Region [" + getCacheName() + "] ";

        if ( log.isInfoEnabled() )
        {
            log.info( logCacheName + "Constructing BlockDiskCache with attributes " + cacheAttributes );
        }

        // Make a clean file name
        this.fileName = getCacheName().replaceAll("[^a-zA-Z0-9-_\\.]", "_");
        this.rootDirectory = cacheAttributes.getDiskPath();

        if ( log.isInfoEnabled() )
        {
            log.info( logCacheName + "Cache file root directory: [" + rootDirectory + "]");
        }

        try
        {
            if ( this.blockDiskCacheAttributes.getBlockSizeBytes() > 0 )
            {
                this.dataFile = new BlockDisk( new File( rootDirectory, fileName + ".data" ),
                                               this.blockDiskCacheAttributes.getBlockSizeBytes(),
                                               getElementSerializer() );
            }
            else
            {
                this.dataFile = new BlockDisk( new File( rootDirectory, fileName + ".data" ),
                                               getElementSerializer() );
            }

            keyStore = new BlockDiskKeyStore<K>( this.blockDiskCacheAttributes, this );

            boolean alright = verifyDisk();

            if ( keyStore.size() == 0 || !alright )
            {
                this.reset();
            }

            // Initialization finished successfully, so set alive to true.
            setAlive(true);
            if ( log.isInfoEnabled() )
            {
                log.info( logCacheName + "Block Disk Cache is alive." );
            }
        }
        catch ( IOException e )
        {
            log.error( logCacheName + "Failure initializing for fileName: " + fileName + " and root directory: "
                + rootDirectory, e );
        }
    }
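
    /*
     * A minimal construction sketch (illustrative only; the setter names below come from
     * BlockDiskCacheAttributes and its disk-attributes superclass, and the region name and
     * path are placeholders):
     *
     *   BlockDiskCacheAttributes attributes = new BlockDiskCacheAttributes();
     *   attributes.setCacheName( "testRegion" );
     *   attributes.setDiskPath( "target/block-disk-sandbox" );
     *   attributes.setBlockSizeBytes( 1024 );
     *   BlockDiskCache<String, String> cache = new BlockDiskCache<String, String>( attributes );
     */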

    /**
     * @see org.apache.commons.jcs.engine.behavior.IRequireScheduler#setScheduledExecutorService(java.util.concurrent.ScheduledExecutorService)
     */
    @Override
    public void setScheduledExecutorService(ScheduledExecutorService scheduledExecutor)
    {
        // add this region to the persistence thread.
        // TODO we might need to stagger this a bit.
        if ( this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds() > 0 )
        {
            future = scheduledExecutor.scheduleAtFixedRate(
                    new Runnable()
                    {
                        @Override
                        public void run()
                        {
                            keyStore.saveKeys();
                        }
                    },
                    this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds(),
                    this.blockDiskCacheAttributes.getKeyPersistenceIntervalSeconds(),
                    TimeUnit.SECONDS);
        }
    }
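
    /*
     * Wiring sketch (illustrative only): the cache manager normally injects a shared
     * scheduler, but a standalone caller could supply one directly. The pool size of 1
     * is an arbitrary choice for this example:
     *
     *   ScheduledExecutorService scheduler = Executors.newScheduledThreadPool( 1 );
     *   cache.setScheduledExecutorService( scheduler );
     */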

    /**
     * We need to verify that the file on disk uses the same block size and that the file is the
     * proper size.
     * <p>
     * @return true if it looks ok
     */
    protected boolean verifyDisk()
    {
        boolean alright = false;
        // simply try to read a few. If it works, then the file is probably ok.
        // TODO add more checks.

        storageLock.readLock().lock();

        try
        {
            int maxToTest = 100;
            int count = 0;
            Iterator<Map.Entry<K, int[]>> it = this.keyStore.entrySet().iterator();
            while ( it.hasNext() && count < maxToTest )
            {
                count++;
                Map.Entry<K, int[]> entry = it.next();
                Object data = this.dataFile.read( entry.getValue() );
                if ( data == null )
                {
                    throw new Exception( logCacheName + "Couldn't find data for key [" + entry.getKey() + "]" );
                }
            }
            alright = true;
        }
        catch ( Exception e )
        {
            log.warn( logCacheName + "Problem verifying disk.  Message [" + e.getMessage() + "]" );
            alright = false;
        }
        finally
        {
            storageLock.readLock().unlock();
        }

        return alright;
    }

    /**
     * Return the keys in this cache.
     * <p>
     * @see org.apache.commons.jcs.auxiliary.disk.AbstractDiskCache#getKeySet()
     */
    @Override
    public Set<K> getKeySet() throws IOException
    {
        HashSet<K> keys = new HashSet<K>();

        storageLock.readLock().lock();

        try
        {
            keys.addAll(this.keyStore.keySet());
        }
        finally
        {
            storageLock.readLock().unlock();
        }

        return keys;
    }

    /**
     * Gets matching items from the cache.
     * <p>
     * @param pattern
     * @return a map of K key to ICacheElement&lt;K, V&gt; element, or an empty map if no cached
     *         keys match the pattern
     */
    @Override
    public Map<K, ICacheElement<K, V>> processGetMatching( String pattern )
    {
        Map<K, ICacheElement<K, V>> elements = new HashMap<K, ICacheElement<K, V>>();

        Set<K> keyArray = null;
        storageLock.readLock().lock();
        try
        {
            keyArray = new HashSet<K>(keyStore.keySet());
        }
        finally
        {
            storageLock.readLock().unlock();
        }

        Set<K> matchingKeys = getKeyMatcher().getMatchingKeysFromArray( pattern, keyArray );

        for (K key : matchingKeys)
        {
            ICacheElement<K, V> element = processGet( key );
            if ( element != null )
            {
                elements.put( key, element );
            }
        }

        return elements;
    }
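
    /*
     * A usage sketch for pattern gets (illustrative only): getMatching(String) is the
     * public entry point inherited from the abstract disk cache, and the default key
     * matcher treats the pattern as a regular expression. The key prefix is a placeholder:
     *
     *   Map<String, ICacheElement<String, String>> hits = cache.getMatching( "customer.*" );
     */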

    /**
     * Returns the number of keys.
     * <p>
     * @see org.apache.commons.jcs.auxiliary.disk.AbstractDiskCache#getSize()
     */
    @Override
    public int getSize()
    {
        return this.keyStore.size();
    }

    /**
     * Gets the ICacheElement&lt;K, V&gt; for the key if it is in the cache. The program flow is as follows:
     * <ol>
     * <li>Make sure the disk cache is alive.</li>
     * <li>Get a read lock.</li>
     * <li>See if the key is in the key store.</li>
     * <li>If we found a key, ask the BlockDisk for the object at the blocks.</li>
     * <li>Release the lock.</li>
     * </ol>
     * @param key
     * @return ICacheElement
     * @see org.apache.commons.jcs.auxiliary.disk.AbstractDiskCache#get(Object)
     */
    @Override
    protected ICacheElement<K, V> processGet( K key )
    {
        if ( !isAlive() )
        {
            if ( log.isDebugEnabled() )
            {
                log.debug( logCacheName + "No longer alive so returning null for key = " + key );
            }
            return null;
        }

        if ( log.isDebugEnabled() )
        {
            log.debug( logCacheName + "Trying to get from disk: " + key );
        }

        ICacheElement<K, V> object = null;

        try
        {
            storageLock.readLock().lock();
            try
            {
                int[] ded = this.keyStore.get( key );
                if ( ded != null )
                {
                    object = this.dataFile.read( ded );
                }
            }
            finally
            {
                storageLock.readLock().unlock();
            }
        }
        catch ( IOException ioe )
        {
            log.error( logCacheName + "Failure getting from disk--IOException, key = " + key, ioe );
            reset();
        }
        catch ( Exception e )
        {
            log.error( logCacheName + "Failure getting from disk, key = " + key, e );
        }
        return object;
    }

    /**
     * Writes an element to disk. The program flow is as follows:
     * <ol>
     * <li>Acquire the write lock.</li>
     * <li>See if an item exists for this key.</li>
     * <li>If an item already exists, free its blocks so they can be reused.</li>
     * <li>Have the BlockDisk write the item.</li>
     * <li>Create a descriptor and add it to the key map.</li>
     * <li>Release the write lock.</li>
     * </ol>
     * @param element
     * @see org.apache.commons.jcs.auxiliary.disk.AbstractDiskCache#update(ICacheElement)
     */
    @Override
    protected void processUpdate( ICacheElement<K, V> element )
    {
        if ( !isAlive() )
        {
            if ( log.isDebugEnabled() )
            {
                log.debug( logCacheName + "No longer alive; aborting put of key = " + element.getKey() );
            }
            return;
        }

        int[] old = null;

        // make sure this only locks for one particular cache region
        storageLock.writeLock().lock();

        try
        {
            old = this.keyStore.get( element.getKey() );

            if ( old != null )
            {
                this.dataFile.freeBlocks( old );
            }

            int[] blocks = this.dataFile.write( element );

            this.keyStore.put( element.getKey(), blocks );

            if ( log.isDebugEnabled() )
            {
                log.debug( logCacheName + "Put to file [" + fileName + "] key [" + element.getKey() + "]" );
            }
        }
        catch ( IOException e )
        {
            log.error( logCacheName + "Failure updating element, key: " + element.getKey() + " old: " + Arrays.toString(old), e );
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        if ( log.isDebugEnabled() )
        {
            log.debug( logCacheName + "Storing element on disk, key: " + element.getKey() );
        }
    }
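
    /*
     * A minimal write-path sketch (illustrative only; CacheElement is the standard
     * ICacheElement implementation from org.apache.commons.jcs.engine, and the key and
     * value are placeholders). The public update() inherited from the abstract disk cache
     * queues the element, and processUpdate() eventually writes the blocks:
     *
     *   ICacheElement<String, String> element =
     *       new CacheElement<String, String>( cache.getCacheName(), "key1", "value1" );
     *   cache.update( element );
     */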

    /**
     * Returns true if the removal was successful; or false if there is nothing to remove. The
     * current implementation always results in a disk orphan.
     * <p>
     * @param key
     * @return true if anything was removed
     * @see org.apache.commons.jcs.auxiliary.disk.AbstractDiskCache#remove(Object)
     */
    @Override
    protected boolean processRemove( K key )
    {
        if ( !isAlive() )
        {
            if ( log.isDebugEnabled() )
            {
                log.debug( logCacheName + "No longer alive so returning false for key = " + key );
            }
            return false;
        }

        boolean reset = false;
        boolean removed = false;

        storageLock.writeLock().lock();

        try
        {
            if (key instanceof String && key.toString().endsWith(CacheConstants.NAME_COMPONENT_DELIMITER))
            {
                removed = performPartialKeyRemoval((String) key);
            }
            else if (key instanceof GroupAttrName && ((GroupAttrName<?>) key).attrName == null)
            {
                removed = performGroupRemoval(((GroupAttrName<?>) key).groupId);
            }
            else
            {
                removed = performSingleKeyRemoval(key);
            }
        }
        catch ( Exception e )
        {
            log.error( logCacheName + "Problem removing element.", e );
            reset = true;
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        if ( reset )
        {
            reset();
        }

        return removed;
    }
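
    /*
     * Removal semantics sketch (illustrative only; the keys are placeholders, and
     * hierarchical removal applies to String keys ending with
     * CacheConstants.NAME_COMPONENT_DELIMITER):
     *
     *   // assuming ":" is the name component delimiter:
     *   cache.remove( "customer:1" );   // single key removal
     *   cache.remove( "customer:" );    // partial-key removal of everything under "customer:"
     */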

    /**
     * Remove all elements from the group. This does not use the iterator to remove. It builds a
     * list of group elements and then removes them one by one.
     * <p>
     * This operates under a lock obtained in processRemove().
     * <p>
     * @param key
     * @return true if an element was removed
     */
    private boolean performGroupRemoval(GroupId key)
    {
        boolean removed = false;

        // remove all keys of the same group.
        List<K> itemsToRemove = new LinkedList<K>();

        for (K k : keyStore.keySet())
        {
            if (k instanceof GroupAttrName && ((GroupAttrName<?>) k).groupId.equals(key))
            {
                itemsToRemove.add(k);
            }
        }

        // remove matches.
        for (K fullKey : itemsToRemove)
        {
            // Don't add to recycle bin here
            // https://issues.apache.org/jira/browse/JCS-67
            performSingleKeyRemoval(fullKey);
            removed = true;
            // TODO this needs to update the remove count separately
        }

        return removed;
    }

    /**
     * Iterates over the key set. Builds a list of matches. Removes all the keys in the list. Does
     * not remove via the iterator, since the map impl may not support it.
     * <p>
     * This operates under a lock obtained in processRemove().
     * <p>
     * @param key
     * @return true if there was a match
     */
    private boolean performPartialKeyRemoval(String key)
    {
        boolean removed = false;

        // remove all keys of the same name hierarchy.
        List<K> itemsToRemove = new LinkedList<K>();

        for (K k : keyStore.keySet())
        {
            if (k instanceof String && k.toString().startsWith(key))
            {
                itemsToRemove.add(k);
            }
        }

        // remove matches.
        for (K fullKey : itemsToRemove)
        {
            // Don't add to recycle bin here
            // https://issues.apache.org/jira/browse/JCS-67
            performSingleKeyRemoval(fullKey);
            removed = true;
            // TODO this needs to update the remove count separately
        }

        return removed;
    }

    /**
     * Removes a single key from the key store and frees the associated blocks.
     * <p>
     * This operates under a lock obtained in processRemove().
     * <p>
     * @param key
     * @return true if the key was found and removed
     */
    private boolean performSingleKeyRemoval(K key)
    {
        // remove single item.
        int[] ded = this.keyStore.remove( key );
        boolean removed = ded != null;
        if ( removed )
        {
            this.dataFile.freeBlocks( ded );
        }

        if ( log.isDebugEnabled() )
        {
            log.debug( logCacheName + "Disk removal: Removed from key hash, key [" + key + "] removed = "
                + removed );
        }
        return removed;
    }

    /**
     * Resets the keyfile, the disk file, and the memory key map.
     * <p>
     * @see org.apache.commons.jcs.auxiliary.disk.AbstractDiskCache#removeAll()
     */
    @Override
    protected void processRemoveAll()
    {
        reset();
    }

    /**
     * Dispose of the disk cache in a background thread. Joins against this thread to put a cap on
     * the disposal time.
     * <p>
     * TODO make dispose window configurable.
     */
    @Override
    public void processDispose()
    {
        Runnable disR = new Runnable()
        {
            @Override
            public void run()
            {
                try
                {
                    disposeInternal();
                }
                catch ( InterruptedException e )
                {
                    log.warn( "Interrupted while disposing." );
                }
            }
        };
        Thread t = new Thread( disR, "BlockDiskCache-DisposalThread" );
        t.start();
        // wait up to 60 seconds for dispose and then quit if not done.
        try
        {
            t.join( 60 * 1000 );
        }
        catch ( InterruptedException ex )
        {
            log.error( logCacheName + "Interrupted while waiting for disposal thread to finish.", ex );
        }
    }

    /**
     * Internal method that handles the disposal.
     * @throws InterruptedException
     */
    protected void disposeInternal()
        throws InterruptedException
    {
        if ( !isAlive() )
        {
            log.error( logCacheName + "Not alive and dispose was called, filename: " + fileName );
            return;
        }
        storageLock.writeLock().lock();
        try
        {
            // Prevents any interaction with the cache while we're shutting down.
            setAlive(false);
            this.keyStore.saveKeys();

            if (future != null)
            {
                future.cancel(true);
            }

            try
            {
                if ( log.isDebugEnabled() )
                {
                    log.debug( logCacheName + "Closing files, base filename: " + fileName );
                }
                dataFile.close();
                // dataFile = null;

                // TODO make a close method for the key file
                // keyFile.close();
                // keyFile = null;
            }
            catch ( IOException e )
            {
                log.error( logCacheName + "Failure closing files in dispose, filename: " + fileName, e );
            }
        }
        finally
        {
            storageLock.writeLock().unlock();
        }

        if ( log.isInfoEnabled() )
        {
            log.info( logCacheName + "Shutdown complete." );
        }
    }

    /**
     * Returns the attributes.
     * <p>
     * @see org.apache.commons.jcs.auxiliary.AuxiliaryCache#getAuxiliaryCacheAttributes()
     */
    @Override
    public AuxiliaryCacheAttributes getAuxiliaryCacheAttributes()
    {
        return this.blockDiskCacheAttributes;
    }

    /**
     * Reset effectively clears the disk cache, creating new files, recycle bins, and key maps.
     * <p>
     * It can be used as a last resort to handle errors, to force a content update, or to remove all.
     */
    private void reset()
    {
        if ( log.isWarnEnabled() )
        {
            log.warn( logCacheName + "Resetting cache" );
        }

        storageLock.writeLock().lock();

        try
        {
            this.keyStore.reset();

            if ( dataFile != null )
            {
                dataFile.reset();
            }
        }
        catch ( IOException e )
        {
            log.error( logCacheName + "Failure resetting state", e );
        }
        finally
        {
            storageLock.writeLock().unlock();
        }
    }

    /**
     * Add these blocks to the emptyBlock list.
     * <p>
     * @param blocksToFree
     */
    protected void freeBlocks( int[] blocksToFree )
    {
        this.dataFile.freeBlocks( blocksToFree );
    }

    /**
     * Returns info about the disk cache.
     * <p>
     * @see org.apache.commons.jcs.auxiliary.AuxiliaryCache#getStatistics()
     */
    @Override
    public IStats getStatistics()
    {
        IStats stats = new Stats();
        stats.setTypeName( "Block Disk Cache" );

        ArrayList<IStatElement<?>> elems = new ArrayList<IStatElement<?>>();

        elems.add(new StatElement<Boolean>( "Is Alive", Boolean.valueOf(isAlive()) ) );
        elems.add(new StatElement<Integer>( "Key Map Size", Integer.valueOf(this.keyStore.size()) ) );

        if (this.dataFile != null)
        {
            try
            {
                elems.add(new StatElement<Long>( "Data File Length", Long.valueOf(this.dataFile.length()) ) );
            }
            catch ( IOException e )
            {
                log.error( e );
            }

            elems.add(new StatElement<Integer>( "Block Size Bytes",
                    Integer.valueOf(this.dataFile.getBlockSizeBytes()) ) );
            elems.add(new StatElement<Integer>( "Number Of Blocks",
                    Integer.valueOf(this.dataFile.getNumberOfBlocks()) ) );
            elems.add(new StatElement<Long>( "Average Put Size Bytes",
                    Long.valueOf(this.dataFile.getAveragePutSizeBytes()) ) );
            elems.add(new StatElement<Integer>( "Empty Blocks",
                    Integer.valueOf(this.dataFile.getEmptyBlocks()) ) );
        }

        // get the stats from the super too
        IStats sStats = super.getStatistics();
        elems.addAll(sStats.getStatElements());

        stats.setStatElements( elems );

        return stats;
    }
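
    /*
     * A stats-inspection sketch (illustrative only): IStats exposes the element list,
     * and each IStatElement carries a name and a data value:
     *
     *   IStats stats = cache.getStatistics();
     *   for ( IStatElement<?> element : stats.getStatElements() )
     *   {
     *       System.out.println( element.getName() + " = " + element.getData() );
     *   }
     */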

    /**
     * This is used by the event logging.
     * <p>
     * @return the location of the disk, either path or ip.
     */
    @Override
    protected String getDiskLocation()
    {
        return dataFile.getFilePath();
    }
}