package org.apache.commons.jcs.auxiliary.disk;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.commons.jcs.auxiliary.AbstractAuxiliaryCacheEventLogging;
import org.apache.commons.jcs.auxiliary.AuxiliaryCache;
import org.apache.commons.jcs.auxiliary.disk.behavior.IDiskCacheAttributes;
import org.apache.commons.jcs.engine.CacheEventQueueFactory;
import org.apache.commons.jcs.engine.CacheInfo;
import org.apache.commons.jcs.engine.CacheStatus;
import org.apache.commons.jcs.engine.behavior.ICache;
import org.apache.commons.jcs.engine.behavior.ICacheElement;
import org.apache.commons.jcs.engine.behavior.ICacheEventQueue;
import org.apache.commons.jcs.engine.behavior.ICacheListener;
import org.apache.commons.jcs.engine.stats.StatElement;
import org.apache.commons.jcs.engine.stats.Stats;
import org.apache.commons.jcs.engine.stats.behavior.IStatElement;
import org.apache.commons.jcs.engine.stats.behavior.IStats;
import org.apache.commons.jcs.utils.struct.LRUMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * Abstract class providing a base implementation of a disk cache, which can be easily extended to
 * implement a disk cache for a specific persistence mechanism.
 *
 * When implementing the abstract methods, note that while this base class handles most things, it
 * does not acquire or release any locks. Implementations should do so as necessary. This is mainly
 * done to minimize the time spent in critical sections.
 *
 * Error handling in this class needs to be addressed. Currently, if an exception is thrown by the
 * persistence mechanism, this class destroys the event queue. Should it also destroy purgatory?
 * Should it dispose itself?
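 *
 * A rough outline of a concrete subclass is sketched below. This is illustrative only: the
 * class name and the store behind it are hypothetical, and a real implementation must also
 * implement the process* methods inherited from the event-logging parent class (omitted here).
 *
 * <pre>{@code
 * public class ExampleDiskCache<K, V> extends AbstractDiskCache<K, V>
 * {
 *     public ExampleDiskCache( IDiskCacheAttributes attr )
 *     {
 *         super( attr );
 *         // open the underlying persistent store here, then mark the cache usable
 *         setAlive( true );
 *     }
 *
 *     public Set<K> getKeySet() throws IOException
 *     {
 *         return new HashSet<K>(); // the keys known to the store
 *     }
 *
 *     public int getSize()
 *     {
 *         return 0; // the number of elements in the store
 *     }
 *
 *     protected String getDiskLocation()
 *     {
 *         return "/tmp/example-disk-cache"; // path or address of the store
 *     }
 * }
 * }</pre>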
 */
public abstract class AbstractDiskCache<K, V>
    extends AbstractAuxiliaryCacheEventLogging<K, V>
{
    /** The logger */
    private static final Log log = LogFactory.getLog( AbstractDiskCache.class );

    /** Generic disk cache attributes */
    private IDiskCacheAttributes diskCacheAttributes = null;

    /**
     * Map where elements are stored between being added to this cache and actually spooled to disk.
     * This allows puts to the disk cache to return quickly, while the more expensive work of
     * serializing the elements to persistent storage is queued for later.
     *
     * If the elements are pulled back into the memory cache while they are still in purgatory,
     * writing to disk can be canceled.
     */
    private Map<K, PurgatoryElement<K, V>> purgatory;

    /**
     * The CacheEventQueue where changes will be queued for asynchronous updating of the persistent
     * storage.
     */
    private ICacheEventQueue<K, V> cacheEventQueue;

    /**
     * Indicates whether the cache is 'alive': initialized, but not yet disposed. Child classes must
     * set this to true.
     */
    private boolean alive = false;

    /** Every cache will have a name, subclasses must set this when they are initialized. */
    private String cacheName;

    /** DEBUG: Keeps a count of the number of purgatory hits for debug messages */
    private int purgHits = 0;

    /**
     * We lock here so that we cannot get an update after a remove all. An individual removal locks
     * the item.
     */
    private final ReentrantReadWriteLock removeAllLock = new ReentrantReadWriteLock();

    // ----------------------------------------------------------- constructors

    /**
     * Construct the abstract disk cache, create event queues and purgatory. Child classes should
     * set the alive flag to true after they are initialized.
     *
     * @param attr
     */
    protected AbstractDiskCache( IDiskCacheAttributes attr )
    {
        this.diskCacheAttributes = attr;
        this.cacheName = attr.getCacheName();

        // create queue
        CacheEventQueueFactory<K, V> fact = new CacheEventQueueFactory<K, V>();
        this.cacheEventQueue = fact.createCacheEventQueue( new MyCacheListener(), CacheInfo.listenerId, cacheName,
                                                           diskCacheAttributes.getEventQueuePoolName(),
                                                           diskCacheAttributes.getEventQueueType() );

        // create purgatory
        initPurgatory();
    }

    /**
     * @return true if the cache is alive
     */
    public boolean isAlive()
    {
        return alive;
    }

    /**
     * @param alive set the alive status
     */
    public void setAlive(boolean alive)
    {
        this.alive = alive;
    }

    /**
     * Initializes purgatory. A maximum purgatory size of less than 0 means an unbounded HashMap is
     * used; a size of 0 or greater means an LRUMap capped at that size is used.
     *
     * TODO Currently setting the size to 0 causes nothing to be written to disk, since an item
     *       that is missing from purgatory is assumed to have been pulled back into memory. A
     *       value of 0 should be made to work as a way of bypassing purgatory.
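     *
     * For reference, the maximum purgatory size normally comes from the auxiliary's configured
     * attributes. The line below is only an illustration (the auxiliary name "DC" and the value
     * are placeholders, and the property name is assumed to map onto
     * {@link IDiskCacheAttributes#getMaxPurgatorySize()}):
     *
     * <pre>
     * jcs.auxiliary.DC.attributes.MaxPurgatorySize=10000
     * </pre>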
     */
    private void initPurgatory()
    {
        // we need this so we can stop the updates from happening after a
        // removeall
        removeAllLock.writeLock().lock();

        try
        {
            synchronized (this)
            {
                if ( diskCacheAttributes.getMaxPurgatorySize() >= 0 )
                {
                    purgatory = new LRUMap<K, PurgatoryElement<K, V>>( diskCacheAttributes.getMaxPurgatorySize() );
                }
                else
                {
                    purgatory = new HashMap<K, PurgatoryElement<K, V>>();
                }
            }
        }
        finally
        {
            removeAllLock.writeLock().unlock();
        }
    }

    // ------------------------------------------------------- interface ICache

    /**
     * Adds the provided element to the cache. The element will be added to purgatory, and then
     * queued for later writing to the persistent storage mechanism.
     *
     * An update results in a put event being created. The put event will call the handlePut method
     * defined here. The handlePut method calls doUpdate, which delegates to the child's
     * implementation.
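     *
     * A caller-side sketch of the flow (illustrative only; the {@code diskCache} reference and the
     * use of {@code CacheElement} as the concrete element type are assumptions):
     *
     * <pre>{@code
     * ICacheElement<String, String> element =
     *     new CacheElement<String, String>( "testRegion", "myKey", "myValue" );
     *
     * // returns quickly: the element sits in purgatory until the event queue
     * // thread calls handlePut, which spools it to disk via doUpdate
     * diskCache.update( element );
     * }</pre>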
     *
     * @param cacheElement
     * @throws IOException
     * @see org.apache.commons.jcs.engine.behavior.ICache#update
     */
    @Override
    public final void update( ICacheElement<K, V> cacheElement )
        throws IOException
    {
        if ( log.isDebugEnabled() )
        {
            log.debug( "Putting element in purgatory, cacheName: " + cacheName + ", key: " + cacheElement.getKey() );
        }

        try
        {
            // Wrap the CacheElement in a PurgatoryElement
            PurgatoryElement<K, V> pe = new PurgatoryElement<K, V>( cacheElement );

            // Indicates that the element is eligible to be spooled to disk;
            // this will remain true unless the item is pulled back into
            // memory.
            pe.setSpoolable( true );

            // Add the element to purgatory
            synchronized ( purgatory )
            {
                purgatory.put( pe.getKey(), pe );
            }

            // Queue element for serialization
            cacheEventQueue.addPutEvent( pe );
        }
        catch ( IOException ex )
        {
            log.error( "Problem adding put event to queue.", ex );

            cacheEventQueue.destroy();
        }
    }

    /**
     * Check to see if the item is in purgatory. If so, return it. If not, check to see if we have
     * it on disk.
     *
     * @param key
     * @return ICacheElement&lt;K, V&gt; or null
     * @see AuxiliaryCache#get
     */
    @Override
    public final ICacheElement<K, V> get( K key )
    {
        // If not alive, always return null.

        if ( !alive )
        {
            if ( log.isDebugEnabled() )
            {
                log.debug( "get was called, but the disk cache is not alive." );
            }
            return null;
        }

        PurgatoryElement<K, V> pe = null;
        synchronized ( purgatory )
        {
            pe = purgatory.get( key );
        }

        // If the element was found in purgatory
        if ( pe != null )
        {
            purgHits++;

            if ( log.isDebugEnabled() )
            {
                if ( purgHits % 100 == 0 )
                {
                    log.debug( "Purgatory hits = " + purgHits );
                }
            }

            // Since the element will go back to the memory cache, we could set
            // spoolable to false, which will prevent the queue listener from
            // serializing the element. This would not match the disk cache
            // behavior and the behavior of other auxiliaries. Gets never remove
            // items from auxiliaries.
            // Beyond consistency, the items should stay in purgatory and get
            // spooled since the mem cache may be set to 0. If an item is
            // active, it will keep getting put into purgatory and removed. The
            // CompositeCache now does not put an item to memory from disk if
            // the size is 0.
            // Do not set spoolable to false. Just let it go to disk. This
            // will allow the memory size = 0 setting to work well.

            if ( log.isDebugEnabled() )
            {
                log.debug( "Found element in purgatory, cacheName: " + cacheName + ", key: " + key );
            }

            return pe.getCacheElement();
        }

        // If we reach this point, the element was not found in purgatory, so
        // get it from the persistent store.
        try
        {
            return doGet( key );
        }
        catch ( Exception e )
        {
            log.error( "Problem getting element from the persistent store, key: " + key, e );

            cacheEventQueue.destroy();
        }

        return null;
    }

    /**
     * Gets items from the cache matching the given pattern. Keys currently in purgatory are
     * matched first; matches read from the persistent store are added afterwards and therefore
     * replace any purgatory match for the same key.
     *
     * This only works with string keys. It's too expensive to do a toString on every key.
     *
     * Auxiliaries will do their best to handle simple expressions. For instance, the JDBC disk
     * cache will convert * to % and . to _
     *
     * @param pattern
     * @return a map of K key to ICacheElement&lt;K, V&gt; element, or an empty map if there is no
     *         data matching the pattern.
     * @throws IOException
     */
    @Override
    public Map<K, ICacheElement<K, V>> getMatching( String pattern )
        throws IOException
    {
        // Get the keys from purgatory
        Set<K> keyArray = null;

        // copy the keys so that purgatory is not locked while matching; this costs some memory
        synchronized ( purgatory )
        {
            keyArray = new HashSet<K>(purgatory.keySet());
        }

        Set<K> matchingKeys = getKeyMatcher().getMatchingKeysFromArray( pattern, keyArray );

        // call getMultiple with the set
        Map<K, ICacheElement<K, V>> result = processGetMultiple( matchingKeys );

        // Get the matching elements from disk
        Map<K, ICacheElement<K, V>> diskMatches = doGetMatching( pattern );

        result.putAll( diskMatches );

        return result;
    }

    /**
     * Gets multiple items from the cache based on the given set of keys.
     *
     * @param keys
     * @return a map of K key to ICacheElement&lt;K, V&gt; element, or an empty map if there is no
     *         data in cache for any of these keys
     */
    @Override
    public Map<K, ICacheElement<K, V>> processGetMultiple(Set<K> keys)
    {
        Map<K, ICacheElement<K, V>> elements = new HashMap<K, ICacheElement<K, V>>();

        if ( keys != null && !keys.isEmpty() )
        {
            for (K key : keys)
            {
                ICacheElement<K, V> element = get( key );

                if ( element != null )
                {
                    elements.put( key, element );
                }
            }
        }

        return elements;
    }

    /**
     * The keys in the cache.
     *
     * @see org.apache.commons.jcs.auxiliary.AuxiliaryCache#getKeySet()
     */
    @Override
    public abstract Set<K> getKeySet() throws IOException;

    /**
     * Removes are not queued. A call to remove is immediate.
     *
     * @param key
     * @return whether the item was present to be removed.
     * @throws IOException
     * @see org.apache.commons.jcs.engine.behavior.ICache#remove
     */
    @Override
    public final boolean remove( K key )
        throws IOException
    {
        PurgatoryElement<K, V> pe = null;

        synchronized ( purgatory )
        {
            // Get the element so we can lock on it, and remove it from
            // purgatory if it is there.
            pe = purgatory.get( key );
        }

        if ( pe != null )
        {
            synchronized ( pe.getCacheElement() )
            {
                synchronized ( purgatory )
                {
                    purgatory.remove( key );
                }

                // There is no way to remove the event from the queue, so just
                // make sure the element is not spooled to disk only to be
                // removed right afterwards.
                pe.setSpoolable( false );

                // Remove from the persistent store immediately
                doRemove( key );

                // The element was present in purgatory.
                return true;
            }
        }

        // Not in purgatory; remove from the persistent store immediately.
        return doRemove( key );
    }

    /**
     * @throws IOException
     * @see org.apache.commons.jcs.engine.behavior.ICache#removeAll
     */
    @Override
    public final void removeAll()
        throws IOException
    {
        if ( this.diskCacheAttributes.isAllowRemoveAll() )
        {
            // Replace purgatory with a new, empty map
            initPurgatory();

            // Remove all from persistent store immediately
            doRemoveAll();
        }
        else
        {
            if ( log.isInfoEnabled() )
            {
                log.info( "RemoveAll was requested but the request was not fulfilled: allowRemoveAll is set to false." );
            }
        }
    }

    /**
     * Adds a dispose request to the disk cache.
     *
     * Disposal proceeds in several steps.
     * <ol>
     * <li>Prior to this call the CompositeCache dumped its memory into the disk cache. If that
     * was large, then we need to wait for the event queue to finish.</li>
     * <li>Wait until the event queue is empty or until the configured ShutdownSpoolTimeLimit is
     * reached.</li>
     * <li>Call doDispose on the concrete impl.</li>
     * </ol>
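     *
     * The time limit comes from the configured attributes; for illustration only (the auxiliary
     * name "DC" and the value are placeholders, and the property name is assumed to map onto
     * {@link IDiskCacheAttributes#getShutdownSpoolTimeLimit()}, in seconds):
     *
     * <pre>
     * jcs.auxiliary.DC.attributes.ShutdownSpoolTimeLimit=60
     * </pre>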
     * @throws IOException
     */
    @Override
    public final void dispose()
        throws IOException
    {
        Runnable disR = new Runnable()
        {
            @Override
            public void run()
            {
                boolean keepGoing = true;
                // long total = 0;
                long interval = 100;
                while ( keepGoing )
                {
                    keepGoing = !cacheEventQueue.isEmpty();
                    try
                    {
                        Thread.sleep( interval );
                        // total += interval;
                        // log.info( "total = " + total );
                    }
                    catch ( InterruptedException e )
                    {
                        break;
                    }
                }
                log.info( "No longer waiting for event queue to finish: " + cacheEventQueue.getStatistics() );
            }
        };
        Thread t = new Thread( disR );
        t.start();
        // wait up to the configured ShutdownSpoolTimeLimit (in seconds) for the queue to drain, then quit if not done.
        try
        {
            t.join( this.diskCacheAttributes.getShutdownSpoolTimeLimit() * 1000L );
        }
        catch ( InterruptedException ex )
        {
            log.error( "The Shutdown Spool Process was interrupted.", ex );
        }

        log.info( "In dispose, destroying event queue." );
        // This stops the processor thread.
        cacheEventQueue.destroy();

        // Invoke any implementation specific disposal code
        // need to handle the disposal first.
        doDispose();

        alive = false;
    }

    /**
     * @return the region name.
     * @see ICache#getCacheName
     */
    @Override
    public String getCacheName()
    {
        return cacheName;
    }

    /**
     * Gets basic stats for the abstract disk cache.
     *
     * @return String
     */
    @Override
    public String getStats()
    {
        return getStatistics().toString();
    }

    /**
     * Returns semi-structured data.
     *
     * @see org.apache.commons.jcs.auxiliary.AuxiliaryCache#getStatistics()
     */
    @Override
    public IStats getStatistics()
    {
        IStats stats = new Stats();
        stats.setTypeName( "Abstract Disk Cache" );

        ArrayList<IStatElement<?>> elems = new ArrayList<IStatElement<?>>();

        elems.add(new StatElement<Integer>( "Purgatory Hits", Integer.valueOf(purgHits) ) );
        elems.add(new StatElement<Integer>( "Purgatory Size", Integer.valueOf(purgatory.size()) ) );

        // get the stats from the event queue too
        IStats eqStats = this.cacheEventQueue.getStatistics();
        elems.addAll(eqStats.getStatElements());

        stats.setStatElements( elems );

        return stats;
    }

    /**
     * @return the status: ALIVE if the cache is alive, otherwise DISPOSED (from CacheStatus)
     * @see ICache#getStatus
     */
    @Override
    public CacheStatus getStatus()
    {
        return ( alive ? CacheStatus.ALIVE : CacheStatus.DISPOSED );
    }

    /**
     * Size cannot be determined without knowledge of the cache implementation, so subclasses will
     * need to implement this method.
     *
     * @return the number of items.
     * @see ICache#getSize
     */
    @Override
    public abstract int getSize();

    /**
     * @see org.apache.commons.jcs.engine.behavior.ICacheType#getCacheType
     * @return Always returns DISK_CACHE since subclasses should all be of that type.
     */
    @Override
    public CacheType getCacheType()
    {
        return CacheType.DISK_CACHE;
    }

    /**
     * Cache listener that the event queue calls back into. It implements the ICacheListener
     * interface and delegates to the appropriate methods of the enclosing disk cache.
     */
    protected class MyCacheListener
        implements ICacheListener<K, V>
    {
        /** Id of the listener */
        private long listenerId = 0;

        /**
         * @return the id of this listener
         * @throws IOException
         * @see ICacheListener#getListenerId
         */
        @Override
        public long getListenerId()
            throws IOException
        {
            return this.listenerId;
        }

        /**
         * @param id
         * @throws IOException
         * @see ICacheListener#setListenerId
         */
        @Override
        public void setListenerId( long id )
            throws IOException
        {
            this.listenerId = id;
        }

        /**
         * @param element
         * @throws IOException
         * @see ICacheListener#handlePut NOTE: This checks whether the element is a purgatory
         *      element and behaves differently depending on the result. However, since we have
         *      control over how elements are added to the cache event queue, that may not be
         *      needed ( they are always PurgatoryElements ).
         */
        @Override
        public void handlePut( ICacheElement<K, V> element )
            throws IOException
        {
            if ( alive )
            {
                // If the element is a PurgatoryElement<K, V> we must check to see
                // if it is still spoolable, and remove it from purgatory.
                if ( element instanceof PurgatoryElement )
                {
                    PurgatoryElement<K, V> pe = (PurgatoryElement<K, V>) element;

                    synchronized ( pe.getCacheElement() )
                    {
                        // TODO consider a timeout.
                        // We need this so that multiple update threads can run
                        // concurrently while removeAll requests still always
                        // win.
                        removeAllLock.readLock().lock();

                        try
                        {
                            // TODO consider changing purgatory sync
                            // String keyAsString = element.getKey().toString();
                            synchronized ( purgatory )
                            {
                                // If the element has already been removed from
                                // purgatory do nothing
                                if ( !purgatory.containsKey( pe.getKey() ) )
                                {
                                    return;
                                }

                                element = pe.getCacheElement();
                            }

                            // I took this out of the purgatory sync block.
                            // If the element is still eligible, spool it.
                            if ( pe.isSpoolable() )
                            {
                                doUpdate( element );
                            }
                        }
                        finally
                        {
                            removeAllLock.readLock().unlock();
                        }

                        synchronized ( purgatory )
                        {
                            // After the update has completed, it is safe to
                            // remove the element from purgatory.
                            purgatory.remove( element.getKey() );
                        }
                    }
                }
                else
                {
                    // call the child's implementation
                    doUpdate( element );
                }
            }
            else
            {
                /*
                 * The cache is not alive, hence the element should be removed from purgatory. All
                 * elements should be removed eventually. Perhaps, the alive check should have been
                 * done before it went in the queue. This block handles the case where the disk
                 * cache fails during normal operations.
                 */
                synchronized ( purgatory )
                {
                    purgatory.remove( element.getKey() );
                }
            }
        }

        /**
         * @param cacheName
         * @param key
         * @throws IOException
         * @see ICacheListener#handleRemove
         */
        @Override
        public void handleRemove( String cacheName, K key )
            throws IOException
        {
            if ( alive )
            {
                if ( doRemove( key ) )
                {
                    log.debug( "Element removed, key: " + key );
                }
            }
        }

        /**
         * @param cacheName
         * @throws IOException
         * @see ICacheListener#handleRemoveAll
         */
        @Override
        public void handleRemoveAll( String cacheName )
            throws IOException
        {
            if ( alive )
            {
                doRemoveAll();
            }
        }

        /**
         * @param cacheName
         * @throws IOException
         * @see ICacheListener#handleDispose
         */
        @Override
        public void handleDispose( String cacheName )
            throws IOException
        {
            if ( alive )
            {
                doDispose();
            }
        }
    }


    /**
     * Before the event logging layer, the subclasses implemented the do* methods. Now the do*
     * methods call the *WithEventLogging method on the super. The *WithEventLogging methods call
     * the abstract process* methods. The children implement the process methods.
     *
     * ex. doGet calls getWithEventLogging, which calls processGet
     */

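    /*
     * As an illustration of that chain for a get (only doGet, getWithEventLogging and processGet
     * are named by the comment above; the concrete subclass name is hypothetical):
     *
     *   AbstractDiskCache.doGet( key )
     *       -> AbstractAuxiliaryCacheEventLogging.getWithEventLogging( key )  // wraps the call with event logging
     *           -> ExampleDiskCache.processGet( key )                         // reads the persistent store
     */
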
    /**
     * Get a value from the persistent store.
     *
     * Before the event logging layer, the subclasses implemented the do* methods. Now the do*
     * methods call the *WithEventLogging method on the super. The *WithEventLogging methods call
     * the abstract process* methods. The children implement the process methods.
     *
     * @param key Key to locate value for.
     * @return An object matching key, or null.
     * @throws IOException
     */
    protected final ICacheElement<K, V> doGet( K key )
        throws IOException
    {
        return super.getWithEventLogging( key );
    }

    /**
     * Get values matching the given pattern from the persistent store.
     *
     * Before the event logging layer, the subclasses implemented the do* methods. Now the do*
     * methods call the *WithEventLogging method on the super. The *WithEventLogging methods call
     * the abstract process* methods. The children implement the process methods.
     *
     * @param pattern Used to match keys.
     * @return A map of matches.
     * @throws IOException
     */
    protected final Map<K, ICacheElement<K, V>> doGetMatching( String pattern )
        throws IOException
    {
        return super.getMatchingWithEventLogging( pattern );
    }

    /**
     * Add a cache element to the persistent store.
     *
     * Before the event logging layer, the subclasses implemented the do* methods. Now the do*
     * methods call the *WithEventLogging method on the super. The *WithEventLogging methods call
     * the abstract process* methods. The children implement the process methods.
     *
     * @param cacheElement
     * @throws IOException
     */
    protected final void doUpdate( ICacheElement<K, V> cacheElement )
        throws IOException
    {
        super.updateWithEventLogging( cacheElement );
    }

    /**
     * Remove an object from the persistent store if found.
     *
     * Before the event logging layer, the subclasses implemented the do* methods. Now the do*
     * methods call the *WithEventLogging method on the super. The *WithEventLogging methods call
     * the abstract process* methods. The children implement the process methods.
     *
     * @param key Key of object to remove.
     * @return whether or not the item was present when it was removed
     * @throws IOException
     */
    protected final boolean doRemove( K key )
        throws IOException
    {
        return super.removeWithEventLogging( key );
    }

    /**
     * Remove all objects from the persistent store.
     *
     * Before the event logging layer, the subclasses implemented the do* methods. Now the do*
     * methods call the *WithEventLogging method on the super. The *WithEventLogging methods call
     * the abstract process* methods. The children implement the process methods.
     *
     * @throws IOException
     */
    protected final void doRemoveAll()
        throws IOException
    {
        super.removeAllWithEventLogging();
    }

    /**
     * Dispose of the persistent store. Note that disposal of purgatory and setting alive to false
     * does NOT need to be done by this method.
     *
     * Before the event logging layer, the subclasses implemented the do* methods. Now the do*
     * methods call the *WithEventLogging method on the super. The *WithEventLogging methods call
     * the abstract process* methods. The children implement the process methods.
     *
     * @throws IOException
     */
    protected final void doDispose()
        throws IOException
    {
        super.disposeWithEventLogging();
    }

    /**
     * Gets the extra info for the event log.
     *
     * @return disk location
     */
    @Override
    public String getEventLoggingExtraInfo()
    {
        return getDiskLocation();
    }

    /**
     * This is used by the event logging.
     *
     * @return the location of the disk, either path or ip.
     */
    protected abstract String getDiskLocation();
}