package org.apache.commons.jcs3.engine.memory;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import java.io.IOException;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import org.apache.commons.jcs3.engine.behavior.ICacheElement;
import org.apache.commons.jcs3.engine.control.CompositeCache;
import org.apache.commons.jcs3.engine.control.group.GroupAttrName;
import org.apache.commons.jcs3.engine.memory.util.MemoryElementDescriptor;
import org.apache.commons.jcs3.engine.stats.StatElement;
import org.apache.commons.jcs3.engine.stats.behavior.IStatElement;
import org.apache.commons.jcs3.engine.stats.behavior.IStats;
import org.apache.commons.jcs3.log.Log;
import org.apache.commons.jcs3.log.LogManager;
import org.apache.commons.jcs3.utils.struct.DoubleLinkedList;

/**
 * This class contains methods that are common to memory caches using the double linked list, such
 * as the LRU, MRU, FIFO, and LIFO caches.
 * <p>
 * Children control the expiration algorithm by controlling how elements are positioned on update and get.
 * The last item in the list is the one removed when the list fills. For instance, LRU should move items
 * to the front as they are used, while FIFO should simply add new items to the front of the list.
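 * <p>
 * As an illustrative sketch only (not the actual LRUMemoryCache implementation), an LRU-style
 * subclass could implement the two abstract hooks roughly as follows, using the protected
 * {@link #addFirst(ICacheElement)} helper and the shared list:
 * <pre>{@code
 * protected MemoryElementDescriptor<K, V> adjustListForUpdate(ICacheElement<K, V> ce)
 * {
 *     // newly stored or updated items go to the front of the list
 *     return addFirst(ce);
 * }
 *
 * protected void adjustListForGet(MemoryElementDescriptor<K, V> me)
 * {
 *     // a hit moves the item back to the front, so the least recently
 *     // used item drifts to the end and is spooled first
 *     list.remove(me);
 *     list.addFirst(me);
 * }
 * }</pre>
 * A FIFO-style subclass would add new items to the front in the same way but leave the list
 * untouched on get.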
 */
public abstract class AbstractDoubleLinkedListMemoryCache<K, V> extends AbstractMemoryCache<K, V>
{
    /** The logger. */
    private static final Log log = LogManager.getLog(AbstractDoubleLinkedListMemoryCache.class);

    /** thread-safe double linked list for lru */
    protected DoubleLinkedList<MemoryElementDescriptor<K, V>> list; // TODO privatise

    /**
     * Performs initialization after this instance has been created via reflection.
     * <p>
     *
     * @param hub the composite cache this memory cache is attached to
     */
    @Override
    public void initialize(final CompositeCache<K, V> hub)
    {
        super.initialize(hub);
        list = new DoubleLinkedList<>();
        log.info("initialized MemoryCache for {0}", this::getCacheName);
    }

    /**
     * This is called by super initialize.
     *
     * NOTE: should return a thread safe map
     *
     * <p>
     *
     * @return new ConcurrentHashMap()
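     * <p>
     * Purely as an illustrative sketch (not part of this class), a subclass that wants different
     * sizing could override this with any other thread-safe ConcurrentMap, for example:
     * <pre>{@code
     * public ConcurrentMap<K, MemoryElementDescriptor<K, V>> createMap()
     * {
     *     // the initial capacity of 1024 is an arbitrary example value
     *     return new ConcurrentHashMap<>(1024);
     * }
     * }</pre>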
     */
    @Override
    public ConcurrentMap<K, MemoryElementDescriptor<K, V>> createMap()
    {
        return new ConcurrentHashMap<>();
    }

    /**
     * Calls the abstract method adjustListForUpdate.
     * <p>
     * If the max size is reached, elements will be spooled to disk.
     * <p>
     *
     * @param ce
     *            The cache element, or entry wrapper
     * @throws IOException
     */
    @Override
    public final void update(final ICacheElement<K, V> ce) throws IOException
    {
        putCnt.incrementAndGet();

        lock.lock();
        try
        {
            final MemoryElementDescriptor<K, V> newNode = adjustListForUpdate(ce);

            // this would need to be synchronized if we were not using a ConcurrentHashMap
            final K key = newNode.getCacheElement().getKey();
            final MemoryElementDescriptor<K, V> oldNode = map.put(key, newNode);

            // If a node with the same key was already present, remove the old node from the list.
            if (oldNode != null && key.equals(oldNode.getCacheElement().getKey()))
            {
                list.remove(oldNode);
            }
        }
        finally
        {
            lock.unlock();
        }

        // If we are over the max, spool some elements
        spoolIfNeeded();
    }

    /**
     * Children implement this to control the cache expiration algorithm
     * <p>
     *
     * @param ce
     * @return MemoryElementDescriptor the new node
     * @throws IOException
     */
    protected abstract MemoryElementDescriptor<K, V> adjustListForUpdate(ICacheElement<K, V> ce) throws IOException;

    /**
     * If the max size has been reached, spool.
     * <p>
     *
     * @throws Error
     */
    private void spoolIfNeeded() throws Error
    {
        final int size = map.size();
        // If the element limit is reached, we need to spool

        if (size <= this.getCacheAttributes().getMaxObjects())
        {
            return;
        }

        log.debug("In memory limit reached, spooling");

        // Write the last 'chunkSize' items to disk.
        final int chunkSizeCorrected = Math.min(size, chunkSize);

        log.debug("About to spool to disk cache, map size: {0}, max objects: {1}, "
                + "maximum items to spool: {2}", () -> size,
                this.getCacheAttributes()::getMaxObjects,
                () -> chunkSizeCorrected);

        // The spool will put the elements in a disk event queue, so there is no
        // need to pre-queue the work here. That would be a bit wasteful
        // and wouldn't save much time in this synchronous call.
        lock.lock();

        try
        {
            freeElements(chunkSizeCorrected);

            // If this were outside the locked block it could detect a mismatch
            // where there is none.
            if (log.isDebugEnabled() && map.size() != list.size())
            {
                log.debug("update: After spool, size mismatch: map.size() = {0}, "
                        + "linked list size = {1}", map.size(), list.size());
            }
        }
        finally
        {
            lock.unlock();
        }

        log.debug("update: After spool map size: {0} linked list size = {1}",
                () -> map.size(), () -> list.size());
    }

    /**
     * This instructs the memory cache to remove the <i>numberToFree</i> according to its eviction
     * policy. For example, the LRUMemoryCache will remove the <i>numberToFree</i> least recently
     * used items. These will be spooled to disk if a disk auxiliary is available.
     * <p>
     *
     * @param numberToFree the number of elements to free
     * @return the number that were removed. If you ask to free 5, but there are only 3, you will
     *         get 3.
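     * <p>
     * For example (illustrative only, where memoryCache is any concrete subclass instance), asking
     * a cache that currently holds 3 elements to free 5 removes and returns 3:
     * <pre>{@code
     * int freed = memoryCache.freeElements(5); // freed == 3 when only 3 elements were present
     * }</pre>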
     */
    @Override
    public int freeElements(final int numberToFree)
    {
        int freed = 0;

        lock.lock();

        try
        {
            for (; freed < numberToFree; freed++)
            {
                final ICacheElement<K, V> element = spoolLastElement();
                if (element == null)
                {
                    break;
                }
            }
        }
        finally
        {
            lock.unlock();
        }

        return freed;
    }

    /**
     * Spools the last element in the list, if one exists.
     * Must be called while holding the lock.
     * <p>
     *
     * @return ICacheElement&lt;K, V&gt; if there was a last element, else null.
     * @throws Error if the last node holds a null cache element
     */
    private ICacheElement<K, V> spoolLastElement() throws Error
    {
        ICacheElement<K, V> toSpool = null;

        final MemoryElementDescriptor<K, V> last = list.getLast();
        if (last != null)
        {
            toSpool = last.getCacheElement();
            if (toSpool == null)
            {
                throw new Error("update: last.ce is null!");
            }
            getCompositeCache().spoolToDisk(toSpool);
            if (map.remove(toSpool.getKey()) == null)
            {
                log.warn("update: remove failed for key: {0}", toSpool.getKey());

                if (log.isTraceEnabled())
                {
                    verifyCache();
                }
            }

            list.remove(last);
        }

        return toSpool;
    }

    /**
     * @see org.apache.commons.jcs3.engine.memory.AbstractMemoryCache#get(Object)
     */
    @Override
    public ICacheElement<K, V> get(final K key) throws IOException
    {
        final ICacheElement<K, V> ce = super.get(key);

        if (log.isTraceEnabled())
        {
            verifyCache();
        }

        return ce;
    }

    /**
     * Adjust the list as needed for a get. This allows children to control the algorithm
     * <p>
     *
     * @param me
     */
    protected abstract void adjustListForGet(MemoryElementDescriptor<K, V> me);

    /**
     * Update control structures after get
     * (guarded by the lock)
     *
     * @param me the memory element descriptor
     */
    @Override
    protected void lockedGetElement(final MemoryElementDescriptor<K, V> me)
    {
        adjustListForGet(me);
    }

    /**
     * Remove element from control structure
     * (guarded by the lock)
     *
     * @param me the memory element descriptor
     */
    @Override
    protected void lockedRemoveElement(final MemoryElementDescriptor<K, V> me)
    {
        list.remove(me);
    }

    /**
     * Removes all cached items from the cache control structures.
     * (guarded by the lock)
     */
    @Override
    protected void lockedRemoveAll()
    {
        list.removeAll();
    }

    // --------------------------- internal methods (linked list implementation)
    /**
     * Adds a new node to the start of the linked list.
     * <p>
     *
     * @param ce
     *            The cache element to wrap and add to the front of the list
     * @return MemoryElementDescriptor
     */
    protected MemoryElementDescriptor<K, V> addFirst(final ICacheElement<K, V> ce)
    {
        lock.lock();
        try
        {
            final MemoryElementDescriptor<K, V> me = new MemoryElementDescriptor<>(ce);
            list.addFirst(me);
            if ( log.isTraceEnabled() )
            {
                verifyCache(ce.getKey());
            }
            return me;
        }
        finally
        {
            lock.unlock();
        }
    }

    /**
     * Adds a new node to the end of the linked list.
     * <p>
     *
     * @param ce
     *            The cache element to wrap and add to the end of the list
     * @return MemoryElementDescriptor
     */
    protected MemoryElementDescriptor<K, V> addLast(final ICacheElement<K, V> ce)
    {
        lock.lock();
        try
        {
            final MemoryElementDescriptor<K, V> me = new MemoryElementDescriptor<>(ce);
            list.addLast(me);
            if ( log.isTraceEnabled() )
            {
                verifyCache(ce.getKey());
            }
            return me;
        }
        finally
        {
            lock.unlock();
        }
    }

    // ---------------------------------------------------------- debug methods

    /**
     * Dump the cache entries from first to last for debugging.
     */
    private void dumpCacheEntries()
    {
        log.trace("dumpingCacheEntries");
        for (MemoryElementDescriptor<K, V> me = list.getFirst(); me != null; me = (MemoryElementDescriptor<K, V>) me.next)
        {
            log.trace("dumpCacheEntries> key={0}, val={1}",
                    me.getCacheElement().getKey(), me.getCacheElement().getVal());
        }
    }

    /**
     * Checks that all items that should be in the cache are present, i.e. verifies consistency
     * between the linked list and the map.
     */
    private void verifyCache()
    {
        boolean found = false;
        log.trace("verifycache[{0}]: map contains {1} elements, linked list "
                + "contains {2} elements", getCacheName(), map.size(),
                list.size());
        log.trace("verifycache: checking linked list by key ");
        for (MemoryElementDescriptor<K, V> li = list.getFirst(); li != null; li = (MemoryElementDescriptor<K, V>) li.next)
        {
            final K key = li.getCacheElement().getKey();
            if (!map.containsKey(key))
            {
                log.error("verifycache[{0}]: map does not contain key : {1}",
                        getCacheName(), key);
                log.error("key class={0}", key.getClass());
                log.error("key hashCode={0}", key.hashCode());
                log.error("key toString={0}", key.toString());
                if (key instanceof GroupAttrName)
                {
                    final GroupAttrName<?> name = (GroupAttrName<?>) key;
                    log.error("GroupID hashCode={0}", name.groupId.hashCode());
                    log.error("GroupID.class={0}", name.groupId.getClass());
                    log.error("AttrName hashCode={0}", name.attrName.hashCode());
                    log.error("AttrName.class={0}", name.attrName.getClass());
                }
                dumpMap();
            }
            else if (map.get(key) == null)
            {
                log.error("verifycache[{0}]: linked list retrieval returned "
                        + "null for key: {1}", getCacheName(), key);
            }
        }

        log.trace("verifycache: checking linked list by value ");
        for (MemoryElementDescriptor<K, V> li = list.getFirst(); li != null; li = (MemoryElementDescriptor<K, V>) li.next)
        {
            if (!map.containsValue(li))
            {
                log.error("verifycache[{0}]: map does not contain value: {1}",
                        getCacheName(), li);
                dumpMap();
            }
        }

        log.trace("verifycache: checking via keysets!");
        for (final Object val : map.keySet())
        {
            found = false;

            for (MemoryElementDescriptor<K, V> li = list.getFirst(); li != null; li = (MemoryElementDescriptor<K, V>) li.next)
            {
                if (val.equals(li.getCacheElement().getKey()))
                {
                    found = true;
                    break;
                }
            }
            if (!found)
            {
                log.error("verifycache[{0}]: key not found in list : {1}",
                        getCacheName(), val);
                dumpCacheEntries();
                if (map.containsKey(val))
                {
                    log.error("verifycache: map contains key");
                }
                else
                {
                    log.error("verifycache: map does NOT contain key, what the HECK!");
                }
            }
        }
    }

    /**
     * Logs an error if an element that should be in the cache is not.
     * <p>
     *
     * @param key
     */
    private void verifyCache(final K key)
    {
        boolean found = false;

        // go through the linked list looking for the key
        for (MemoryElementDescriptor<K, V> li = list.getFirst(); li != null; li = (MemoryElementDescriptor<K, V>) li.next)
        {
            if (li.getCacheElement().getKey() == key)
            {
                found = true;
                log.trace("verifycache(key) key match: {0}", key);
                break;
            }
        }
        if (!found)
        {
            log.error("verifycache(key)[{0}], couldn't find key! : {1}",
                    getCacheName(), key);
        }
    }

    /**
     * This returns semi-structured information on the memory cache, such as the size, put count,
     * hit count, and miss count.
     * <p>
     *
     * @see org.apache.commons.jcs3.engine.memory.behavior.IMemoryCache#getStatistics()
     */
    @Override
    public IStats getStatistics()
    {
        final IStats stats = super.getStatistics();
        stats.setTypeName( /* add algorithm name */"Memory Cache");

        final List<IStatElement<?>> elems = stats.getStatElements();

        elems.add(new StatElement<>("List Size", Integer.valueOf(list.size())));

        return stats;
    }
}