/*
 * The contents of this file are subject to the terms of the Common Development and
 * Distribution License (the License). You may not use this file except in compliance with the
 * License.
 *
 * You can obtain a copy of the License at legal/CDDLv1.0.txt. See the License for the
 * specific language governing permission and limitations under the License.
 *
 * When distributing Covered Software, include this CDDL Header Notice in each file and include
 * the License file at legal/CDDLv1.0.txt. If applicable, add the following below the CDDL
 * Header, with the fields enclosed by brackets [] replaced by your own identifying
 * information: "Portions Copyright [year] [name of copyright owner]".
 *
 * Copyright 2006-2008 Sun Microsystems, Inc.
 * Portions Copyright 2011-2016 ForgeRock AS.
 */
package org.opends.server.extensions;

import static org.opends.messages.ExtensionMessages.*;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.forgerock.i18n.LocalizableMessage;
import org.forgerock.i18n.slf4j.LocalizedLogger;
import org.forgerock.opendj.config.server.ConfigChangeResult;
import org.forgerock.opendj.config.server.ConfigException;
import org.forgerock.opendj.config.server.ConfigurationChangeListener;
import org.forgerock.opendj.ldap.DN;
import org.forgerock.opendj.server.config.server.EntryCacheCfg;
import org.forgerock.opendj.server.config.server.FIFOEntryCacheCfg;
import org.forgerock.util.Utils;
import org.opends.server.api.Backend;
import org.opends.server.api.EntryCache;
import org.opends.server.api.MonitorData;
import org.opends.server.core.DirectoryServer;
import org.opends.server.types.CacheEntry;
import org.opends.server.types.Entry;
import org.opends.server.types.InitializationException;
import org.opends.server.types.SearchFilter;
import org.opends.server.util.ServerConstants;

/**
 * This class defines a Directory Server entry cache that uses a FIFO to keep
 * track of the entries.  Entries that have been in the cache the longest are
 * the most likely candidates for purging if space is needed.  In contrast to
 * other cache structures, the selection of entries to purge is not based on
 * how frequently or recently the entries have been accessed.  This requires
 * significantly less locking (it is only required when an entry is added to
 * or removed from the cache, rather than each time an entry is accessed).
 * <BR><BR>
 * Cache sizing is based on the percentage of free memory within the JVM, such
 * that if enough memory is free, then adding an entry to the cache will not
 * require purging, but if more than a specified percentage of the available
 * memory within the JVM is already consumed, then one or more entries will need
 * to be removed in order to make room for a new entry.  It is also possible to
 * configure a maximum number of entries for the cache.  If this is specified,
 * then the number of entries will not be allowed to exceed this value, but it
 * may not be possible to hold this many entries if the available memory fills
 * up first.
 * <BR><BR>
 * Other configurable parameters for this cache include the maximum length of
 * time to block while waiting to acquire a lock, and a set of filters that may
 * be used to define criteria for determining which entries are stored in the
 * cache.  If a filter list is provided, then only entries matching at least one
 * of the given filters will be stored in the cache.
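 * <BR><BR>
 * As a rough sketch of the sizing arithmetic (the authoritative version lives
 * in <CODE>processEntryCacheConfig</CODE>), the memory threshold is derived
 * from the configured maximum memory percentage of the JVM heap:
 * <PRE>
 * long maxJvmHeapSize      = Runtime.getRuntime().maxMemory();
 * long newMaxAllowedMemory = (maxJvmHeapSize / 100) * newMaxMemoryPercent;
 * </PRE>
 * Once the amount of memory in use (total memory minus free memory reported by
 * the runtime) exceeds this threshold, a put triggers eviction instead of
 * growing the cache.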
 */
public class FIFOEntryCache
       extends EntryCache<FIFOEntryCacheCfg>
       implements ConfigurationChangeListener<FIFOEntryCacheCfg>
{
  private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();

  /** The reference to the Java runtime used to determine the amount of memory currently in use. */
  private static final Runtime runtime = Runtime.getRuntime();

  /** The mapping from backend ID to the backend's cached entries, keyed by entry ID. */
  private Map<String, Map<Long, CacheEntry>> idMap;

  /** The mapping between DNs and entries. */
  private LinkedHashMap<DN,CacheEntry> dnMap;

  /** The lock used to provide thread-safe access when changing the contents of the cache. */
  private ReadWriteLock cacheLock;
  private Lock cacheWriteLock;
  private Lock cacheReadLock;

  /**
   * The maximum amount of memory in bytes that the JVM will be allowed to use
   * before we need to start purging entries.
   */
  private long maxAllowedMemory;

  /** The maximum number of entries that may be held in the cache. */
  private long maxEntries;

  /** Currently registered configuration object. */
  private FIFOEntryCacheCfg registeredConfiguration;

  /** The maximum length of time in milliseconds to try to obtain a lock before giving up. */
  private long lockTimeout = 2000;

  /** Creates a new instance of this FIFO entry cache. */
  public FIFOEntryCache()
  {
    super();
    // All initialization should be performed in initializeEntryCache().
  }

  @Override
  public void initializeEntryCache(FIFOEntryCacheCfg configuration)
      throws ConfigException, InitializationException
  {
    registeredConfiguration = configuration;
    configuration.addFIFOChangeListener (this);

    // Initialize the cache structures.
    idMap = new HashMap<>();
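    // The LinkedHashMap keeps its entries in insertion order, so iterating over
    // dnMap.values() visits the oldest entries first.  This iteration order is
    // what gives the cache its FIFO eviction behavior.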
    dnMap = new LinkedHashMap<>();

    // Initialize locks.
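    // A fair read/write lock is used so that threads waiting for the write lock
    // are not starved by a steady stream of readers.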
    cacheLock = new ReentrantReadWriteLock(true);
    cacheWriteLock = cacheLock.writeLock();
    cacheReadLock = cacheLock.readLock();

    // Read configuration and apply changes.
    boolean applyChanges = true;
    List<LocalizableMessage> errorMessages = new ArrayList<>();
    EntryCacheCommon.ConfigErrorHandler errorHandler =
      EntryCacheCommon.getConfigErrorHandler (
          EntryCacheCommon.ConfigPhase.PHASE_INIT, null, errorMessages
          );
    if (!processEntryCacheConfig(configuration, applyChanges, errorHandler)) {
      String buffer = Utils.joinAsString(".  ", errorMessages);
      throw new ConfigException(ERR_FIFOCACHE_CANNOT_INITIALIZE.get(buffer));
    }
  }

  @Override
  public void finalizeEntryCache()
  {
    cacheWriteLock.lock();

    try {
      registeredConfiguration.removeFIFOChangeListener(this);

      // Release all memory currently in use by this cache.
      try {
        idMap.clear();
        dnMap.clear();
      } catch (Exception e) {
        // This should never happen.
        logger.traceException(e);
      }
    } finally {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public boolean containsEntry(DN entryDN)
  {
    if (entryDN == null) {
      return false;
    }

    // Indicate whether the DN map contains the specified DN.
    cacheReadLock.lock();
    try {
      return dnMap.containsKey(entryDN);
    } finally {
      cacheReadLock.unlock();
    }
  }

  @Override
  public Entry getEntry(DN entryDN)
  {
    // Simply return the entry from the DN map.
    cacheReadLock.lock();
    try {
      CacheEntry e = dnMap.get(entryDN);
      if (e == null) {
        // Indicate cache miss.
        cacheMisses.getAndIncrement();
        return null;
      }
      // Indicate cache hit.
      cacheHits.getAndIncrement();
      return e.getEntry();
    } finally {
      cacheReadLock.unlock();
    }
  }

  @Override
  public long getEntryID(DN entryDN)
  {
    // Simply return the ID from the DN map.
    cacheReadLock.lock();
    try {
      CacheEntry e = dnMap.get(entryDN);
      return e != null ? e.getEntryID() : -1;
    } finally {
      cacheReadLock.unlock();
    }
  }

  @Override
  public DN getEntryDN(String backendID, long entryID)
  {
    // Locate specific backend map and return the entry DN by ID.
    cacheReadLock.lock();
    try {
      Map<Long, CacheEntry> backendMap = idMap.get(backendID);
      if (backendMap != null) {
        CacheEntry e = backendMap.get(entryID);
        if (e != null) {
          return e.getDN();
        }
      }
      return null;
    } finally {
      cacheReadLock.unlock();
    }
  }

  @Override
  public void putEntry(Entry entry, String backendID, long entryID)
  {
    // Create the cache entry based on the provided information.
    CacheEntry cacheEntry = new CacheEntry(entry, backendID, entryID);

    // Obtain a lock on the cache.  If this fails, then don't do anything.
    try
    {
      if (!cacheWriteLock.tryLock(lockTimeout, TimeUnit.MILLISECONDS))
      {
        return;
      }
    }
    catch (Exception e)
    {
      logger.traceException(e);

      return;
    }

    // At this point, we hold the lock.  No matter what, we must release the
    // lock before leaving this method, so do that in a finally block.
    try
    {
      // See if the current memory usage is within acceptable constraints.  If
      // so, then add the entry to the cache (or replace it if it is already
      // present).  If not, then remove an existing entry and don't add the new
      // entry.
      long usedMemory = runtime.totalMemory() - runtime.freeMemory();
      if (usedMemory > maxAllowedMemory)
      {
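        // Over the memory threshold: do not cache the new entry.  If an older
        // copy of this same entry is already cached, drop it as well so that a
        // stale version is never served; otherwise evict the oldest entry to
        // reclaim some space.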
        CacheEntry cachedEntry = dnMap.remove(entry.getName());
        if (cachedEntry == null)
        {
          // The current entry wasn't cached, so remove an existing entry instead.
          Iterator<CacheEntry> iterator = dnMap.values().iterator();
          if (iterator.hasNext())
          {
            CacheEntry ce = iterator.next();
            iterator.remove();

            Map<Long,CacheEntry> m = idMap.get(ce.getBackendID());
            if (m != null)
            {
              m.remove(ce.getEntryID());
            }
          }
        }
        else
        {
          // Try to remove the entry from the ID list as well.
          Map<Long,CacheEntry> map = idMap.get(backendID);
          if (map != null)
          {
            map.remove(cacheEntry.getEntryID());
            // If this backend's map is now empty, remove it from idMap.
            if (map.isEmpty())
            {
              idMap.remove(backendID);
            }
          }
        }
      }
      else
      {
        // Add the entry to the cache.  This will replace it if it is already
        // present and add it if it isn't.
        dnMap.put(entry.getName(), cacheEntry);

        Map<Long,CacheEntry> map = idMap.get(backendID);
        if (map == null)
        {
          map = new HashMap<>();
          map.put(entryID, cacheEntry);
          idMap.put(backendID, map);
        }
        else
        {
          map.put(entryID, cacheEntry);
        }

        // See if a cap has been placed on the maximum number of entries in the
        // cache.  If so, then see if we have exceeded it and we need to purge
        // entries until we're within the limit.
        int entryCount = dnMap.size();
        if (maxEntries > 0 && entryCount > maxEntries)
        {
          Iterator<CacheEntry> iterator = dnMap.values().iterator();
          while (iterator.hasNext() && entryCount > maxEntries)
          {
            CacheEntry ce = iterator.next();
            iterator.remove();

            Map<Long,CacheEntry> m = idMap.get(ce.getBackendID());
            if (m != null)
            {
              m.remove(ce.getEntryID());
            }

            entryCount--;
          }
        }
      }
    }
    catch (Exception e)
    {
      logger.traceException(e);
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public boolean putEntryIfAbsent(Entry entry, String backendID, long entryID)
  {
    // Create the cache entry based on the provided information.
    CacheEntry cacheEntry = new CacheEntry(entry, backendID, entryID);

    // Obtain a lock on the cache.  If this fails, then don't do anything.
    try
    {
      if (!cacheWriteLock.tryLock(lockTimeout, TimeUnit.MILLISECONDS))
      {
        // We can't rule out the possibility of a conflict, so return false.
        return false;
      }
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // We can't rule out the possibility of a conflict, so return false.
      return false;
    }

    // At this point, we hold the lock.  No matter what, we must release the
    // lock before leaving this method, so do that in a finally block.
    try
    {
      // See if the entry already exists in the cache.  If it does, then we will
      // fail and not actually store the entry.
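      // Holding the write lock across this check and the insertion below is
      // what makes the put-if-absent operation atomic with respect to other
      // cache writers.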
      if (dnMap.containsKey(entry.getName()))
      {
        return false;
      }

      // See if the current memory usage is within acceptable constraints.  If
      // so, then add the entry to the cache.  If not, then remove an existing
      // entry and don't add the new entry.
      long usedMemory = runtime.totalMemory() - runtime.freeMemory();
      if (usedMemory > maxAllowedMemory)
      {
        Iterator<CacheEntry> iterator = dnMap.values().iterator();
        if (iterator.hasNext())
        {
          CacheEntry ce = iterator.next();
          iterator.remove();

          Map<Long,CacheEntry> m = idMap.get(ce.getBackendID());
          if (m != null)
          {
            m.remove(ce.getEntryID());
          }
        }
      }
      else
      {
        // Add the entry to the cache.  We already know it isn't present, so
        // this is a straight insertion.
        dnMap.put(entry.getName(), cacheEntry);

        Map<Long,CacheEntry> map = idMap.get(backendID);
        if (map == null)
        {
          map = new HashMap<>();
          map.put(entryID, cacheEntry);
          idMap.put(backendID, map);
        }
        else
        {
          map.put(entryID, cacheEntry);
        }

        // See if a cap has been placed on the maximum number of entries in the
        // cache.  If so, then see if we have exceeded it and we need to purge
        // entries until we're within the limit.
        int entryCount = dnMap.size();
        if (maxEntries > 0 && entryCount > maxEntries)
        {
          Iterator<CacheEntry> iterator = dnMap.values().iterator();
          while (iterator.hasNext() && entryCount > maxEntries)
          {
            CacheEntry ce = iterator.next();
            iterator.remove();

            Map<Long,CacheEntry> m = idMap.get(ce.getBackendID());
            if (m != null)
            {
              m.remove(ce.getEntryID());
            }

            entryCount--;
          }
        }
      }

      // We'll always return true in this case, even if we didn't actually add
      // the entry due to memory constraints.
      return true;
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // We can't be sure there wasn't a conflict, so return false.
      return false;
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public void removeEntry(DN entryDN)
  {
    // Acquire the lock on the cache.  We should not return until the entry is
    // removed, so we will block until we can obtain the lock.
    // FIXME -- An alternate approach could be to block for a maximum length of
    // time and, if that fails, put the entry in a queue for processing by some
    // other thread before it releases the lock.
    cacheWriteLock.lock();

    // At this point, it is absolutely critical that we always release the lock
    // before leaving this method, so do so in a finally block.
    try
    {
      // Check the DN cache to see if the entry exists.  If not, then don't do
      // anything.
      CacheEntry entry = dnMap.remove(entryDN);
      if (entry == null)
      {
        return;
      }

      final String backendID = entry.getBackendID();

      // Try to remove the entry from the ID list as well.
      Map<Long,CacheEntry> map = idMap.get(backendID);
      if (map == null)
      {
        // This shouldn't happen, but the entry isn't cached in the ID map so
        // we can return.
        return;
      }

      map.remove(entry.getEntryID());

      // If this backend's map is now empty, remove it from idMap.
      if (map.isEmpty())
      {
        idMap.remove(backendID);
      }
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // This shouldn't happen, but there's not much that we can do if it does.
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public void clear()
  {
    // Acquire a lock on the cache.  We should not return until the cache has
    // been cleared, so we will block until we can obtain the lock.
    cacheWriteLock.lock();

    // At this point, it is absolutely critical that we always release the lock
    // before leaving this method, so do so in a finally block.
    try
    {
      // Clear the DN cache.
      dnMap.clear();

      // Clear the ID cache.
      idMap.clear();
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // This shouldn't happen, but there's not much that we can do if it does.
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public void clearBackend(String backendID)
  {
    // Acquire a lock on the cache.  We should not return until the cache has
    // been cleared, so we will block until we can obtain the lock.
    cacheWriteLock.lock();

    // At this point, it is absolutely critical that we always release the lock
    // before leaving this method, so do so in a finally block.
    try
    {
      // Remove all references to entries for this backend from the ID cache.
      Map<Long,CacheEntry> map = idMap.remove(backendID);
      if (map == null)
      {
        // No entries were in the cache for this backend, so we can return
        // without doing anything.
        return;
      }

      // Unfortunately, there is no good way to dump the entries from the DN
      // cache based on their backend, so we will need to iterate through the
      // entries in the ID map and do it manually.  Since this could take a
      // while, we'll periodically release and re-acquire the lock in case
      // anyone else is waiting on it so this doesn't become a stop-the-world
      // event as far as the cache is concerned.
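      // Note that the backend's ID map has already been detached from idMap
      // above, so this loop only needs to prune the corresponding entries from
      // the DN map.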
      int entriesDeleted = 0;
      for (CacheEntry e : map.values())
      {
        dnMap.remove(e.getEntry().getName());
        entriesDeleted++;

        if ((entriesDeleted % 1000) == 0)
        {
          cacheWriteLock.unlock();
          Thread.yield();
          cacheWriteLock.lock();
        }
      }
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // This shouldn't happen, but there's not much that we can do if it does.
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public void clearSubtree(DN baseDN)
  {
    // Determine which backend should be used for the provided base DN.  If
    // there is none, then we don't need to do anything.
    Backend<?> backend = DirectoryServer.getBackend(baseDN);
    if (backend == null)
    {
      return;
    }

    // Acquire a lock on the cache.  We should not return until the cache has
    // been cleared, so we will block until we can obtain the lock.
    cacheWriteLock.lock();

    // At this point, it is absolutely critical that we always release the lock
    // before leaving this method, so do so in a finally block.
    try
    {
      clearSubtree(baseDN, backend);
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // This shouldn't happen, but there's not much that we can do if it does.
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  /**
   * Clears all entries at or below the specified base DN that are associated
   * with the given backend.  The caller must already hold the cache lock.
   *
   * @param  baseDN   The base DN below which all entries should be flushed.
   * @param  backend  The backend for which to remove the appropriate entries.
   */
  private void clearSubtree(DN baseDN, Backend<?> backend)
  {
    // See if there are any entries for the provided backend in the cache.  If
    // not, then return.
    Map<Long,CacheEntry> map = idMap.get(backend.getBackendID());
    if (map == null)
    {
      // No entries were in the cache for this backend, so we can return without
      // doing anything.
      return;
    }

    // Since the provided base DN could hold a subset of the information in the
    // specified backend, we will have to do this by iterating through all the
    // entries for that backend.  Since this could take a while, we'll
    // periodically release and re-acquire the lock in case anyone else is
    // waiting on it so this doesn't become a stop-the-world event as far as the
    // cache is concerned.
    int entriesExamined = 0;
    Iterator<CacheEntry> iterator = map.values().iterator();
    while (iterator.hasNext())
    {
      CacheEntry e = iterator.next();
      DN entryDN = e.getEntry().getName();
      if (entryDN.isSubordinateOrEqualTo(baseDN))
      {
        iterator.remove();
        dnMap.remove(entryDN);
      }

      entriesExamined++;
      if ((entriesExamined % 1000) == 0)
      {
        cacheWriteLock.unlock();
        Thread.yield();
        cacheWriteLock.lock();
      }
    }

    // See if the backend has any subordinate backends.  If so, then process
    // them recursively.
    for (Backend<?> subBackend : backend.getSubordinateBackends())
    {
      boolean isAppropriate = false;
      for (DN subBase : subBackend.getBaseDNs())
      {
        if (subBase.isSubordinateOrEqualTo(baseDN))
        {
          isAppropriate = true;
          break;
        }
      }

      if (isAppropriate)
      {
        clearSubtree(baseDN, subBackend);
      }
    }
  }

  @Override
  public void handleLowMemory()
  {
    // Grab the lock on the cache and wait until we have it.
    cacheWriteLock.lock();

    // At this point, it is absolutely critical that we always release the lock
    // before leaving this method, so do so in a finally block.
    try
    {
      // See how many entries are in the cache.  If there are fewer than 1000,
      // then we'll dump all of them.  Otherwise, we'll dump 10% of the entries.
      int numEntries = dnMap.size();
      if (numEntries < 1000)
      {
        dnMap.clear();
        idMap.clear();
      }
      else
      {
        int numToDrop = numEntries / 10;
        Iterator<CacheEntry> iterator = dnMap.values().iterator();
        while (iterator.hasNext() && numToDrop > 0)
        {
          CacheEntry entry = iterator.next();
          iterator.remove();

          Map<Long,CacheEntry> m = idMap.get(entry.getBackendID());
          if (m != null)
          {
            m.remove(entry.getEntryID());
          }

          numToDrop--;
        }
      }
    }
    catch (Exception e)
    {
      logger.traceException(e);

      // This shouldn't happen, but there's not much that we can do if it does.
    }
    finally
    {
      cacheWriteLock.unlock();
    }
  }

  @Override
  public boolean isConfigurationAcceptable(EntryCacheCfg configuration,
                                           List<LocalizableMessage> unacceptableReasons)
  {
    FIFOEntryCacheCfg config = (FIFOEntryCacheCfg) configuration;
    return isConfigurationChangeAcceptable(config, unacceptableReasons);
  }

  @Override
  public boolean isConfigurationChangeAcceptable(
      FIFOEntryCacheCfg configuration,
      List<LocalizableMessage> unacceptableReasons
      )
  {
    boolean applyChanges = false;
    EntryCacheCommon.ConfigErrorHandler errorHandler =
      EntryCacheCommon.getConfigErrorHandler (
          EntryCacheCommon.ConfigPhase.PHASE_ACCEPTABLE,
          unacceptableReasons,
          null
        );
    processEntryCacheConfig (configuration, applyChanges, errorHandler);

    return errorHandler.getIsAcceptable();
  }

  @Override
  public ConfigChangeResult applyConfigurationChange(FIFOEntryCacheCfg configuration)
  {
    boolean applyChanges = true;
    List<LocalizableMessage> errorMessages = new ArrayList<>();
    EntryCacheCommon.ConfigErrorHandler errorHandler =
      EntryCacheCommon.getConfigErrorHandler (
          EntryCacheCommon.ConfigPhase.PHASE_APPLY, null, errorMessages
          );

    // Do not apply changes unless this cache is enabled.
    if (configuration.isEnabled()) {
      processEntryCacheConfig (configuration, applyChanges, errorHandler);
    }

    final ConfigChangeResult changeResult = new ConfigChangeResult();
    changeResult.setResultCode(errorHandler.getResultCode());
    changeResult.setAdminActionRequired(errorHandler.getIsAdminActionRequired());
    changeResult.getMessages().addAll(errorHandler.getErrorMessages());
    return changeResult;
  }

  /**
   * Parses the provided configuration and configures the entry cache.
   *
   * @param configuration  The new configuration containing the changes.
   * @param applyChanges   If <CODE>true</CODE>, then apply the new configuration.
   * @param errorHandler   A handler used to report errors.
   *
   * @return  <CODE>true</CODE> if the configuration is acceptable,
   *          or <CODE>false</CODE> otherwise.
   */
  private boolean processEntryCacheConfig(
      FIFOEntryCacheCfg                   configuration,
      boolean                             applyChanges,
      EntryCacheCommon.ConfigErrorHandler errorHandler
      )
  {
    // Local variables to read configuration.
    Set<SearchFilter> newIncludeFilters = null;
    Set<SearchFilter> newExcludeFilters = null;

    // Read configuration.
    DN newConfigEntryDN = configuration.dn();
    long newLockTimeout = configuration.getLockTimeout();
    long newMaxEntries  = configuration.getMaxEntries();

    // Maximum memory the cache can use.
    int newMaxMemoryPercent  = configuration.getMaxMemoryPercent();
    long maxJvmHeapSize      = Runtime.getRuntime().maxMemory();
    long newMaxAllowedMemory = (maxJvmHeapSize / 100) * newMaxMemoryPercent;
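    // For example, with a 2 GB maximum heap and a configured maximum memory
    // percentage of 75, eviction starts once roughly 1.5 GB of the heap is in use.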

    // Get include and exclude filters.
    switch (errorHandler.getConfigPhase())
    {
    case PHASE_INIT:
    case PHASE_ACCEPTABLE:
    case PHASE_APPLY:
      newIncludeFilters = EntryCacheCommon.getFilters (
          configuration.getIncludeFilter(),
          ERR_CACHE_INVALID_INCLUDE_FILTER,
          errorHandler,
          newConfigEntryDN
          );
      newExcludeFilters = EntryCacheCommon.getFilters (
          configuration.getExcludeFilter(),
          ERR_CACHE_INVALID_EXCLUDE_FILTER,
          errorHandler,
          newConfigEntryDN
          );
      break;
    }

    if (applyChanges && errorHandler.getIsAcceptable())
    {
      maxEntries       = newMaxEntries;
      maxAllowedMemory = newMaxAllowedMemory;
      lockTimeout      = newLockTimeout;
      setIncludeFilters(newIncludeFilters);
      setExcludeFilters(newExcludeFilters);
      registeredConfiguration = configuration;
    }

    return errorHandler.getIsAcceptable();
  }

  @Override
  public MonitorData getMonitorData()
  {
    try {
      return EntryCacheCommon.getGenericMonitorData(
        cacheHits.longValue(),
        // If the cache misses counter is maintained by the default cache, get
        // the value from there; otherwise this resolves back to this cache.
        DirectoryServer.getEntryCache().getCacheMisses(),
        null,
        maxAllowedMemory,
        Long.valueOf(dnMap.size()),
        Long.valueOf(
            (maxEntries != Integer.MAX_VALUE && maxEntries != Long.MAX_VALUE) ? maxEntries : 0)
        );
    } catch (Exception e) {
      logger.traceException(e);
      return new MonitorData(0);
    }
  }

  @Override
  public Long getCacheCount()
  {
    return Long.valueOf(dnMap.size());
  }

  @Override
  public String toVerboseString()
  {
    StringBuilder sb = new StringBuilder();

    Map<DN,CacheEntry> dnMapCopy;
    Map<String, Map<Long, CacheEntry>> idMapCopy;

    // Grab the cache lock to prevent any modifications
    // to the cache maps until a snapshot is taken.
    cacheWriteLock.lock();
    try {
      // Examining the live maps would require holding the lock for the whole
      // dump and, with access-ordered maps, even read access can modify them,
      // so take copies instead.
      dnMapCopy = new LinkedHashMap<>(dnMap);
      idMapCopy = new HashMap<>(idMap);
    } finally {
      cacheWriteLock.unlock();
    }

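    // Each line of the output has the form "<entryDN>:<entryID>:<backendID>".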
    // Check dnMap first.
    for (DN dn : dnMapCopy.keySet()) {
      final CacheEntry cacheEntry = dnMapCopy.get(dn);
      sb.append(dn);
      sb.append(":");
      sb.append(cacheEntry != null ? Long.toString(cacheEntry.getEntryID()) : null);
      sb.append(":");
      sb.append(cacheEntry != null ? cacheEntry.getBackendID() : null);
      sb.append(ServerConstants.EOL);
    }

    // See if there is anything in idMap that is not reflected in dnMap, in
    // case the maps have gone out of sync.
    for (Map.Entry<String, Map<Long, CacheEntry>> backendCache : idMapCopy.entrySet()) {
      final String backendID = backendCache.getKey();
      for (Map.Entry<Long, CacheEntry> entry : backendCache.getValue().entrySet()) {
        final CacheEntry cacheEntry = entry.getValue();
        if (cacheEntry == null || !dnMapCopy.containsKey(cacheEntry.getDN())) {
          sb.append(cacheEntry != null ? cacheEntry.getDN() : null);
          sb.append(":");
          sb.append(entry.getKey());
          sb.append(":");
          sb.append(backendID);
          sb.append(ServerConstants.EOL);
        }
      }
    }

    String verboseString = sb.toString();
    return verboseString.length() > 0 ? verboseString : null;
  }
}