/*
 * The contents of this file are subject to the terms of the Common Development and
 * Distribution License (the License). You may not use this file except in compliance with the
 * License.
 *
 * You can obtain a copy of the License at legal/CDDLv1.0.txt. See the License for the
 * specific language governing permission and limitations under the License.
 *
 * When distributing Covered Software, include this CDDL Header Notice in each file and include
 * the License file at legal/CDDLv1.0.txt. If applicable, add the following below the CDDL
 * Header, with the fields enclosed by brackets [] replaced by your own identifying
 * information: "Portions Copyright [year] [name of copyright owner]".
 *
 * Copyright 2006-2010 Sun Microsystems, Inc.
 * Portions Copyright 2011-2016 ForgeRock AS.
 * Portions copyright 2013 Manuel Gaupp
 */
package org.opends.server.backends.pluggable;

import static org.forgerock.util.Utils.*;
import static org.opends.messages.BackendMessages.*;
import static org.opends.server.backends.pluggable.DnKeyFormat.*;
import static org.opends.server.backends.pluggable.IndexFilter.*;
import static org.opends.server.backends.pluggable.VLVIndex.*;
import static org.opends.server.core.DirectoryServer.*;
import static org.opends.server.protocols.ldap.LDAPResultCode.*;
import static org.opends.server.types.AdditionalLogItem.*;
import static org.opends.server.util.StaticUtils.*;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.TreeMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.forgerock.i18n.LocalizableMessage;
import org.forgerock.i18n.LocalizableMessageBuilder;
import org.forgerock.i18n.slf4j.LocalizedLogger;
import org.forgerock.opendj.config.server.ConfigChangeResult;
import org.forgerock.opendj.config.server.ConfigException;
import org.forgerock.opendj.config.server.ConfigurationAddListener;
import org.forgerock.opendj.config.server.ConfigurationChangeListener;
import org.forgerock.opendj.config.server.ConfigurationDeleteListener;
import org.forgerock.opendj.ldap.ByteSequence;
import org.forgerock.opendj.ldap.ByteString;
import org.forgerock.opendj.ldap.ByteStringBuilder;
import org.forgerock.opendj.ldap.DN;
import org.forgerock.opendj.ldap.ResultCode;
import org.forgerock.opendj.ldap.SearchScope;
import org.forgerock.opendj.ldap.SortKey;
import org.forgerock.opendj.ldap.schema.AttributeType;
import org.forgerock.opendj.server.config.server.BackendIndexCfg;
import org.forgerock.opendj.server.config.server.BackendVLVIndexCfg;
import org.forgerock.opendj.server.config.server.PluggableBackendCfg;
import org.forgerock.util.Pair;
import org.opends.messages.CoreMessages;
import org.opends.server.api.ClientConnection;
import org.opends.server.api.EntryCache;
import org.opends.server.api.VirtualAttributeProvider;
import org.opends.server.api.plugin.PluginResult.SubordinateDelete;
import org.opends.server.api.plugin.PluginResult.SubordinateModifyDN;
import org.opends.server.backends.pluggable.spi.AccessMode;
import org.opends.server.backends.pluggable.spi.Cursor;
import org.opends.server.backends.pluggable.spi.ReadOperation;
import org.opends.server.backends.pluggable.spi.ReadableTransaction;
import org.opends.server.backends.pluggable.spi.SequentialCursor;
import org.opends.server.backends.pluggable.spi.Storage;
import org.opends.server.backends.pluggable.spi.StorageRuntimeException;
import org.opends.server.backends.pluggable.spi.TreeName;
import org.opends.server.backends.pluggable.spi.WriteOperation;
import org.opends.server.backends.pluggable.spi.WriteableTransaction;
import org.opends.server.controls.PagedResultsControl;
import org.opends.server.controls.ServerSideSortRequestControl;
import org.opends.server.controls.ServerSideSortResponseControl;
import org.opends.server.controls.SubtreeDeleteControl;
import org.opends.server.controls.VLVRequestControl;
import org.opends.server.controls.VLVResponseControl;
import org.opends.server.core.AddOperation;
import org.opends.server.core.DeleteOperation;
import org.opends.server.core.DirectoryServer;
import org.opends.server.core.ModifyDNOperation;
import org.opends.server.core.ModifyOperation;
import org.opends.server.core.SearchOperation;
import org.opends.server.core.ServerContext;
import org.opends.server.crypto.CryptoSuite;
import org.opends.server.types.Attribute;
import org.opends.server.types.Attributes;
import org.opends.server.types.CanceledOperationException;
import org.opends.server.types.Control;
import org.opends.server.types.DirectoryException;
import org.opends.server.types.Entry;
import org.opends.server.types.Modification;
import org.opends.server.types.Operation;
import org.opends.server.types.Privilege;
import org.opends.server.types.SearchFilter;
import org.opends.server.types.VirtualAttributeRule;
import org.opends.server.util.ServerConstants;
import org.opends.server.util.StaticUtils;

/**
 * Storage container for LDAP entries. Each base DN of a backend is given
 * its own entry container. The entry container is the object that implements
 * the guts of the backend API methods for LDAP operations.
 */
public class EntryContainer
    implements SuffixContainer, ConfigurationChangeListener<PluggableBackendCfg>
{
  private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();

  /** The name of the entry tree. */
  private static final String ID2ENTRY_TREE_NAME = ID2ENTRY_INDEX_NAME;
  /** The name of the DN tree. */
  private static final String DN2ID_TREE_NAME = DN2ID_INDEX_NAME;
  /** The name of the children index tree. */
  private static final String ID2CHILDREN_COUNT_TREE_NAME = ID2CHILDREN_COUNT_NAME;
  /** The name of the referral tree. */
  private static final String REFERRAL_TREE_NAME = REFERRAL_INDEX_NAME;
  /** The name of the state tree. */
  private static final String STATE_TREE_NAME = STATE_INDEX_NAME;

  /** The attribute index configuration manager. */
  private final AttributeIndexCfgManager attributeIndexCfgManager;
  /** The vlv index configuration manager. */
  private final VLVIndexCfgManager vlvIndexCfgManager;

  /** The backend configuration. */
  private PluggableBackendCfg config;
  /** ID of the backend to which this entry container belongs. */
  private final String backendID;
  /** The baseDN this entry container is responsible for. */
  private final DN baseDN;
  /** The root container in which this entryContainer belongs. */
  private final RootContainer rootContainer;
  /** The tree storage. */
  private final Storage storage;

  /** The DN tree maps a normalized DN string to an entry ID (8 bytes). */
  private final DN2ID dn2id;
  /** The entry tree maps an entry ID (8 bytes) to a complete encoded entry. */
  private ID2Entry id2entry;
  /** Store the number of children for each entry. */
  private final ID2ChildrenCount id2childrenCount;
  /** The referral tree maps a normalized DN string to labeled URIs. */
  private final DN2URI dn2uri;
  /** The state tree maps a config DN to config entries. */
  private final State state;

  /** The set of attribute indexes. */
  private final Map<AttributeType, AttributeIndex> attrIndexMap = new HashMap<>();

  private final Map<AttributeType, CryptoSuite> attrCryptoMap = new HashMap<>();
  /** The set of VLV (Virtual List View) indexes. */
  private final Map<String, VLVIndex> vlvIndexMap = new HashMap<>();

  /**
   * Prevents name clashes for common indexes (like id2entry) across multiple suffixes.
   * For example when a root container contains multiple suffixes.
   */
  private final String treePrefix;

  private final ServerContext serverContext;

  /**
   * This class is responsible for managing the configuration for attribute
   * indexes used within this entry container.
   */
  private class AttributeIndexCfgManager implements
      ConfigurationAddListener<BackendIndexCfg>,
      ConfigurationDeleteListener<BackendIndexCfg>
  {
    @Override
    public boolean isConfigurationAddAcceptable(final BackendIndexCfg cfg, List<LocalizableMessage> unacceptableReasons)
    {
      try
      {
        newAttributeIndex(cfg, null);
        return true;
      }
      catch (Exception e)
      {
        unacceptableReasons.add(LocalizableMessage.raw(e.getLocalizedMessage()));
        return false;
      }
    }

    @Override
    public ConfigChangeResult applyConfigurationAdd(final BackendIndexCfg cfg)
    {
      final ConfigChangeResult ccr = new ConfigChangeResult();
      try
      {
        final CryptoSuite cryptoSuite = newCryptoSuite(cfg.isConfidentialityEnabled());
        final AttributeIndex index = newAttributeIndex(cfg, cryptoSuite);
        storage.write(new WriteOperation()
        {
          @Override
          public void run(WriteableTransaction txn) throws Exception
          {
            index.open(txn, true);
            if (!index.isTrusted())
            {
              ccr.setAdminActionRequired(true);
              ccr.addMessage(NOTE_INDEX_ADD_REQUIRES_REBUILD.get(cfg.getAttribute().getNameOrOID()));
            }
            attrIndexMap.put(cfg.getAttribute(), index);
            attrCryptoMap.put(cfg.getAttribute(), cryptoSuite);
          }
        });
      }
      catch (Exception e)
      {
        ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
        ccr.addMessage(LocalizableMessage.raw(e.getLocalizedMessage()));
      }
      return ccr;
    }

    @Override
    public boolean isConfigurationDeleteAcceptable(
        BackendIndexCfg cfg, List<LocalizableMessage> unacceptableReasons)
    {
      // TODO: validate more before returning true?
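      // Note: applyConfigurationDelete below only removes the index trees for this attribute;
      // the entries in id2entry are left untouched, so no extra validation is attempted here.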
      return true;
    }

    @Override
    public ConfigChangeResult applyConfigurationDelete(final BackendIndexCfg cfg)
    {
      final ConfigChangeResult ccr = new ConfigChangeResult();

      exclusiveLock.lock();
      try
      {
        storage.write(new WriteOperation()
        {
          @Override
          public void run(WriteableTransaction txn) throws Exception
          {
            attrIndexMap.remove(cfg.getAttribute()).closeAndDelete(txn);
            attrCryptoMap.remove(cfg.getAttribute());
          }
        });
      }
      catch (Exception de)
      {
        ccr.setResultCode(getServerErrorResultCode());
        ccr.addMessage(LocalizableMessage.raw(StaticUtils.stackTraceToSingleLineString(de)));
      }
      finally
      {
        exclusiveLock.unlock();
      }

      return ccr;
    }
  }

  /**
   * This class is responsible for managing the configuration for VLV indexes
   * used within this entry container.
   */
  private class VLVIndexCfgManager implements
      ConfigurationAddListener<BackendVLVIndexCfg>,
      ConfigurationDeleteListener<BackendVLVIndexCfg>
  {
    @Override
    public boolean isConfigurationAddAcceptable(BackendVLVIndexCfg cfg, List<LocalizableMessage> unacceptableReasons)
    {
      return VLVIndex.isConfigurationAddAcceptable(cfg, unacceptableReasons);
    }

    @Override
    public ConfigChangeResult applyConfigurationAdd(final BackendVLVIndexCfg cfg)
    {
      final ConfigChangeResult ccr = new ConfigChangeResult();
      try
      {
        storage.write(new WriteOperation()
        {
          @Override
          public void run(WriteableTransaction txn) throws Exception
          {
            VLVIndex vlvIndex = new VLVIndex(cfg, state, storage, EntryContainer.this, txn);
            vlvIndex.open(txn, true);
            if (!vlvIndex.isTrusted())
            {
              ccr.setAdminActionRequired(true);
              ccr.addMessage(NOTE_INDEX_ADD_REQUIRES_REBUILD.get(cfg.getName()));
            }
            vlvIndexMap.put(cfg.getName().toLowerCase(), vlvIndex);
          }
        });
      }
      catch (Exception e)
      {
        ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
        ccr.addMessage(LocalizableMessage.raw(StaticUtils.stackTraceToSingleLineString(e)));
      }
      return ccr;
    }

    @Override
    public boolean isConfigurationDeleteAcceptable(BackendVLVIndexCfg cfg, List<LocalizableMessage> unacceptableReasons)
    {
      // TODO: validate more before returning true?
      return true;
    }

    @Override
    public ConfigChangeResult applyConfigurationDelete(final BackendVLVIndexCfg cfg)
    {
      final ConfigChangeResult ccr = new ConfigChangeResult();
      exclusiveLock.lock();
      try
      {
        storage.write(new WriteOperation()
        {
          @Override
          public void run(WriteableTransaction txn) throws Exception
          {
            vlvIndexMap.remove(cfg.getName().toLowerCase()).closeAndDelete(txn);
          }
        });
      }
      catch (Exception e)
      {
        ccr.setResultCode(getServerErrorResultCode());
        ccr.addMessage(LocalizableMessage.raw(StaticUtils.stackTraceToSingleLineString(e)));
      }
      finally
      {
        exclusiveLock.unlock();
      }
      return ccr;
    }
  }

  /** A read write lock to handle schema changes and bulk changes. */
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  final Lock sharedLock = lock.readLock();
  final Lock exclusiveLock = lock.writeLock();

  EntryContainer(DN baseDN, String backendID, PluggableBackendCfg config, Storage storage, RootContainer rootContainer,
      ServerContext serverContext) throws ConfigException
  {
    this.backendID = backendID;
    this.baseDN = baseDN;
    this.config = config;
    this.storage = storage;
    this.rootContainer = rootContainer;
    this.serverContext = serverContext;
    this.treePrefix = baseDN.toNormalizedUrlSafeString();
    this.id2childrenCount = new ID2ChildrenCount(getIndexName(ID2CHILDREN_COUNT_TREE_NAME));
    this.dn2id = new DN2ID(getIndexName(DN2ID_TREE_NAME), baseDN);
    this.dn2uri = new DN2URI(getIndexName(REFERRAL_TREE_NAME), this);
    this.state = new State(getIndexName(STATE_TREE_NAME));

    config.addPluggableChangeListener(this);

    attributeIndexCfgManager = new AttributeIndexCfgManager();
    config.addBackendIndexAddListener(attributeIndexCfgManager);
    config.addBackendIndexDeleteListener(attributeIndexCfgManager);

    vlvIndexCfgManager = new VLVIndexCfgManager();
    config.addBackendVLVIndexAddListener(vlvIndexCfgManager);
    config.addBackendVLVIndexDeleteListener(vlvIndexCfgManager);
  }

  private CryptoSuite newCryptoSuite(boolean confidentiality)
  {
    return serverContext.getCryptoManager().newCryptoSuite(config.getCipherTransformation(),
        config.getCipherKeyLength(), confidentiality);
  }

  private AttributeIndex newAttributeIndex(BackendIndexCfg cfg, CryptoSuite cryptoSuite) throws ConfigException
  {
    return new AttributeIndex(cfg, state, this, cryptoSuite);
  }

  private DataConfig newDataConfig(PluggableBackendCfg config)
  {
    return new DataConfig.Builder()
        .compress(config.isEntriesCompressed())
        .encode(config.isCompactEncoding())
        .encrypt(config.isConfidentialityEnabled())
        .cryptoSuite(serverContext.getCryptoManager().newCryptoSuite(config.getCipherTransformation(),
            config.getCipherKeyLength(), config.isConfidentialityEnabled()))
        .schema(rootContainer.getCompressedSchema())
        .build();
  }

  private TreeName getIndexName(String indexId)
  {
    return new TreeName(treePrefix, indexId);
  }

  /**
   * Opens the entryContainer for reading and writing.
   *
   * @param txn a non null transaction
   * @param accessMode specifies how the container has to be opened (read-write or read-only)
   * @throws StorageRuntimeException If an error occurs in the storage.
   * @throws ConfigException if a configuration related error occurs.
   */
  void open(WriteableTransaction txn, AccessMode accessMode) throws StorageRuntimeException, ConfigException
  {
    boolean shouldCreate = accessMode.isWriteable();
    try
    {
      id2entry = new ID2Entry(getIndexName(ID2ENTRY_TREE_NAME), newDataConfig(config));
      id2entry.open(txn, shouldCreate);
      id2childrenCount.open(txn, shouldCreate);
      dn2id.open(txn, shouldCreate);
      state.open(txn, shouldCreate);
      dn2uri.open(txn, shouldCreate);

      for (String idx : config.listBackendIndexes())
      {
        BackendIndexCfg indexCfg = config.getBackendIndex(idx);

        CryptoSuite cryptoSuite = newCryptoSuite(indexCfg.isConfidentialityEnabled());
        final AttributeIndex index = newAttributeIndex(indexCfg, cryptoSuite);
        index.open(txn, shouldCreate);
        if (!index.isTrusted())
        {
          logger.info(NOTE_INDEX_ADD_REQUIRES_REBUILD, index.getName());
        }
        attrIndexMap.put(indexCfg.getAttribute(), index);
        attrCryptoMap.put(indexCfg.getAttribute(), cryptoSuite);
      }

      for (String idx : config.listBackendVLVIndexes())
      {
        BackendVLVIndexCfg vlvIndexCfg = config.getBackendVLVIndex(idx);

        VLVIndex vlvIndex = new VLVIndex(vlvIndexCfg, state, storage, this, txn);
        vlvIndex.open(txn, shouldCreate);
        if (!vlvIndex.isTrusted())
        {
          logger.info(NOTE_INDEX_ADD_REQUIRES_REBUILD, vlvIndex.getName());
        }

        vlvIndexMap.put(vlvIndexCfg.getName().toLowerCase(), vlvIndex);
      }
    }
    catch (StorageRuntimeException de)
    {
      logger.traceException(de);
      close();
      throw de;
    }
  }

  /**
   * Closes the entry container.
   *
   * @throws StorageRuntimeException If an error occurs in the storage.
   */
  @Override
  public void close() throws StorageRuntimeException
  {
    closeSilently(attrIndexMap.values());
    closeSilently(vlvIndexMap.values());

    // Deregister any listeners.
    config.removePluggableChangeListener(this);
    config.removeBackendIndexAddListener(attributeIndexCfgManager);
    config.removeBackendIndexDeleteListener(attributeIndexCfgManager);
    config.removeBackendVLVIndexAddListener(vlvIndexCfgManager);
    config.removeBackendVLVIndexDeleteListener(vlvIndexCfgManager);
  }

  /**
   * Retrieves a reference to the root container in which this entry container
   * exists.
   *
   * @return A reference to the root container in which this entry container
   *         exists.
   */
  RootContainer getRootContainer()
  {
    return rootContainer;
  }

  /**
   * Get the DN tree used by this entry container.
   * The entryContainer must have been opened.
   *
   * @return The DN tree.
   */
  DN2ID getDN2ID()
  {
    return dn2id;
  }

  /**
   * Get the entry tree used by this entry container.
   * The entryContainer must have been opened.
   *
   * @return The entry tree.
   */
  ID2Entry getID2Entry()
  {
    return id2entry;
  }

  /**
   * Get the referral tree used by this entry container.
   * The entryContainer must have been opened.
   *
   * @return The referral tree.
   */
  DN2URI getDN2URI()
  {
    return dn2uri;
  }

  /**
   * Get the children tree used by this entry container.
   * The entryContainer must have been opened.
   *
   * @return The children tree.
   */
  ID2ChildrenCount getID2ChildrenCount()
  {
    return id2childrenCount;
  }

  /**
   * Look for an attribute index for the given attribute type.
   *
   * @param attrType The attribute type for which an attribute index is needed.
   * @return The attribute index or null if there is none for that type.
   */
  AttributeIndex getAttributeIndex(AttributeType attrType)
  {
    return attrIndexMap.get(attrType);
  }

  /**
   * Look for a VLV index for the given index name.
   *
   * @param vlvIndexName The VLV index name for which a VLV index is needed.
   * @return The VLV index or null if there is none with that name.
   */
  VLVIndex getVLVIndex(String vlvIndexName)
  {
    return vlvIndexMap.get(vlvIndexName);
  }

  /**
   * Retrieve all attribute indexes.
   *
   * @return All attribute indexes defined in this entry container.
   */
  Collection<AttributeIndex> getAttributeIndexes()
  {
    return attrIndexMap.values();
  }

  /**
   * Retrieve all VLV indexes.
   *
   * @return The collection of VLV indexes defined in this entry container.
   */
  Collection<VLVIndex> getVLVIndexes()
  {
    return vlvIndexMap.values();
  }

  /**
   * Determine the highest entryID in the entryContainer.
   * The entryContainer must already be open.
   *
   * @param txn a non null transaction
   * @return The highest entry ID.
   * @throws StorageRuntimeException If an error occurs in the storage.
   */
  EntryID getHighestEntryID(ReadableTransaction txn) throws StorageRuntimeException
  {
    try (Cursor<ByteString, ByteString> cursor = txn.openCursor(id2entry.getName()))
    {
      // Position a cursor on the last data item, and the key should give the highest ID.
      if (cursor.positionToLastKey())
      {
        return new EntryID(cursor.getKey());
      }
      return new EntryID(0);
    }
  }

  boolean hasSubordinates(final DN dn)
  {
    try
    {
      return storage.read(new ReadOperation<Boolean>()
      {
        @Override
        public Boolean run(final ReadableTransaction txn) throws Exception
        {
          try (final SequentialCursor<?, ?> cursor = dn2id.openChildrenCursor(txn, dn))
          {
            return cursor.next();
          }
        }
      });
    }
    catch (Exception e)
    {
      throw new StorageRuntimeException(e);
    }
  }

  /**
   * Determine the number of children entries for a given entry.
   *
   * @param entryDN The distinguished name of the entry.
   * @return The number of children entries for the given entry or -1 if
   *         the entry does not exist.
   * @throws StorageRuntimeException If an error occurs in the storage.
   */
  long getNumberOfChildren(final DN entryDN) throws StorageRuntimeException
  {
    try
    {
      return storage.read(new ReadOperation<Long>()
      {
        @Override
        public Long run(ReadableTransaction txn) throws Exception
        {
          final EntryID entryID = dn2id.get(txn, entryDN);
          return entryID != null ? id2childrenCount.getCount(txn, entryID) : -1;
        }
      });
    }
    catch (Exception e)
    {
      throw new StorageRuntimeException(e);
    }
  }

  /**
   * Processes the specified search in this entryContainer.
   * Matching entries should be provided back to the core server using the
   * <CODE>SearchOperation.returnEntry</CODE> method.
   *
   * @param searchOperation The search operation to be processed.
   * @throws DirectoryException
   *           If a problem occurs while processing the
   *           search.
   * @throws StorageRuntimeException If an error occurs in the storage.
   * @throws CanceledOperationException if this operation should be cancelled.
   */
  void search(final SearchOperation searchOperation)
      throws DirectoryException, StorageRuntimeException, CanceledOperationException
  {
    try
    {
      storage.read(new ReadOperation<Void>()
      {
        @Override
        public Void run(final ReadableTransaction txn) throws Exception
        {
          DN aBaseDN = searchOperation.getBaseDN();
          SearchScope searchScope = searchOperation.getScope();

          PagedResultsControl pageRequest = searchOperation.getRequestControl(PagedResultsControl.DECODER);
          ServerSideSortRequestControl sortRequest =
              searchOperation.getRequestControl(ServerSideSortRequestControl.DECODER);
          if (sortRequest != null && !sortRequest.containsSortKeys() && sortRequest.isCritical())
          {
            /*
             * If the control's criticality field is true then the server SHOULD
             * do the following: return unavailableCriticalExtension as a return
             * code in the searchResultDone message; include the
             * sortKeyResponseControl in the searchResultDone message, and not
             * send back any search result entries.
             */
            addServerSideSortControl(searchOperation, NO_SUCH_ATTRIBUTE);
            searchOperation.setResultCode(ResultCode.UNAVAILABLE_CRITICAL_EXTENSION);
            return null;
          }

          VLVRequestControl vlvRequest = searchOperation.getRequestControl(VLVRequestControl.DECODER);
          if (vlvRequest != null && pageRequest != null)
          {
            throw new DirectoryException(
                ResultCode.CONSTRAINT_VIOLATION, ERR_SEARCH_CANNOT_MIX_PAGEDRESULTS_AND_VLV.get());
          }

          // Handle client abandon of paged results.
          if (pageRequest != null)
          {
            if (pageRequest.getSize() == 0)
            {
              addPagedResultsControl(searchOperation, pageRequest, null);
              return null;
            }
            if (searchOperation.getSizeLimit() > 0 && pageRequest.getSize() >= searchOperation.getSizeLimit())
            {
              // The RFC says: "If the page size is greater than or equal to the
              // sizeLimit value, the server should ignore the control as the
              // request can be satisfied in a single page"
              pageRequest = null;
            }
          }

          // Handle base-object search first.
          if (searchScope == SearchScope.BASE_OBJECT)
          {
            searchBaseObject(txn, searchOperation, pageRequest);
            return null;
          }

          // Check whether the client requested debug information about the
          // contribution of the indexes to the search.
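          // When debug information is requested, the search returns a single synthetic entry
          // describing how the indexes were used (see buildDebugSearchIndexEntry) instead of
          // the real search results.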
          StringBuilder debugBuffer = null;
          if (searchOperation.getAttributes().contains(ATTR_DEBUG_SEARCH_INDEX))
          {
            debugBuffer = new StringBuilder();
          }

          EntryIDSet candidateEntryIDs = null;
          boolean candidatesAreInScope = false;
          if (sortRequest != null)
          {
            for (VLVIndex vlvIndex : vlvIndexMap.values())
            {
              try
              {
                candidateEntryIDs = vlvIndex.evaluate(txn, searchOperation, sortRequest, vlvRequest, debugBuffer);
                if (candidateEntryIDs != null)
                {
                  addServerSideSortControl(searchOperation, SUCCESS);
                  candidatesAreInScope = true;
                  break;
                }
              }
              catch (DirectoryException de)
              {
                serverSideSortControlError(searchOperation, sortRequest, de);
              }
            }
          }

          // Combining server-side sort with paged result controls
          // requires us to use an entryIDSet where the entryIDs are ordered
          // so further paging can restart where it previously stopped
          long[] reorderedCandidateEntryIDs;
          if (candidateEntryIDs == null)
          {
            if (processSearchWithVirtualAttributeRule(searchOperation, true))
            {
              return null;
            }

            // Create an index filter to get the search result candidate entries
            IndexFilter indexFilter = new IndexFilter(
                EntryContainer.this, txn, searchOperation, debugBuffer, rootContainer.getMonitorProvider());

            // Evaluate the filter against the attribute indexes.
            candidateEntryIDs = indexFilter.evaluate();
            if (!isBelowFilterThreshold(candidateEntryIDs))
            {
              final int idSetLimit = getEntryIDSetLimit(searchOperation);
              final EntryIDSet scopeSet = getIDSetFromScope(txn, aBaseDN, searchScope, idSetLimit);
              candidateEntryIDs.retainAll(scopeSet);
              if (debugBuffer != null)
              {
                debugBuffer.append(" scope=").append(searchScope);
                scopeSet.toString(debugBuffer);
              }
              if (scopeSet.isDefined())
              {
                // In this case we know that every candidate is in scope.
                candidatesAreInScope = true;
              }
            }

            if (sortRequest != null)
            {
              // If the sort key is not present, the sorting will generate the
              // default ordering. VLV search request goes through as if
              // this sort key was not found in the user entry.
              try
              {
                List<SortKey> sortKeys = sortRequest.getSortKeys();
                reorderedCandidateEntryIDs = sort(txn, candidateEntryIDs, searchOperation, sortKeys, vlvRequest);
              }
              catch (DirectoryException de)
              {
                reorderedCandidateEntryIDs = candidateEntryIDs.toLongArray();
                serverSideSortControlError(searchOperation, sortRequest, de);
              }
              try
              {
                if (sortRequest.containsSortKeys())
                {
                  addServerSideSortControl(searchOperation, SUCCESS);
                }
                else
                {
                  /*
                   * There is no sort key associated with the sort control.
                   * Since it came here it means that the criticality is false
                   * so let the server return all search results unsorted and
                   * include the sortKeyResponseControl in the searchResultDone
                   * message.
                   */
                  addServerSideSortControl(searchOperation, NO_SUCH_ATTRIBUTE);
                }
              }
              catch (DirectoryException de)
              {
                serverSideSortControlError(searchOperation, sortRequest, de);
              }
            }
            else
            {
              reorderedCandidateEntryIDs = candidateEntryIDs.toLongArray();
            }
          }
          else
          {
            reorderedCandidateEntryIDs = candidateEntryIDs.toLongArray();
          }

          // If requested, construct and return a fictitious entry containing
          // debug information, and no other entries.
          if (debugBuffer != null)
          {
            debugBuffer.append(" final=");
            candidateEntryIDs.toString(debugBuffer);

            Entry debugEntry = buildDebugSearchIndexEntry(debugBuffer);
            searchOperation.returnEntry(debugEntry, null);
            return null;
          }

          if (reorderedCandidateEntryIDs != null)
          {
            rootContainer.getMonitorProvider().incrementIndexedSearchCount();
            searchIndexed(txn, reorderedCandidateEntryIDs, candidatesAreInScope, searchOperation, pageRequest);
          }
          else
          {
            rootContainer.getMonitorProvider().incrementUnindexedSearchCount();

            searchOperation.addAdditionalLogItem(keyOnly(getClass(), "unindexed"));

            if (processSearchWithVirtualAttributeRule(searchOperation, false))
            {
              return null;
            }

            ClientConnection clientConnection = searchOperation.getClientConnection();
            if (!clientConnection.hasPrivilege(Privilege.UNINDEXED_SEARCH, searchOperation))
            {
              throw new DirectoryException(
                  ResultCode.INSUFFICIENT_ACCESS_RIGHTS, ERR_SEARCH_UNINDEXED_INSUFFICIENT_PRIVILEGES.get());
            }

            if (sortRequest != null)
            {
              // FIXME OPENDJ-2628: Add support for sorting unindexed searches using indexes like DSEE currently does
              addServerSideSortControl(searchOperation, UNWILLING_TO_PERFORM);
              if (sortRequest.isCritical())
              {
                throw new DirectoryException(
                    ResultCode.UNAVAILABLE_CRITICAL_EXTENSION, ERR_SEARCH_CANNOT_SORT_UNINDEXED.get());
              }
            }

            searchNotIndexed(txn, searchOperation, pageRequest);
          }
          return null;
        }

        private int getEntryIDSetLimit(final SearchOperation searchOperation)
        {
          final int lookThroughLimit = searchOperation.getClientConnection().getLookthroughLimit();
          final int indexLimit = config.getIndexEntryLimit() == 0 ? CURSOR_ENTRY_LIMIT : config.getIndexEntryLimit();
          return lookThroughLimit > 0 ? Math.min(indexLimit, lookThroughLimit) : indexLimit;
        }

        private void searchBaseObject(ReadableTransaction txn, SearchOperation searchOperation,
            PagedResultsControl pageRequest) throws DirectoryException
        {
          final Entry baseEntry = fetchBaseEntry(txn, searchOperation.getBaseDN(), searchOperation.getScope());
          if (!isManageDsaITOperation(searchOperation))
          {
            dn2uri.checkTargetForReferral(baseEntry, searchOperation.getScope());
          }

          if (searchOperation.getFilter().matchesEntry(baseEntry))
          {
            searchOperation.returnEntry(baseEntry, null);
          }

          // Indicate no more pages.
          addPagedResultsControl(searchOperation, pageRequest, null);
        }

        private void serverSideSortControlError(final SearchOperation searchOperation,
            ServerSideSortRequestControl sortRequest, DirectoryException de) throws DirectoryException
        {
          addServerSideSortControl(searchOperation, de.getResultCode().intValue());
          if (sortRequest.isCritical())
          {
            throw de;
          }
        }

        private void addServerSideSortControl(SearchOperation searchOp, int resultCode)
        {
          searchOp.addResponseControl(new ServerSideSortResponseControl(resultCode, null));
        }

        private EntryIDSet getIDSetFromScope(final ReadableTransaction txn, DN aBaseDN, SearchScope searchScope,
            int idSetLimit) throws DirectoryException
        {
          final EntryIDSet scopeSet;
          try
          {
            switch (searchScope.asEnum())
            {
            case BASE_OBJECT:
              try (final SequentialCursor<?, EntryID> scopeCursor = dn2id.openCursor(txn, aBaseDN))
              {
                scopeSet = EntryIDSet.newDefinedSet(scopeCursor.getValue().longValue());
              }
              break;
            case SINGLE_LEVEL:
              try (final SequentialCursor<?, EntryID> scopeCursor = dn2id.openChildrenCursor(txn, aBaseDN))
              {
                scopeSet = newIDSetFromCursor(scopeCursor, false, idSetLimit);
              }
              break;
            case SUBORDINATES:
            case WHOLE_SUBTREE:
              try (final SequentialCursor<?, EntryID> scopeCursor = dn2id.openSubordinatesCursor(txn, aBaseDN))
              {
                scopeSet = newIDSetFromCursor(scopeCursor, searchScope.equals(SearchScope.WHOLE_SUBTREE), idSetLimit);
              }
              break;
            default:
              throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
                  CoreMessages.INFO_ERROR_SEARCH_SCOPE_NOT_ALLOWED.get());
            }
          }
          catch (NoSuchElementException e)
          {
            throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, ERR_SEARCH_NO_SUCH_OBJECT.get(aBaseDN),
                getMatchedDN(txn, aBaseDN), e);
          }
          return scopeSet;
        }
      });
    }
    catch (Exception e)
    {
      throwAllowedExceptionTypes(e, DirectoryException.class, CanceledOperationException.class);
    }
  }

  private static EntryIDSet newIDSetFromCursor(SequentialCursor<?, EntryID> cursor, boolean includeCurrent,
      int idSetLimit)
  {
    long[] entryIDs = new long[idSetLimit];
    int offset = 0;
    if (includeCurrent)
    {
      entryIDs[offset++] = cursor.getValue().longValue();
    }

    while (offset < idSetLimit && cursor.next())
    {
      entryIDs[offset++] = cursor.getValue().longValue();
    }

    if (offset == idSetLimit && cursor.next())
    {
      return EntryIDSet.newUndefinedSet();
    }
    else if (offset != idSetLimit)
    {
      entryIDs = Arrays.copyOf(entryIDs, offset);
    }
    Arrays.sort(entryIDs);

    return EntryIDSet.newDefinedSet(entryIDs);
  }

  private <E1 extends Exception, E2 extends Exception>
  void throwAllowedExceptionTypes(Exception e, Class<E1> clazz1, Class<E2> clazz2)
      throws E1, E2
  {
    throwIfPossible(e, clazz1, clazz2);
    if (e.getCause() != null)
    {
      throwIfPossible(e.getCause(), clazz1, clazz2);
    }
    else if (e instanceof StorageRuntimeException)
    {
      throw (StorageRuntimeException) e;
    }
    throw new StorageRuntimeException(e);
  }

  private static <E1 extends Exception, E2 extends Exception> void throwIfPossible(final Throwable cause,
      Class<E1> clazz1, Class<E2> clazz2) throws E1, E2
  {
    if (clazz1.isAssignableFrom(cause.getClass()))
    {
      throw clazz1.cast(cause);
    }
    else if (clazz2.isAssignableFrom(cause.getClass()))
    {
      throw clazz2.cast(cause);
    }
  }

  private static boolean processSearchWithVirtualAttributeRule(final SearchOperation searchOperation,
      boolean isPreIndexed)
  {
    for (VirtualAttributeRule rule : DirectoryServer.getVirtualAttributes())
    {
      VirtualAttributeProvider<?> provider = rule.getProvider();
      if (provider.isSearchable(rule, searchOperation, isPreIndexed))
      {
        provider.processSearch(rule, searchOperation);
        return true;
      }
    }
    return false;
  }

  private static Entry buildDebugSearchIndexEntry(StringBuilder debugBuffer) throws DirectoryException
  {
    Attribute attr = Attributes.create(ATTR_DEBUG_SEARCH_INDEX, debugBuffer.toString());
    Entry entry = new Entry(DN.valueOf("cn=debugsearch"), null, null, null);
    entry.addAttribute(attr, new ArrayList<ByteString>());
    return entry;
  }

  /**
   * We were not able to obtain a set of candidate entry IDs for the
   * search from the indexes.
   * <p>
   * Here we are relying on the DN key order to ensure children are
   * returned after their parents.
   * <ul>
   * <li>iterate through a subtree range of the DN tree
   * <li>discard non-children DNs if the search scope is single level
   * <li>fetch the entry by ID from the entry cache or the entry tree
   * <li>return the entry if it matches the filter
   * </ul>
   *
   * @param searchOperation The search operation.
   * @param pageRequest A Paged Results control, or null if none.
   * @throws DirectoryException If an error prevented the search from being
   *                            processed.
   */
  private void searchNotIndexed(ReadableTransaction txn, SearchOperation searchOperation,
      PagedResultsControl pageRequest) throws DirectoryException, CanceledOperationException
  {
    DN aBaseDN = searchOperation.getBaseDN();
    SearchScope searchScope = searchOperation.getScope();
    boolean manageDsaIT = isManageDsaITOperation(searchOperation);

    // The base entry must already have been processed if this is
    // a request for the next page in paged results. So we skip
    // the base entry processing if the cookie is set.
    if (pageRequest == null || pageRequest.getCookie().length() == 0)
    {
      final Entry baseEntry = fetchBaseEntry(txn, aBaseDN, searchScope);
      if (!manageDsaIT)
      {
        dn2uri.checkTargetForReferral(baseEntry, searchScope);
      }

      /* The base entry is only included for whole subtree search. */
      if (searchScope == SearchScope.WHOLE_SUBTREE
          && searchOperation.getFilter().matchesEntry(baseEntry))
      {
        searchOperation.returnEntry(baseEntry, null);
      }

      if (!manageDsaIT && !dn2uri.returnSearchReferences(txn, searchOperation))
      {
        // Indicate no more pages.
        addPagedResultsControl(searchOperation, pageRequest, null);
      }
    }

    /*
     * We will iterate forwards through a range of the dn2id keys to
     * find subordinates of the target entry from the top of the tree
     * downwards. For example, any subordinates of dn "dc=example,dc=com" appear
     * in dn2id with a dn ending in ",dc=example,dc=com". The dn
     * "cn=joe,ou=people,dc=example,dc=com" will appear after the dn
     * "ou=people,dc=example,dc=com".
     */
    ByteString baseDNKey = dnToDNKey(aBaseDN, this.baseDN.size());
    ByteStringBuilder beforeFirstChild = beforeFirstChildOf(baseDNKey);
    ByteStringBuilder afterLastChild = afterLastChildOf(baseDNKey);

    // Set the starting value.
    ByteSequence begin;
    if (pageRequest != null && pageRequest.getCookie().length() != 0)
    {
      // The cookie contains the DN of the next entry to be returned.
      try
      {
        begin = ByteString.wrap(pageRequest.getCookie().toByteArray());
      }
      catch (Exception e)
      {
        logger.traceException(e);
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
            ERR_INVALID_PAGED_RESULTS_COOKIE.get(pageRequest.getCookie().toHexString()), e);
      }
    }
    else
    {
      // Set the starting value to the suffix.
      begin = beforeFirstChild;
    }

    int lookthroughCount = 0;
    int lookthroughLimit = searchOperation.getClientConnection().getLookthroughLimit();

    try (final Cursor<ByteString, ByteString> cursor = txn.openCursor(dn2id.getName()))
    {
      // Initialize the cursor very close to the starting value.
      boolean success = cursor.positionToKeyOrNext(begin);

      // Step forward until we pass the ending value.
      while (success && cursor.getKey().compareTo(afterLastChild) < 0)
      {
        if (lookthroughLimit > 0 && lookthroughCount > lookthroughLimit)
        {
          // Lookthrough limit exceeded
          searchOperation.setResultCode(ResultCode.ADMIN_LIMIT_EXCEEDED);
          searchOperation.appendErrorMessage(NOTE_LOOKTHROUGH_LIMIT_EXCEEDED.get(lookthroughLimit));
          return;
        }

        // We have found a subordinate entry.
        EntryID entryID = new EntryID(cursor.getValue());
        boolean isInScope =
            searchScope != SearchScope.SINGLE_LEVEL
                // Check if this entry is an immediate child.
                || findDNKeyParent(cursor.getKey()) == baseDNKey.length();
        if (isInScope)
        {
          // Process the candidate entry.
          final Entry entry = getEntry(txn, entryID);
          if (entry != null)
          {
            lookthroughCount++;

            if ((manageDsaIT || entry.getReferralURLs() == null)
                && searchOperation.getFilter().matchesEntry(entry))
            {
              if (isPageFull(searchOperation, pageRequest))
              {
                // Set the cookie to remember where we were.
                addPagedResultsControl(searchOperation, pageRequest, cursor.getKey());
                return;
              }

              if (!searchOperation.returnEntry(entry, null))
              {
                // We have been told to discontinue processing of the search.
                // This could be due to size limit exceeded or operation cancelled
                return;
              }
            }
          }
        }

        searchOperation.checkIfCanceled(false);

        // Move to the next record.
        success = cursor.next();
      }
    }
    catch (StorageRuntimeException e)
    {
      logger.traceException(e);
    }

    // Indicate no more pages.
    addPagedResultsControl(searchOperation, pageRequest, null);
  }

  private boolean isPageFull(SearchOperation searchOperation, PagedResultsControl pageRequest)
  {
    return pageRequest != null && searchOperation.getEntriesSent() == pageRequest.getSize();
  }

  private void addPagedResultsControl(SearchOperation searchOp, PagedResultsControl pageRequest, ByteString cookie)
  {
    if (pageRequest != null)
    {
      searchOp.addResponseControl(new PagedResultsControl(pageRequest.isCritical(), 0, cookie));
    }
  }

  /**
   * Returns the entry corresponding to the provided entryID.
   *
   * @param txn a non null transaction
   * @param entryID
   *          the id of the entry to retrieve
   * @return the entry corresponding to the provided entryID
   * @throws DirectoryException
   *           If an error occurs retrieving the entry
   */
  private Entry getEntry(ReadableTransaction txn, EntryID entryID) throws DirectoryException
  {
    // Try the entry cache first.
    final EntryCache<?> entryCache = getEntryCache();
    final Entry cacheEntry = entryCache.getEntry(backendID, entryID.longValue());
    if (cacheEntry != null)
    {
      return cacheEntry;
    }

    final Entry entry = id2entry.get(txn, entryID);
    if (entry != null)
    {
      // Put the entry in the cache making sure not to overwrite a newer copy
      // that may have been inserted since the time we read the cache.
      entryCache.putEntryIfAbsent(entry, backendID, entryID.longValue());
    }
    return entry;
  }

  /**
   * We were able to obtain a set of candidate entry IDs for the search from the indexes.
   * <p>
   * Here we are relying on ID order to ensure children are returned after their parents.
   * <ul>
   * <li>Iterate through the candidate IDs
   * <li>fetch entry by ID from cache or id2entry
   * <li>put the entry in the cache if not present
   * <li>discard entries that are not in scope
   * <li>return entry if it matches the filter
   * </ul>
   *
   * @param entryIDReorderedSet
   *          The candidate entry IDs.
   * @param candidatesAreInScope
   *          true if it is certain that every candidate entry is in the search scope.
   * @param searchOperation
   *          The search operation.
   * @param pageRequest
   *          A Paged Results control, or null if none.
   * @throws DirectoryException
   *           If an error prevented the search from being processed.
   */
  private void searchIndexed(ReadableTransaction txn, long[] entryIDReorderedSet, boolean candidatesAreInScope,
      SearchOperation searchOperation, PagedResultsControl pageRequest) throws DirectoryException,
      CanceledOperationException
  {
    SearchScope searchScope = searchOperation.getScope();
    DN aBaseDN = searchOperation.getBaseDN();
    boolean manageDsaIT = isManageDsaITOperation(searchOperation);
    boolean continueSearch = true;

    // Set the starting value.
    Long beginEntryID = null;
    if (pageRequest != null && pageRequest.getCookie().length() != 0)
    {
      // The cookie contains the ID of the next entry to be returned.
      try
      {
        beginEntryID = pageRequest.getCookie().toLong();
      }
      catch (Exception e)
      {
        logger.traceException(e);
        throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
            ERR_INVALID_PAGED_RESULTS_COOKIE.get(pageRequest.getCookie().toHexString()), e);
      }
    }
    else if (!manageDsaIT)
    {
      continueSearch = dn2uri.returnSearchReferences(txn, searchOperation);
    }

    // Make sure the candidate list is smaller than the lookthrough limit
    int lookthroughLimit =
        searchOperation.getClientConnection().getLookthroughLimit();
    if (lookthroughLimit > 0 && entryIDReorderedSet.length > lookthroughLimit)
    {
      // Lookthrough limit exceeded
      searchOperation.setResultCode(ResultCode.ADMIN_LIMIT_EXCEEDED);
      searchOperation.appendErrorMessage(NOTE_LOOKTHROUGH_LIMIT_EXCEEDED.get(lookthroughLimit));
      continueSearch = false;
    }

    // Iterate through the index candidates.
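    // When resuming a paged search, findStartIndex() locates the entry ID taken from the cookie
    // so that iteration restarts where the previous page stopped.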
    if (continueSearch)
    {
      final SearchFilter filter = searchOperation.getFilter();
      for (int i = findStartIndex(beginEntryID, entryIDReorderedSet); i < entryIDReorderedSet.length; i++)
      {
        EntryID entryID = new EntryID(entryIDReorderedSet[i]);
        Entry entry;
        try
        {
          entry = getEntry(txn, entryID);
        }
        catch (Exception e)
        {
          logger.traceException(e);
          continue;
        }

        // Process the candidate entry.
        if (entry != null
            && isInScope(candidatesAreInScope, searchScope, aBaseDN, entry)
            && (manageDsaIT || entry.getReferralURLs() == null)
            && filter.matchesEntry(entry))
        {
          if (isPageFull(searchOperation, pageRequest))
          {
            // Set the cookie to remember where we were.
            addPagedResultsControl(searchOperation, pageRequest, entryID.toByteString());
            return;
          }

          if (!searchOperation.returnEntry(entry, null))
          {
            // We have been told to discontinue processing of the search.
            // This could be due to size limit exceeded or operation cancelled
            break;
          }
        }
      }
      searchOperation.checkIfCanceled(false);
    }

    // Before we return success from the search we must ensure the base entry
    // exists. However, if we have returned at least one entry or subordinate
    // reference it implies the base does exist, so we can omit the check.
    if (searchOperation.getEntriesSent() == 0
        && searchOperation.getReferencesSent() == 0)
    {
      final Entry baseEntry = fetchBaseEntry(txn, aBaseDN, searchScope);
      if (!manageDsaIT)
      {
        dn2uri.checkTargetForReferral(baseEntry, searchScope);
      }
    }

    // Indicate no more pages.
    addPagedResultsControl(searchOperation, pageRequest, null);
  }

  private int findStartIndex(Long beginEntryID, long[] entryIDReorderedSet)
  {
    if (beginEntryID == null)
    {
      return 0;
    }
    final long begin = beginEntryID.longValue();
    for (int i = 0; i < entryIDReorderedSet.length; i++)
    {
      if (entryIDReorderedSet[i] == begin)
      {
        return i;
      }
    }
    return 0;
  }

  private boolean isInScope(boolean candidatesAreInScope, SearchScope searchScope, DN aBaseDN, Entry entry)
  {
    DN entryDN = entry.getName();

    if (candidatesAreInScope)
    {
      return true;
    }
    else if (searchScope == SearchScope.SINGLE_LEVEL)
    {
      // Check if this entry is an immediate child.
      if (entryDN.size() == aBaseDN.size() + 1
          && entryDN.isSubordinateOrEqualTo(aBaseDN))
      {
        return true;
      }
    }
    else if (searchScope == SearchScope.WHOLE_SUBTREE)
    {
      if (entryDN.isSubordinateOrEqualTo(aBaseDN))
      {
        return true;
      }
    }
    else if (searchScope == SearchScope.SUBORDINATES
        && entryDN.size() > aBaseDN.size()
        && entryDN.isSubordinateOrEqualTo(aBaseDN))
    {
      return true;
    }
    return false;
  }

  /**
   * Adds the provided entry to this tree. This method must ensure that the
   * entry is appropriate for the tree and that no entry already exists with
   * the same DN. The caller must hold a write lock on the DN of the provided
   * entry.
   *
   * @param entry The entry to add to this tree.
   * @param addOperation The add operation with which the new entry is
   *                     associated. This may be <CODE>null</CODE> for adds
   *                     performed internally.
   * @throws DirectoryException If a problem occurs while trying to add the
   *                            entry.
   * @throws StorageRuntimeException If an error occurs in the storage.
   * @throws CanceledOperationException if this operation should be cancelled.
   */
  void addEntry(final Entry entry, final AddOperation addOperation)
      throws StorageRuntimeException, DirectoryException, CanceledOperationException
  {
    final DN parentDN = getParentWithinBase(entry.getName());
    final EntryID entryID = rootContainer.getNextEntryID();

    // Insert into the indexes, in index configuration order.
    final IndexBuffer indexBuffer = new IndexBuffer();
    insertEntryIntoIndexes(indexBuffer, entry, entryID);

    final ByteString encodedEntry = id2entry.encode(entry);

    try
    {
      storage.write(new WriteOperation()
      {
        @Override
        public void run(WriteableTransaction txn) throws Exception
        {
          // No need to call indexBuffer.reset() since IndexBuffer content will be the same for each retry attempt.
          try
          {
            // Check whether the entry already exists.
            if (dn2id.get(txn, entry.getName()) != null)
            {
              throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS,
                  ERR_ADD_ENTRY_ALREADY_EXISTS.get(entry.getName()));
            }
            // Check that the parent entry exists.
            EntryID parentID = null;
            if (parentDN != null)
            {
              // Check for referral entries above the target.
              dn2uri.targetEntryReferrals(txn, entry.getName(), null);

              parentID = dn2id.get(txn, parentDN);
              if (parentID == null)
              {
                throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
                    ERR_ADD_NO_SUCH_OBJECT.get(entry.getName()),
                    getMatchedDN(txn, parentDN),
                    null);
              }
            }

            // Ensure same access ordering as deleteEntry.
            dn2id.put(txn, entry.getName(), entryID);
            id2childrenCount.updateCount(txn, parentID, 1);
            id2entry.put(txn, entryID, encodedEntry);
            dn2uri.addEntry(txn, entry);
            id2childrenCount.updateTotalCount(txn, 1);
            indexBuffer.flush(txn);
            // One last check before committing
            addOperation.checkIfCanceled(true);
          }
          catch (StorageRuntimeException | DirectoryException | CanceledOperationException e)
          {
            throw e;
          }
          catch (Exception e)
          {
            String msg = e.getMessage();
            if (msg == null)
            {
              msg = stackTraceToSingleLineString(e);
            }
            throw new DirectoryException(
                DirectoryServer.getServerErrorResultCode(), ERR_UNCHECKED_EXCEPTION.get(msg), e);
          }
        }
      });
    }
    catch (Exception e)
    {
      writeTrustState(indexBuffer);
      throwAllowedExceptionTypes(e, DirectoryException.class, CanceledOperationException.class);
    }

    final EntryCache<?> entryCache = DirectoryServer.getEntryCache();
    if (entryCache != null)
    {
      entryCache.putEntry(entry, backendID, entryID.longValue());
    }
  }

  private void writeTrustState(final IndexBuffer indexBuffer)
  {
    // Transaction modifying the index has been rolled back.
    // Ensure that the index trusted state is persisted.
    try
    {
      storage.write(new WriteOperation()
      {
        @Override
        public void run(WriteableTransaction txn) throws Exception
        {
          indexBuffer.writeTrustState(txn);
        }
      });
    }
    catch (Exception e)
    {
      // Cannot throw because this method is used in a catch block and we do not want to hide the real exception.
      logger.traceException(e);
    }
  }

  void importEntry(WriteableTransaction txn, EntryID entryID, Entry entry) throws DirectoryException,
      StorageRuntimeException
  {
    final IndexBuffer indexBuffer = IndexBuffer.newImportIndexBuffer(txn, entryID);
    insertEntryIntoIndexes(indexBuffer, entry, entryID);
    dn2id.put(txn, entry.getName(), entryID);
    id2entry.put(txn, entryID, id2entry.encode(entry));
    dn2uri.addEntry(txn, entry);
    indexBuffer.flush(txn);
  }

  /**
   * Removes the specified entry from this tree. This method must ensure
   * that the entry exists and that it does not have any subordinate entries
   * (unless the storage supports a subtree delete operation and the client
   * included the appropriate information in the request). The caller must hold
   * a write lock on the provided entry DN.
   *
   * @param entryDN The DN of the entry to remove from this tree.
   * @param deleteOperation The delete operation with which this action is
   *                        associated. This may be <CODE>null</CODE> for
   *                        deletes performed internally.
   * @throws DirectoryException If a problem occurs while trying to remove the
   *                            entry.
   * @throws StorageRuntimeException If an error occurs in the storage.
   * @throws CanceledOperationException if this operation should be cancelled.
   */
  void deleteEntry(final DN entryDN, final DeleteOperation deleteOperation)
      throws DirectoryException, StorageRuntimeException, CanceledOperationException
  {
    final IndexBuffer indexBuffer = new IndexBuffer();
    try
    {
      storage.write(new WriteOperation()
      {
        @Override
        public void run(WriteableTransaction txn) throws Exception
        {
          indexBuffer.reset();
          try
          {
            // Check for referral entries above the target entry.
            dn2uri.targetEntryReferrals(txn, entryDN, null);

            // We'll need the parent ID when we update the id2childrenCount. Fetch it now so that accesses to dn2id
            // are ordered.
            final DN parentDN = getParentWithinBase(entryDN);
            EntryID parentID = null;
            if (parentDN != null)
            {
              parentID = dn2id.get(txn, parentDN);
              if (parentID == null)
              {
                throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
                    ERR_DELETE_NO_SUCH_OBJECT.get(entryDN),
                    getMatchedDN(txn, parentDN),
                    null);
              }
            }

            // Delete the subordinate entries in dn2id if requested.
            final boolean isSubtreeDelete = deleteOperation.getRequestControl(SubtreeDeleteControl.DECODER) != null;

            /* draft-armijo-ldap-treedelete, 4.1 Tree Delete Semantics: The server MUST NOT chase referrals stored in
             * the tree. If information about referrals is stored in this section of the tree, this pointer will be
             * deleted.
             */
            final boolean isManageDsaIT = isSubtreeDelete || isManageDsaITOperation(deleteOperation);

            /* Ensure that all index updates are done in the correct order to avoid deadlocks. First iterate over
             * dn2id collecting all the IDs of the entries to be deleted. Then update dn2uri, id2entry,
             * id2childrenCount, and finally the attribute indexes.
             */
            final List<Long> entriesToBeDeleted = new ArrayList<>();
            try (final SequentialCursor<Void, EntryID> cursor = dn2id.openSubordinatesCursor(txn, entryDN))
            {
              // Delete the target entry in dn2id.
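              // An undefined cursor here means the target DN itself is not present in dn2id.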
              if (!cursor.isDefined())
              {
                throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
                    ERR_DELETE_NO_SUCH_OBJECT.get(entryDN),
                    getMatchedDN(txn, entryDN),
                    null);
              }
              entriesToBeDeleted.add(cursor.getValue().longValue());
              cursor.delete();

              // Now delete the subordinate entries in dn2id.
              while (cursor.next())
              {
                if (!isSubtreeDelete)
                {
                  throw new DirectoryException(ResultCode.NOT_ALLOWED_ON_NONLEAF,
                      ERR_DELETE_NOT_ALLOWED_ON_NONLEAF.get(entryDN));
                }
                entriesToBeDeleted.add(cursor.getValue().longValue());
                cursor.delete();
                deleteOperation.checkIfCanceled(false);
              }
            }
            // The target entry will have the lowest entryID so it will remain the first element.
            Collections.sort(entriesToBeDeleted);

            // Now update id2entry, dn2uri, and id2childrenCount in key order.
            id2childrenCount.updateCount(txn, parentID, -1);
            final EntryCache<?> entryCache = DirectoryServer.getEntryCache();
            boolean isBaseEntry = true;
            try (final Cursor<EntryID, Entry> cursor = id2entry.openCursor(txn))
            {
              for (Long entryIDLong : entriesToBeDeleted)
              {
                final EntryID entryID = new EntryID(entryIDLong);
                if (!cursor.positionToKey(entryID.toByteString()))
                {
                  throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
                      ERR_MISSING_ID2ENTRY_RECORD.get(entryID));
                }
                final Entry entry = cursor.getValue();
                if (isBaseEntry && !isManageDsaIT)
                {
                  dn2uri.checkTargetForReferral(entry, null);
                }
                cursor.delete();
                dn2uri.deleteEntry(txn, entry);
                id2childrenCount.removeCount(txn, entryID);
                removeEntryFromIndexes(indexBuffer, entry, entryID);
                if (!isBaseEntry)
                {
                  invokeSubordinateDeletePlugins(entry);
                }
                if (entryCache != null)
                {
                  entryCache.removeEntry(entry.getName());
                }
                isBaseEntry = false;
                deleteOperation.checkIfCanceled(false);
              }
            }
            id2childrenCount.updateTotalCount(txn, -entriesToBeDeleted.size());
            indexBuffer.flush(txn);
            deleteOperation.checkIfCanceled(true);
            if (isSubtreeDelete)
            {
              deleteOperation.addAdditionalLogItem(unquotedKeyValue(getClass(), "deletedEntries",
                  entriesToBeDeleted.size()));
            }
          }
          catch (StorageRuntimeException | DirectoryException | CanceledOperationException e)
          {
            throw e;
          }
          catch (Exception e)
          {
            String msg = e.getMessage();
            if (msg == null)
            {
              msg = stackTraceToSingleLineString(e);
            }
            throw new DirectoryException(
                DirectoryServer.getServerErrorResultCode(), ERR_UNCHECKED_EXCEPTION.get(msg), e);
          }
        }

        private void invokeSubordinateDeletePlugins(final Entry entry) throws DirectoryException
        {
          if (!deleteOperation.isSynchronizationOperation())
          {
            SubordinateDelete pluginResult =
                getPluginConfigManager().invokeSubordinateDeletePlugins(deleteOperation, entry);
            if (!pluginResult.continueProcessing())
            {
              throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
                  ERR_DELETE_ABORTED_BY_SUBORDINATE_PLUGIN.get(entry.getName()));
            }
          }
        }
      });
    }
    catch (Exception e)
    {
      writeTrustState(indexBuffer);
      throwAllowedExceptionTypes(e, DirectoryException.class, CanceledOperationException.class);
    }
  }

  /**
   * Indicates whether an entry with the specified DN exists.
   *
   * @param entryDN The DN of the entry for which to determine existence.
   *
   * @return <CODE>true</CODE> if the specified entry exists,
   *         or <CODE>false</CODE> if it does not.
   */
  private boolean entryExists(ReadableTransaction txn, final DN entryDN)
  {
    // Try the entry cache first.
    EntryCache<?> entryCache = DirectoryServer.getEntryCache();
    return (entryCache != null && entryCache.containsEntry(entryDN))
        || dn2id.get(txn, entryDN) != null;
  }

  boolean entryExists(final DN entryDN) throws StorageRuntimeException
  {
    final EntryCache<?> entryCache = DirectoryServer.getEntryCache();
    if (entryCache != null && entryCache.containsEntry(entryDN))
    {
      return true;
    }

    try
    {
      return storage.read(new ReadOperation<Boolean>()
      {
        @Override
        public Boolean run(ReadableTransaction txn) throws Exception
        {
          return dn2id.get(txn, entryDN) != null;
        }
      });
    }
    catch (Exception e)
    {
      throw new StorageRuntimeException(e);
    }
  }

  /**
   * Retrieves the requested entry, trying the entry cache first,
   * then the tree.
   *
   * @param entryDN The distinguished name of the entry to retrieve.
   * @return The requested entry, or <CODE>null</CODE> if the entry does not
   *         exist.
   * @throws DirectoryException If a problem occurs while trying to retrieve
   *                            the entry.
   * @throws StorageRuntimeException An error occurred during a storage operation.
   */
  Entry getEntry(final DN entryDN) throws StorageRuntimeException, DirectoryException
  {
    try
    {
      return storage.read(new ReadOperation<Entry>()
      {
        @Override
        public Entry run(ReadableTransaction txn) throws Exception
        {
          Entry entry = getEntry0(txn, entryDN);
          if (entry == null)
          {
            // The entryDN does not exist. Check for referral entries above the target entry.
            dn2uri.targetEntryReferrals(txn, entryDN, null);
          }
          return entry;
        }
      });
    }
    catch (Exception e)
    {
      // throwAllowedExceptionTypes() expects two exception types; DirectoryException is passed
      // twice because it is the only checked exception type that can be thrown here.
      throwAllowedExceptionTypes(e, DirectoryException.class, DirectoryException.class);
      return null; // unreachable: throwAllowedExceptionTypes() always throws
    }
  }

  private Entry getEntry0(ReadableTransaction txn, final DN entryDN) throws StorageRuntimeException, DirectoryException
  {
    final EntryCache<?> entryCache = DirectoryServer.getEntryCache();
    if (entryCache != null)
    {
      final Entry entry = entryCache.getEntry(entryDN);
      if (entry != null)
      {
        return entry;
      }
    }

    final EntryID entryID = dn2id.get(txn, entryDN);
    if (entryID == null)
    {
      return null;
    }

    final Entry entry = id2entry.get(txn, entryID);
    if (entry != null && entryCache != null)
    {
      /*
       * Put the entry in the cache making sure not to overwrite a newer copy that may have been
       * inserted since the time we read the cache.
       */
      entryCache.putEntryIfAbsent(entry, backendID, entryID.longValue());
    }
    return entry;
  }

  /**
   * The simplest case of replacing an entry in which the entry DN has
   * not changed.
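   * <p>Updates id2entry, the referral tree (dn2uri) and the attribute and VLV indexes, then
   * refreshes the entry cache.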
1842 * 1843 * @param oldEntry The old contents of the entry 1844 * @param newEntry The new contents of the entry 1845 * @param modifyOperation The modify operation with which this action is 1846 * associated. This may be <CODE>null</CODE> for 1847 * modifications performed internally. 1848 * @throws StorageRuntimeException If an error occurs in the storage. 1849 * @throws DirectoryException If a Directory Server error occurs. 1850 * @throws CanceledOperationException if this operation should be cancelled. 1851 */ 1852 void replaceEntry(final Entry oldEntry, final Entry newEntry, final ModifyOperation modifyOperation) 1853 throws StorageRuntimeException, DirectoryException, CanceledOperationException 1854 { 1855 final IndexBuffer indexBuffer = new IndexBuffer(); 1856 final ByteString encodedNewEntry = id2entry.encode(newEntry); 1857 try 1858 { 1859 storage.write(new WriteOperation() 1860 { 1861 @Override 1862 public void run(WriteableTransaction txn) throws Exception 1863 { 1864 indexBuffer.reset(); 1865 try 1866 { 1867 EntryID entryID = dn2id.get(txn, newEntry.getName()); 1868 if (entryID == null) 1869 { 1870 throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, 1871 ERR_MODIFY_NO_SUCH_OBJECT.get(newEntry.getName()), 1872 getMatchedDN(txn, newEntry.getName()), 1873 null); 1874 } 1875 1876 if (!isManageDsaITOperation(modifyOperation)) 1877 { 1878 // Check if the entry is a referral entry. 1879 dn2uri.checkTargetForReferral(oldEntry, null); 1880 } 1881 1882 // Ensure same ordering as deleteEntry: id2entry, dn2uri, then indexes. 1883 id2entry.put(txn, entryID, encodedNewEntry); 1884 1885 // Update the referral tree and indexes 1886 dn2uri.modifyEntry(txn, oldEntry, newEntry, modifyOperation.getModifications()); 1887 indexModifications(indexBuffer, oldEntry, newEntry, entryID, modifyOperation.getModifications()); 1888 1889 indexBuffer.flush(txn); 1890 1891 // One last check before committing 1892 modifyOperation.checkIfCanceled(true); 1893 1894 // Update the entry cache. 1895 EntryCache<?> entryCache = DirectoryServer.getEntryCache(); 1896 if (entryCache != null) 1897 { 1898 entryCache.putEntry(newEntry, backendID, entryID.longValue()); 1899 } 1900 } 1901 catch (StorageRuntimeException | DirectoryException | CanceledOperationException e) 1902 { 1903 throw e; 1904 } 1905 catch (Exception e) 1906 { 1907 String msg = e.getMessage(); 1908 if (msg == null) 1909 { 1910 msg = stackTraceToSingleLineString(e); 1911 } 1912 throw new DirectoryException( 1913 DirectoryServer.getServerErrorResultCode(), ERR_UNCHECKED_EXCEPTION.get(msg), e); 1914 } 1915 } 1916 }); 1917 } 1918 catch (Exception e) 1919 { 1920 writeTrustState(indexBuffer); 1921 throwAllowedExceptionTypes(e, DirectoryException.class, CanceledOperationException.class); 1922 } 1923 } 1924 1925 /** 1926 * Moves and/or renames the provided entry in this backend, altering any 1927 * subordinate entries as necessary. This must ensure that an entry already 1928 * exists with the provided current DN, and that no entry exists with the 1929 * target DN of the provided entry. The caller must hold write locks on both 1930 * the current DN and the new DN for the entry. 1931 * 1932 * @param oldTargetDN The current DN of the entry to be renamed. 1933 * @param newTargetEntry The new content to use for the entry. 1934 * @param modifyDNOperation The modify DN operation with which this action 1935 * is associated. This may be <CODE>null</CODE> 1936 * for modify DN operations performed internally. 
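   * <p>When the new superior entry has a larger entry ID than the old superior, all renamed
   * entries are assigned new entry IDs so that the ID of an entry remains greater than the ID
   * of its parent.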
1937 * @throws DirectoryException 1938 * If a problem occurs while trying to perform the rename. 1939 * @throws CanceledOperationException 1940 * If this backend noticed and reacted 1941 * to a request to cancel or abandon the 1942 * modify DN operation. 1943 * @throws StorageRuntimeException If an error occurs in the storage. 1944 */ 1945 void renameEntry(final DN oldTargetDN, final Entry newTargetEntry, final ModifyDNOperation modifyDNOperation) 1946 throws StorageRuntimeException, DirectoryException, CanceledOperationException 1947 { 1948 final IndexBuffer indexBuffer = new IndexBuffer(); 1949 try 1950 { 1951 storage.write(new WriteOperation() 1952 { 1953 @Override 1954 public void run(WriteableTransaction txn) throws Exception 1955 { 1956 indexBuffer.reset(); 1957 try 1958 { 1959 // Validate the request. 1960 final DN newTargetDN = newTargetEntry.getName(); 1961 final DN oldSuperiorDN = getParentWithinBase(oldTargetDN); 1962 final DN newSuperiorDN = getParentWithinBase(newTargetDN); 1963 1964 final EntryID oldSuperiorID = oldSuperiorDN != null ? dn2id.get(txn, oldSuperiorDN) : null; 1965 final EntryID oldTargetID = dn2id.get(txn, oldTargetDN); 1966 if ((oldSuperiorDN != null && oldSuperiorID == null) || oldTargetID == null) 1967 { 1968 // Check for referral entries above the target entry. 1969 dn2uri.targetEntryReferrals(txn, oldTargetDN, null); 1970 throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, 1971 ERR_MODIFYDN_NO_SUCH_OBJECT.get(oldTargetDN), 1972 getMatchedDN(txn, oldTargetDN), 1973 null); 1974 } 1975 1976 final EntryID newSuperiorID = newSuperiorDN != null ? dn2id.get(txn, newSuperiorDN) : null; 1977 if (newSuperiorDN != null && newSuperiorID == null) 1978 { 1979 throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, 1980 ERR_NEW_SUPERIOR_NO_SUCH_OBJECT.get(newSuperiorDN), 1981 getMatchedDN(txn, newSuperiorDN), 1982 null); 1983 } 1984 1985 // Check that an entry with the new name does not already exist, but take care to handle the case where 1986 // the user is renaming the entry with an equivalent name, e.g. "cn=matt" to "cn=Matt". 1987 if (!oldTargetDN.equals(newTargetDN) && dn2id.get(txn, newTargetDN) != null) 1988 { 1989 throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS, 1990 ERR_MODIFYDN_ALREADY_EXISTS.get(newTargetDN)); 1991 } 1992 1993 /* We want to preserve the invariant that the ID of an entry is greater than its parent, since search 1994 * results are returned in ID order. Note: if the superior has changed then oldSuperiorDN and 1995 * newSuperiorDN will be non-null. 1996 */ 1997 final boolean superiorHasChanged = !Objects.equals(oldSuperiorDN, newSuperiorDN); 1998 final boolean renumberEntryIDs = superiorHasChanged && newSuperiorID.compareTo(oldSuperiorID) > 0; 1999 2000 /* Ensure that all index updates are done in the correct order to avoid deadlocks. First iterate over 2001 * dn2id collecting all the IDs of the entries to be renamed. Then update dn2uri, id2entry, 2002 * id2childrenCount, and finally the attribute indexes. 2003 */ 2004 final List<Pair<Long, Long>> renamedEntryIDs = dn2id.renameSubtree(txn, 2005 oldTargetDN, 2006 newTargetDN, 2007 rootContainer, 2008 renumberEntryIDs, 2009 modifyDNOperation); 2010 2011 // The target entry will have the lowest entryID so it will remain the first element. 2012 Collections.sort(renamedEntryIDs, Pair.<Long, Long>getPairComparator()); 2013 2014 // Now update id2entry, dn2uri, and id2childrenCount in key order. 
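                  // Moving the target entry transfers one immediate child from the old superior
                  // to the new superior.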
2015 if (superiorHasChanged) 2016 { 2017 id2childrenCount.updateCount(txn, oldSuperiorID, -1); 2018 id2childrenCount.updateCount(txn, newSuperiorID, 1); 2019 } 2020 boolean isBaseEntry = true; 2021 try (final Cursor<EntryID, Entry> cursor = id2entry.openCursor(txn)) 2022 { 2023 for (Pair<Long, Long> renamedEntryID : renamedEntryIDs) 2024 { 2025 renameSingleEntry(txn, renamedEntryID, cursor, indexBuffer, newTargetDN, renumberEntryIDs, isBaseEntry); 2026 isBaseEntry = false; 2027 modifyDNOperation.checkIfCanceled(false); 2028 } 2029 2030 } 2031 indexBuffer.flush(txn); 2032 modifyDNOperation.checkIfCanceled(true); 2033 } 2034 catch (StorageRuntimeException | DirectoryException | CanceledOperationException e) 2035 { 2036 throw e; 2037 } 2038 catch (Exception e) 2039 { 2040 String msg = e.getMessage(); 2041 if (msg == null) 2042 { 2043 msg = stackTraceToSingleLineString(e); 2044 } 2045 throw new DirectoryException( 2046 DirectoryServer.getServerErrorResultCode(), ERR_UNCHECKED_EXCEPTION.get(msg), e); 2047 } 2048 } 2049 2050 private void renameSingleEntry( 2051 final WriteableTransaction txn, 2052 final Pair<Long, Long> renamedEntryID, 2053 final Cursor<EntryID, Entry> cursor, 2054 final IndexBuffer indexBuffer, 2055 final DN newTargetDN, 2056 final boolean renumberEntryIDs, 2057 final boolean isBaseEntry) throws DirectoryException 2058 { 2059 final EntryID oldEntryID = new EntryID(renamedEntryID.getFirst()); 2060 final EntryID newEntryID = new EntryID(renamedEntryID.getSecond()); 2061 if (!cursor.positionToKey(oldEntryID.toByteString())) 2062 { 2063 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 2064 ERR_MISSING_ID2ENTRY_RECORD.get(oldEntryID)); 2065 } 2066 2067 final Entry oldEntry = cursor.getValue(); 2068 final Entry newEntry; 2069 final List<Modification> modifications; 2070 if (isBaseEntry) 2071 { 2072 if (!isManageDsaITOperation(modifyDNOperation)) 2073 { 2074 dn2uri.checkTargetForReferral(oldEntry, null); 2075 } 2076 newEntry = newTargetEntry; 2077 modifications = modifyDNOperation.getModifications(); 2078 } 2079 else 2080 { 2081 final DN newDN = oldEntry.getName().rename(oldTargetDN, newTargetDN); 2082 newEntry = oldEntry.duplicate(false); 2083 newEntry.setDN(newDN); 2084 modifications = invokeSubordinateModifyDNPlugins(oldEntry, newEntry); 2085 } 2086 2087 if (renumberEntryIDs) 2088 { 2089 cursor.delete(); 2090 } 2091 id2entry.put(txn, newEntryID, newEntry); 2092 dn2uri.deleteEntry(txn, oldEntry); 2093 dn2uri.addEntry(txn, newEntry); 2094 if (renumberEntryIDs) 2095 { 2096 // In-order: new entryID is guaranteed to be greater than old entryID. 2097 final long count = id2childrenCount.removeCount(txn, oldEntryID); 2098 id2childrenCount.updateCount(txn, newEntryID, count); 2099 } 2100 2101 if (renumberEntryIDs || modifications == null) 2102 { 2103 // Slow path: the entry has been renumbered so we need to fully re-index. 2104 removeEntryFromIndexes(indexBuffer, oldEntry, oldEntryID); 2105 insertEntryIntoIndexes(indexBuffer, newEntry, newEntryID); 2106 } 2107 else if (!modifications.isEmpty()) 2108 { 2109 // Fast-path: the entryID has not changed so we only need to re-index the mods. 
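      // indexModifications() skips attribute indexes whose attribute type is not referenced by the mods.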
2110 indexModifications(indexBuffer, oldEntry, newEntry, oldEntryID, modifications); 2111 } 2112 2113 final EntryCache<?> entryCache = DirectoryServer.getEntryCache(); 2114 if (entryCache != null) 2115 { 2116 entryCache.removeEntry(oldEntry.getName()); 2117 } 2118 } 2119 2120 private List<Modification> invokeSubordinateModifyDNPlugins( 2121 final Entry oldEntry, final Entry newEntry) throws DirectoryException 2122 { 2123 final List<Modification> modifications = Collections.unmodifiableList(new ArrayList<Modification>(0)); 2124 2125 // Create a new entry that is a copy of the old entry but with the new DN. 2126 // Also invoke any subordinate modify DN plugins on the entry. 2127 // FIXME -- At the present time, we don't support subordinate modify DN 2128 // plugins that make changes to subordinate entries and therefore 2129 // provide an unmodifiable list for the modifications element. 2130 // FIXME -- This will need to be updated appropriately if we decided that 2131 // these plugins should be invoked for synchronization operations. 2132 if (!modifyDNOperation.isSynchronizationOperation()) 2133 { 2134 SubordinateModifyDN pluginResult = getPluginConfigManager().invokeSubordinateModifyDNPlugins( 2135 modifyDNOperation, oldEntry, newEntry, modifications); 2136 2137 if (!pluginResult.continueProcessing()) 2138 { 2139 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 2140 ERR_MODIFYDN_ABORTED_BY_SUBORDINATE_PLUGIN.get(oldEntry.getName(), 2141 newEntry.getName())); 2142 } 2143 2144 if (!modifications.isEmpty()) 2145 { 2146 LocalizableMessageBuilder invalidReason = new LocalizableMessageBuilder(); 2147 if (!newEntry.conformsToSchema(null, false, false, false, invalidReason)) 2148 { 2149 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), 2150 ERR_MODIFYDN_ABORTED_BY_SUBORDINATE_SCHEMA_ERROR.get(oldEntry.getName(), 2151 newEntry.getName(), 2152 invalidReason)); 2153 } 2154 } 2155 } 2156 return modifications; 2157 } 2158 }); 2159 } 2160 catch (Exception e) 2161 { 2162 writeTrustState(indexBuffer); 2163 throwAllowedExceptionTypes(e, DirectoryException.class, CanceledOperationException.class); 2164 } 2165 } 2166 2167 /** 2168 * Insert a new entry into the attribute indexes. 2169 * 2170 * @param buffer The index buffer used to buffer up the index changes. 2171 * @param entry The entry to be inserted into the indexes. 2172 * @param entryID The ID of the entry to be inserted into the indexes. 2173 * @throws StorageRuntimeException If an error occurs in the storage. 2174 * @throws DirectoryException If a Directory Server error occurs. 2175 */ 2176 private void insertEntryIntoIndexes(IndexBuffer buffer, Entry entry, EntryID entryID) 2177 throws StorageRuntimeException, DirectoryException 2178 { 2179 for (AttributeIndex index : attrIndexMap.values()) 2180 { 2181 index.addEntry(buffer, entryID, entry); 2182 } 2183 2184 for (VLVIndex vlvIndex : vlvIndexMap.values()) 2185 { 2186 vlvIndex.addEntry(buffer, entryID, entry); 2187 } 2188 } 2189 2190 /** 2191 * Remove an entry from the attribute indexes. 2192 * 2193 * @param buffer The index buffer used to buffer up the index changes. 2194 * @param entry The entry to be removed from the indexes. 2195 * @param entryID The ID of the entry to be removed from the indexes. 2196 * @throws StorageRuntimeException If an error occurs in the storage. 2197 * @throws DirectoryException If a Directory Server error occurs. 
2198 */ 2199 private void removeEntryFromIndexes(IndexBuffer buffer, Entry entry, EntryID entryID) 2200 throws StorageRuntimeException, DirectoryException 2201 { 2202 for (AttributeIndex index : attrIndexMap.values()) 2203 { 2204 index.removeEntry(buffer, entryID, entry); 2205 } 2206 2207 for (VLVIndex vlvIndex : vlvIndexMap.values()) 2208 { 2209 vlvIndex.removeEntry(buffer, entryID, entry); 2210 } 2211 } 2212 2213 /** 2214 * Update the attribute indexes to reflect the changes to the 2215 * attributes of an entry resulting from a sequence of modifications. 2216 * 2217 * @param buffer The index buffer used to buffer up the index changes. 2218 * @param oldEntry The contents of the entry before the change. 2219 * @param newEntry The contents of the entry after the change. 2220 * @param entryID The ID of the entry that was changed. 2221 * @param mods The sequence of modifications made to the entry. 2222 * @throws StorageRuntimeException If an error occurs in the storage. 2223 * @throws DirectoryException If a Directory Server error occurs. 2224 */ 2225 private void indexModifications(IndexBuffer buffer, Entry oldEntry, Entry newEntry, 2226 EntryID entryID, List<Modification> mods) 2227 throws StorageRuntimeException, DirectoryException 2228 { 2229 // Process in index configuration order. 2230 for (AttributeIndex index : attrIndexMap.values()) 2231 { 2232 if (isAttributeModified(index.getAttributeType(), mods)) 2233 { 2234 index.modifyEntry(buffer, entryID, oldEntry, newEntry); 2235 } 2236 } 2237 2238 for(VLVIndex vlvIndex : vlvIndexMap.values()) 2239 { 2240 vlvIndex.modifyEntry(buffer, entryID, oldEntry, newEntry, mods); 2241 } 2242 } 2243 2244 /** 2245 * Get a count of the number of entries stored in this entry container including the baseDN 2246 * 2247 * @return The number of entries stored in this entry container including the baseDN. 2248 * @throws StorageRuntimeException 2249 * If an error occurs in the storage. 2250 */ 2251 long getNumberOfEntriesInBaseDN() throws StorageRuntimeException 2252 { 2253 try 2254 { 2255 return storage.read(new ReadOperation<Long>() 2256 { 2257 @Override 2258 public Long run(ReadableTransaction txn) throws Exception 2259 { 2260 return getNumberOfEntriesInBaseDN0(txn); 2261 } 2262 }); 2263 } 2264 catch (Exception e) 2265 { 2266 throw new StorageRuntimeException(e); 2267 } 2268 } 2269 2270 long getNumberOfEntriesInBaseDN0(ReadableTransaction txn) 2271 { 2272 return id2childrenCount.getTotalCount(txn); 2273 } 2274 2275 /** 2276 * Determine whether the provided operation has the ManageDsaIT request control. 2277 * @param operation The operation for which the determination is to be made. 2278 * @return true if the operation has the ManageDsaIT request control, or false if not. 2279 */ 2280 private static boolean isManageDsaITOperation(Operation operation) 2281 { 2282 for (Control control : operation.getRequestControls()) 2283 { 2284 if (ServerConstants.OID_MANAGE_DSAIT_CONTROL.equals(control.getOID())) 2285 { 2286 return true; 2287 } 2288 } 2289 return false; 2290 } 2291 2292 /** 2293 * Delete this entry container from disk. The entry container should be 2294 * closed before calling this method. 2295 * 2296 * @param txn a non null transaction 2297 * @throws StorageRuntimeException If an error occurs while removing the entry container. 2298 */ 2299 void delete(WriteableTransaction txn) throws StorageRuntimeException 2300 { 2301 for (Tree tree : listTrees()) 2302 { 2303 tree.delete(txn); 2304 } 2305 } 2306 2307 /** 2308 * Remove a tree from disk. 
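   * <p>The state tree itself is never removed by this method; when an index tree is removed,
   * its record is also deleted from the state tree.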
2309 * 2310 * @param txn a non null transaction 2311 * @param tree The tree container to remove. 2312 * @throws StorageRuntimeException If an error occurs while attempting to delete the tree. 2313 */ 2314 void deleteTree(WriteableTransaction txn, Tree tree) throws StorageRuntimeException 2315 { 2316 if(tree == state) 2317 { 2318 // The state tree cannot be removed individually. 2319 return; 2320 } 2321 2322 tree.delete(txn); 2323 if(tree instanceof Index) 2324 { 2325 state.deleteRecord(txn, tree.getName()); 2326 } 2327 } 2328 2329 /** 2330 * This method constructs a container name from a base DN. Only alphanumeric 2331 * characters are preserved, all other characters are replaced with an 2332 * underscore. 2333 * 2334 * @return The container name for the base DN. 2335 */ 2336 String getTreePrefix() 2337 { 2338 return treePrefix; 2339 } 2340 2341 @Override 2342 public DN getBaseDN() 2343 { 2344 return baseDN; 2345 } 2346 2347 /** 2348 * Get the parent of a DN in the scope of the base DN. 2349 * 2350 * @param dn A DN which is in the scope of the base DN. 2351 * @return The parent DN, or null if the given DN is the base DN. 2352 */ 2353 DN getParentWithinBase(DN dn) 2354 { 2355 if (dn.equals(baseDN)) 2356 { 2357 return null; 2358 } 2359 return dn.parent(); 2360 } 2361 2362 @Override 2363 public boolean isConfigurationChangeAcceptable(PluggableBackendCfg cfg, List<LocalizableMessage> unacceptableReasons) 2364 { 2365 if (cfg.isConfidentialityEnabled()) 2366 { 2367 final String cipherTransformation = cfg.getCipherTransformation(); 2368 final int keyLength = cfg.getCipherKeyLength(); 2369 2370 try 2371 { 2372 serverContext.getCryptoManager().ensureCipherKeyIsAvailable(cipherTransformation, keyLength); 2373 } 2374 catch (Exception e) 2375 { 2376 unacceptableReasons.add(ERR_BACKEND_FAULTY_CRYPTO_TRANSFORMATION.get(cipherTransformation, keyLength, e)); 2377 return false; 2378 } 2379 } 2380 else 2381 { 2382 StringBuilder builder = new StringBuilder(); 2383 for (AttributeIndex attributeIndex : attrIndexMap.values()) 2384 { 2385 if (attributeIndex.isConfidentialityEnabled()) 2386 { 2387 if (builder.length() > 0) 2388 { 2389 builder.append(", "); 2390 } 2391 builder.append(attributeIndex.getAttributeType().getNameOrOID()); 2392 } 2393 } 2394 if (builder.length() > 0) 2395 { 2396 unacceptableReasons.add(ERR_BACKEND_CANNOT_CHANGE_CONFIDENTIALITY.get(getBaseDN(), builder.toString())); 2397 return false; 2398 } 2399 } 2400 return true; 2401 } 2402 2403 @Override 2404 public ConfigChangeResult applyConfigurationChange(final PluggableBackendCfg cfg) 2405 { 2406 final ConfigChangeResult ccr = new ConfigChangeResult(); 2407 2408 exclusiveLock.lock(); 2409 try 2410 { 2411 storage.write(new WriteOperation() 2412 { 2413 @Override 2414 public void run(WriteableTransaction txn) throws Exception 2415 { 2416 id2entry.setDataConfig(newDataConfig(cfg)); 2417 EntryContainer.this.config = cfg; 2418 } 2419 }); 2420 for (CryptoSuite indexCrypto : attrCryptoMap.values()) 2421 { 2422 indexCrypto.newParameters(cfg.getCipherTransformation(), cfg.getCipherKeyLength(), indexCrypto.isEncrypted()); 2423 } 2424 } 2425 catch (Exception e) 2426 { 2427 ccr.setResultCode(DirectoryServer.getServerErrorResultCode()); 2428 ccr.addMessage(LocalizableMessage.raw(stackTraceToSingleLineString(e))); 2429 } 2430 finally 2431 { 2432 exclusiveLock.unlock(); 2433 } 2434 2435 return ccr; 2436 } 2437 2438 /** 2439 * Clear the contents of this entry container. 
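   * <p>All trees belonging to this entry container are deleted within a single write operation.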
2440 * 2441 * @throws StorageRuntimeException If an error occurs while removing the entry 2442 * container. 2443 */ 2444 public void clear() throws StorageRuntimeException 2445 { 2446 try 2447 { 2448 storage.write(new WriteOperation() 2449 { 2450 @Override 2451 public void run(WriteableTransaction txn) throws Exception 2452 { 2453 for (Tree tree : listTrees()) 2454 { 2455 tree.delete(txn); 2456 } 2457 } 2458 }); 2459 } 2460 catch (Exception e) 2461 { 2462 throw new StorageRuntimeException(e); 2463 } 2464 } 2465 2466 List<Tree> listTrees() 2467 { 2468 final List<Tree> allTrees = new ArrayList<>(); 2469 allTrees.add(dn2id); 2470 allTrees.add(id2entry); 2471 allTrees.add(dn2uri); 2472 allTrees.add(id2childrenCount); 2473 allTrees.add(state); 2474 2475 for (AttributeIndex index : attrIndexMap.values()) 2476 { 2477 allTrees.addAll(index.getNameToIndexes().values()); 2478 } 2479 2480 allTrees.addAll(vlvIndexMap.values()); 2481 return allTrees; 2482 } 2483 2484 /** 2485 * Finds an existing entry whose DN is the closest ancestor of a given baseDN. 2486 * 2487 * @param targetDN the DN for which we are searching a matched DN. 2488 * @return the DN of the closest ancestor of the baseDN. 2489 * @throws DirectoryException If an error prevented the check of an 2490 * existing entry from being performed. 2491 */ 2492 private DN getMatchedDN(ReadableTransaction txn, DN targetDN) throws DirectoryException 2493 { 2494 DN parentDN = DirectoryServer.getParentDNInSuffix(targetDN); 2495 while (parentDN != null && parentDN.isSubordinateOrEqualTo(baseDN)) 2496 { 2497 if (entryExists(txn, parentDN)) 2498 { 2499 return parentDN; 2500 } 2501 parentDN = DirectoryServer.getParentDNInSuffix(parentDN); 2502 } 2503 return null; 2504 } 2505 2506 boolean isConfidentialityEnabled() 2507 { 2508 return config.isConfidentialityEnabled(); 2509 } 2510 2511 /** 2512 * Fetch the base Entry of the EntryContainer. 2513 * @param searchBaseDN the DN for the base entry 2514 * @param searchScope the scope under which this is fetched. 2515 * Scope is used for referral processing. 2516 * @return the Entry matching the baseDN. 2517 * @throws DirectoryException if the baseDN doesn't exist. 2518 */ 2519 private Entry fetchBaseEntry(ReadableTransaction txn, DN searchBaseDN, SearchScope searchScope) 2520 throws DirectoryException 2521 { 2522 Entry baseEntry = getEntry0(txn, searchBaseDN); 2523 if (baseEntry == null) 2524 { 2525 // Check for referral entries above the base entry. 
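      // A referral entry above the search base, if any, is reported here; otherwise fall through
      // and report NO_SUCH_OBJECT with the closest existing ancestor as the matched DN.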
2526 dn2uri.targetEntryReferrals(txn, searchBaseDN, searchScope); 2527 throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, 2528 ERR_SEARCH_NO_SUCH_OBJECT.get(searchBaseDN), getMatchedDN(txn, searchBaseDN), null); 2529 } 2530 return baseEntry; 2531 } 2532 2533 private long[] sort(ReadableTransaction txn, EntryIDSet entryIDSet, SearchOperation searchOperation, 2534 List<SortKey> sortKeys, VLVRequestControl vlvRequest) throws DirectoryException 2535 { 2536 if (!entryIDSet.isDefined()) 2537 { 2538 return null; 2539 } 2540 2541 final DN baseDN = searchOperation.getBaseDN(); 2542 final SearchScope scope = searchOperation.getScope(); 2543 final SearchFilter filter = searchOperation.getFilter(); 2544 2545 final TreeMap<ByteString, EntryID> sortMap = new TreeMap<>(); 2546 for (EntryID id : entryIDSet) 2547 { 2548 try 2549 { 2550 Entry e = getEntry(txn, id); 2551 if (e.matchesBaseAndScope(baseDN, scope) && filter.matchesEntry(e)) 2552 { 2553 sortMap.put(encodeVLVKey(sortKeys, e, id.longValue()), id); 2554 } 2555 } 2556 catch (Exception e) 2557 { 2558 LocalizableMessage message = ERR_ENTRYIDSORTER_CANNOT_EXAMINE_ENTRY.get(id, getExceptionMessage(e)); 2559 throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), message, e); 2560 } 2561 } 2562 2563 // See if there is a VLV request to further pare down the set of results, and if there is where it should be 2564 // processed by offset or assertion value. 2565 if (vlvRequest == null) 2566 { 2567 return toArray(sortMap.values()); 2568 } 2569 2570 if (vlvRequest.getTargetType() == VLVRequestControl.TYPE_TARGET_BYOFFSET) 2571 { 2572 return sortByOffset(searchOperation, vlvRequest, sortMap); 2573 } 2574 return sortByGreaterThanOrEqualAssertion(searchOperation, vlvRequest, sortKeys, sortMap); 2575 } 2576 2577 private static final long[] toArray(Collection<EntryID> entryIDs) 2578 { 2579 final long[] array = new long[entryIDs.size()]; 2580 int i = 0; 2581 for (EntryID entryID : entryIDs) 2582 { 2583 array[i++] = entryID.longValue(); 2584 } 2585 return array; 2586 } 2587 2588 private static final long[] sortByGreaterThanOrEqualAssertion(SearchOperation searchOperation, 2589 VLVRequestControl vlvRequest, List<SortKey> sortKeys, final TreeMap<ByteString, EntryID> sortMap) 2590 throws DirectoryException 2591 { 2592 ByteString assertionValue = vlvRequest.getGreaterThanOrEqualAssertion(); 2593 ByteSequence encodedTargetAssertion = 2594 encodeTargetAssertion(sortKeys, assertionValue, searchOperation, sortMap.size()); 2595 2596 boolean targetFound = false; 2597 int index = 0; 2598 int targetIndex = 0; 2599 int startIndex = 0; 2600 int includedAfterCount = 0; 2601 long[] idSet = new long[sortMap.size()]; 2602 for (Map.Entry<ByteString, EntryID> entry : sortMap.entrySet()) 2603 { 2604 ByteString vlvKey = entry.getKey(); 2605 EntryID id = entry.getValue(); 2606 idSet[index++] = id.longValue(); 2607 2608 if (targetFound) 2609 { 2610 includedAfterCount++; 2611 if (includedAfterCount >= vlvRequest.getAfterCount()) 2612 { 2613 break; 2614 } 2615 } 2616 else 2617 { 2618 targetFound = vlvKey.compareTo(encodedTargetAssertion) >= 0; 2619 if (targetFound) 2620 { 2621 startIndex = Math.max(0, targetIndex - vlvRequest.getBeforeCount()); 2622 } 2623 targetIndex++; 2624 } 2625 } 2626 2627 final long[] result; 2628 if (targetFound) 2629 { 2630 final long[] array = new long[index - startIndex]; 2631 System.arraycopy(idSet, startIndex, array, 0, array.length); 2632 result = array; 2633 } 2634 else 2635 { 2636 /* 2637 * No entry was found to be greater than or equal to 
the sort key, so the target offset will 2638 * be one greater than the content count. 2639 */ 2640 targetIndex = sortMap.size() + 1; 2641 result = new long[0]; 2642 } 2643 addVLVResponseControl(searchOperation, targetIndex, sortMap.size(), SUCCESS); 2644 return result; 2645 } 2646 2647 private static final long[] sortByOffset(SearchOperation searchOperation, VLVRequestControl vlvRequest, 2648 TreeMap<ByteString, EntryID> sortMap) throws DirectoryException 2649 { 2650 int targetOffset = vlvRequest.getOffset(); 2651 if (targetOffset < 0) 2652 { 2653 // The client specified a negative target offset. This should never be allowed. 2654 addVLVResponseControl(searchOperation, targetOffset, sortMap.size(), OFFSET_RANGE_ERROR); 2655 2656 LocalizableMessage message = ERR_ENTRYIDSORTER_NEGATIVE_START_POS.get(); 2657 throw new DirectoryException(ResultCode.VIRTUAL_LIST_VIEW_ERROR, message); 2658 } 2659 2660 // This is an easy mistake to make, since VLV offsets start at 1 instead of 0. We'll assume the client meant 2661 // to use 1. 2662 targetOffset = (targetOffset == 0) ? 1 : targetOffset; 2663 2664 int beforeCount = vlvRequest.getBeforeCount(); 2665 int afterCount = vlvRequest.getAfterCount(); 2666 int listOffset = targetOffset - 1; // VLV offsets start at 1, not 0. 2667 int startPos = listOffset - beforeCount; 2668 if (startPos < 0) 2669 { 2670 // This can happen if beforeCount >= offset, and in this case we'll just adjust the start position to ignore 2671 // the range of beforeCount that doesn't exist. 2672 startPos = 0; 2673 beforeCount = listOffset; 2674 } 2675 else if (startPos >= sortMap.size()) 2676 { 2677 // The start position is beyond the end of the list. In this case, we'll assume that the start position was 2678 // one greater than the size of the list and will only return the beforeCount entries. 2679 targetOffset = sortMap.size() + 1; 2680 listOffset = sortMap.size(); 2681 startPos = listOffset - beforeCount; 2682 afterCount = 0; 2683 } 2684 2685 int count = 1 + beforeCount + afterCount; 2686 long[] sortedIDs = new long[count]; 2687 int treePos = 0; 2688 int arrayPos = 0; 2689 for (EntryID id : sortMap.values()) 2690 { 2691 if (treePos++ < startPos) 2692 { 2693 continue; 2694 } 2695 2696 sortedIDs[arrayPos++] = id.longValue(); 2697 if (arrayPos >= count) 2698 { 2699 break; 2700 } 2701 } 2702 2703 if (arrayPos < count) 2704 { 2705 // We don't have enough entries in the set to meet the requested page size, so we'll need to shorten the array. 2706 sortedIDs = Arrays.copyOf(sortedIDs, arrayPos); 2707 } 2708 2709 addVLVResponseControl(searchOperation, targetOffset, sortMap.size(), SUCCESS); 2710 return sortedIDs; 2711 } 2712 2713 private static void addVLVResponseControl(SearchOperation searchOp, int targetPosition, int contentCount, 2714 int vlvResultCode) 2715 { 2716 searchOp.addResponseControl(new VLVResponseControl(targetPosition, contentCount, vlvResultCode)); 2717 } 2718 2719 /** Get the exclusive lock. */ 2720 void lock() 2721 { 2722 exclusiveLock.lock(); 2723 } 2724 2725 /** Unlock the exclusive lock. */ 2726 void unlock() 2727 { 2728 exclusiveLock.unlock(); 2729 } 2730 2731 @Override 2732 public String toString() { 2733 return treePrefix; 2734 } 2735 2736 static boolean isAttributeModified(AttributeType attrType, List<Modification> mods) 2737 { 2738 for (Modification mod : mods) 2739 { 2740 if (attrType.isSuperTypeOf(mod.getAttribute().getAttributeDescription().getAttributeType())) 2741 { 2742 return true; 2743 } 2744 } 2745 return false; 2746 } 2747}