| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250 |
- /** BEGIN COPYRIGHT BLOCK
- * This Program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation; version 2 of the License.
- *
- * This Program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * In addition, as a special exception, Red Hat, Inc. gives You the additional
- * right to link the code of this Program with code not covered under the GNU
- * General Public License ("Non-GPL Code") and to distribute linked combinations
- * including the two, subject to the limitations in this paragraph. Non-GPL Code
- * permitted under this exception must only link to the code of this Program
- * through those well defined interfaces identified in the file named EXCEPTION
- * found in the source code files (the "Approved Interfaces"). The files of
- * Non-GPL Code may instantiate templates or use macros or inline functions from
- * the Approved Interfaces without causing the resulting work to be covered by
- * the GNU General Public License. Only Red Hat, Inc. may make changes or
- * additions to the list of Approved Interfaces. You must obey the GNU General
- * Public License in all respects for all of the Program code and other code used
- * in conjunction with the Program except the Non-GPL Code covered by this
- * exception. If you modify this file, you may extend this exception to your
- * version of the file, but you are not obligated to do so. If you do not wish to
- * provide this exception without modification, you must delete this exception
- * statement from your version and license this file solely under the GPL without
- * exception.
- *
- *
- * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
- * All rights reserved.
- * END COPYRIGHT BLOCK **/
- #ifdef HAVE_CONFIG_H
- # include <config.h>
- #endif
- /* cache.c - routines to maintain an in-core cache of entries */
- #include "back-ldbm.h"
- #ifdef DEBUG
- #define LDAP_CACHE_DEBUG
- /* #define LDAP_CACHE_DEBUG_LRU */ /* causes slowdown */
- #endif
- /* cache can't get any smaller than this (in bytes) */
- #define MINCACHESIZE (size_t)512000
- /* don't let hash be smaller than this # of slots */
- #define MINHASHSIZE 1024
- /*
- * the cache has three entry points (ways to find things):
- *
- * by entry e.g., if you already have an entry from the cache
- * and want to delete it. (really by entry ptr)
- * by dn e.g., when looking for the base object of a search
- * by id e.g., for search candidates
- * by uniqueid
- *
- * these correspond to three different avl trees that are maintained.
- * those avl trees are being destroyed as we speak.
- */
- #ifdef LDAP_CACHE_DEBUG
- #define ASSERT(_x) do { \
- if (!(_x)) { \
- LDAPDebug(LDAP_DEBUG_ANY, "BAD CACHE ASSERTION at %s/%d: %s\n", \
- __FILE__, __LINE__, #_x); \
- *(char *)0L = 23; \
- } \
- } while (0)
- #define LOG(_a, _x1, _x2, _x3) LDAPDebug(LDAP_DEBUG_CACHE, _a, _x1, _x2, _x3)
- #else
- #define ASSERT(_x) ;
- #define LOG(_a, _x1, _x2, _x3) ;
- #endif
- /***** tiny hashtable implementation *****/
- #define HASH_VALUE(_key, _keylen) \
- ((ht->hashfn == NULL) ? (*(unsigned int *)(_key)) : \
- ((*ht->hashfn)(_key, _keylen)))
- #define HASH_NEXT(ht, entry) (*(void **)((char *)(entry) + (ht)->offset))
- static int entry_same_id(const void *e, const void *k)
- {
- return (((struct backentry *)e)->ep_id == *(ID *)k);
- }
/* Hash a normalized dn string: classic shift-and-add rolling hash over
 * each byte of the key. */
static unsigned long dn_hash(const void *key, size_t keylen)
{
    const unsigned char *p = (const unsigned char *)key;
    unsigned long val = 0;
    size_t n;

    for (n = 0; n < keylen; n++) {
        val += ((val << 5) + p[n]) & 0xffffffff;
    }
    return val;
}
- #ifdef UUIDCACHE_ON
/* Hash a uniqueid.  Uniqueids are hex text, so each byte is first mapped
 * to its nibble value ('0'-'9' -> 0-9, 'A'-'F' -> 10-15) and then folded
 * into the accumulator with a rotate-xor. */
static unsigned long uuid_hash(const void *key, size_t keylen)
{
    const unsigned char *p = (const unsigned char *)key;
    unsigned long val = 0;
    size_t n;

    for (n = 0; n < keylen; n++) {
        char c = (p[n] <= '9') ? (char)(p[n] - '0') : (char)(p[n] - 'A' + 10);
        val = ((val << 4) ^ (val >> 28) ^ c) & 0xffffffff;
    }
    return val;
}
- static int entry_same_uuid(const void *e, const void *k)
- {
- struct backentry *be = (struct backentry *)e;
- const char *uuid = slapi_entry_get_uniqueid(be->ep_entry);
- return (strcmp(uuid, (char *)k) == 0);
- }
- #endif
/* Hashtable key-test callback: does backentry 'e' have normalized dn 'k'? */
static int entry_same_dn(const void *e, const void *k)
{
    const char *entry_ndn =
        slapi_sdn_get_ndn(backentry_get_sdn((struct backentry *)e));
    return strcmp(entry_ndn, (const char *)k) == 0;
}
- Hashtable *new_hash(u_long size, u_long offset, HashFn hfn,
- HashTestFn tfn)
- {
- static u_long prime[] = { 3, 5, 7, 11, 13, 17, 19 };
- Hashtable *ht;
- int ok = 0, i;
- if (size < MINHASHSIZE)
- size = MINHASHSIZE;
- /* move up to nearest relative prime (it's a statistical thing) */
- size |= 1;
- do {
- ok = 1;
- for (i = 0; i < (sizeof(prime) / sizeof(prime[0])); i++)
- if (!(size % prime[i]))
- ok = 0;
- if (!ok)
- size += 2;
- } while (!ok);
- ht = (Hashtable*)slapi_ch_calloc(1, sizeof(Hashtable) + size*sizeof(void *));
- if (!ht)
- return NULL;
- ht->size = size;
- ht->offset = offset;
- ht->hashfn = hfn;
- ht->testfn = tfn;
- /* calloc zeroes out the slots automagically */
- return ht;
- }
- /* adds an entry to the hash -- returns 1 on success, 0 if the key was
- * already there (filled into 'alt' if 'alt' is not NULL)
- */
- int add_hash(Hashtable *ht, void *key, size_t keylen, void *entry,
- void **alt)
- {
- u_long val, slot;
- void *e;
- val = HASH_VALUE(key, keylen);
- slot = (val % ht->size);
- /* first, check if this key is already in the table */
- e = ht->slot[slot];
- while (e) {
- if ((*ht->testfn)(e, key)) {
- /* ack! already in! */
- if (alt)
- *alt = e;
- return 0;
- }
- e = HASH_NEXT(ht, e);
- }
- /* ok, it's not already there, so add it */
- HASH_NEXT(ht, entry) = ht->slot[slot];
- ht->slot[slot] = entry;
- return 1;
- }
- /* returns 1 if the item was found, and puts a ptr to it in 'entry' */
- int find_hash(Hashtable *ht, const void *key, size_t keylen, void **entry)
- {
- u_long val, slot;
- void *e;
- val = HASH_VALUE(key, keylen);
- slot = (val % ht->size);
- e = ht->slot[slot];
- while (e) {
- if ((*ht->testfn)(e, key)) {
- *entry = e;
- return 1;
- }
- e = HASH_NEXT(ht, e);
- }
- /* no go */
- *entry = NULL;
- return 0;
- }
- /* returns 1 if the item was found and removed */
- int remove_hash(Hashtable *ht, const void *key, size_t keylen)
- {
- u_long val, slot;
- void *e, *laste = NULL;
- val = HASH_VALUE(key, keylen);
- slot = (val % ht->size);
- e = ht->slot[slot];
- while (e) {
- if ((*ht->testfn)(e, key)) {
- /* remove this one */
- if (laste)
- HASH_NEXT(ht, laste) = HASH_NEXT(ht, e);
- else
- ht->slot[slot] = HASH_NEXT(ht, e);
- HASH_NEXT(ht, e) = NULL;
- return 1;
- }
- laste = e;
- e = HASH_NEXT(ht, e);
- }
- /* nope */
- return 0;
- }
- /* hashtable distribution stats --
- * slots: # of slots in the hashtable
- * total_entries: # of entries in the hashtable
- * max_entries_per_slot: highest number of chained entries in a single slot
- * slot_stats: if X is the number of entries in a given slot, then
- * slot_stats[X] will hold the number of slots that held X entries
- */
static void hash_stats(Hashtable *ht, u_long *slots, int *total_entries,
                       int *max_entries_per_slot, int **slot_stats)
{
#define MAX_SLOT_STATS 50
    u_long i;
    int x;
    void *e;

    /* caller owns *slot_stats and must slapi_ch_free it when done */
    *slot_stats = (int *)slapi_ch_malloc(MAX_SLOT_STATS * sizeof(int));
    for (i = 0; i < MAX_SLOT_STATS; i++)
        (*slot_stats)[i] = 0;

    *slots = ht->size;
    *max_entries_per_slot = 0;
    *total_entries = 0;
    for (i = 0; i < ht->size; i++) {
        /* walk the chain hanging off slot i, counting its length in x */
        e = ht->slot[i];
        x = 0;
        while (e) {
            x++;
            (*total_entries)++;
            e = HASH_NEXT(ht, e);
        }
        /* chains of MAX_SLOT_STATS or more entries are not histogrammed */
        if (x < MAX_SLOT_STATS)
            (*slot_stats)[x]++;
        if (x > *max_entries_per_slot)
            *max_entries_per_slot = x;
    }
}
- /***** add/remove entries to/from the LRU list *****/
- #ifdef LDAP_CACHE_DEBUG_LRU
- /* for debugging -- painstakingly verify the lru list is ok -- if 'in' is
- * true, then entry 'e' should be in the list right now; otherwise, it
- * should NOT be in the list.
- */
static void lru_verify(struct cache *cache, struct backentry *e, int in)
{
    int is_in = 0;
    int count = 0;
    struct backentry *ep;

    /* walk the whole LRU from head to tail, checking that every node's
     * prev/next pointers are mutually consistent and that the list's
     * endpoints really are c_lruhead / c_lrutail */
    ep = cache->c_lruhead;
    while (ep) {
        count++;
        if (ep == e) {
            is_in = 1;
        }
        if (ep->ep_lruprev) {
            ASSERT(ep->ep_lruprev->ep_lrunext == ep);
        } else {
            ASSERT(ep == cache->c_lruhead);
        }
        if (ep->ep_lrunext) {
            ASSERT(ep->ep_lrunext->ep_lruprev == ep);
        } else {
            ASSERT(ep == cache->c_lrutail);
        }
        ep = ep->ep_lrunext;
    }
    /* finally, confirm e's presence/absence matches the caller's claim */
    ASSERT(is_in == in);
}
- #endif
- /* assume lock is held */
static void lru_detach(struct cache *cache, struct backentry *e)
{
    /* Truncate the LRU list just before 'e': everything from 'e' back to
     * the old tail stays linked together as a detached chain, which the
     * caller (cache_flush) hands out to be freed outside the lock.  If 'e'
     * was the head, the entire list is detached and the LRU goes empty. */
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 1);
#endif
    if (e->ep_lruprev)
    {
        e->ep_lruprev->ep_lrunext = NULL;
        cache->c_lrutail = e->ep_lruprev;
    }
    else
    {
        cache->c_lruhead = NULL;
        cache->c_lrutail = NULL;
    }
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 0);
#endif
}
- /* assume lock is held */
static void lru_delete(struct cache *cache, struct backentry *e)
{
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 1);
#endif
    /* standard doubly-linked-list unlink, fixing head/tail when 'e' is
     * at either end of the list */
    if (e->ep_lruprev)
        e->ep_lruprev->ep_lrunext = e->ep_lrunext;
    else
        cache->c_lruhead = e->ep_lrunext;
    if (e->ep_lrunext)
        e->ep_lrunext->ep_lruprev = e->ep_lruprev;
    else
        cache->c_lrutail = e->ep_lruprev;
#ifdef LDAP_CACHE_DEBUG_LRU
    /* note: only debug builds scrub the stale link pointers afterwards */
    e->ep_lrunext = e->ep_lruprev = NULL;
    lru_verify(cache, e, 0);
#endif
}
- /* assume lock is held */
static void lru_add(struct cache *cache, struct backentry *e)
{
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 0);
#endif
    /* push 'e' onto the head (most-recently-used end) of the list */
    e->ep_lruprev = NULL;
    e->ep_lrunext = cache->c_lruhead;
    cache->c_lruhead = e;
    if (e->ep_lrunext)
        e->ep_lrunext->ep_lruprev = e;
    /* list was empty: 'e' is also the tail */
    if (! cache->c_lrutail)
        cache->c_lrutail = e;
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 1);
#endif
}
- /***** cache overhead *****/
- static int cache_remove_int(struct cache *cache, struct backentry *e);
static void cache_make_hashes(struct cache *cache)
{
    /* pick a slot count: prefer the configured entry limit, otherwise
     * estimate one slot per ~512 bytes of the byte budget */
    u_long hashsize = (cache->c_maxentries > 0) ? cache->c_maxentries :
        (cache->c_maxsize/512);

    cache->c_dntable = new_hash(hashsize,
                                HASHLOC(struct backentry, ep_dn_link),
                                dn_hash, entry_same_dn);
    /* hashfn == NULL: the raw ID value is used directly as the hash */
    cache->c_idtable = new_hash(hashsize,
                                HASHLOC(struct backentry, ep_id_link),
                                NULL, entry_same_id);
#ifdef UUIDCACHE_ON
    cache->c_uuidtable = new_hash(hashsize,
                                  HASHLOC(struct backentry, ep_uuid_link),
                                  uuid_hash, entry_same_uuid);
#endif
}
- /* initialize the cache */
int cache_init(struct cache *cache, size_t maxsize, long maxentries)
{
    /* returns 1 on success, 0 if lock creation failed */
    LDAPDebug(LDAP_DEBUG_TRACE, "=> cache_init\n", 0, 0, 0);
    cache->c_maxsize = maxsize;
    cache->c_maxentries = maxentries;
    cache->c_cursize = slapi_counter_new();
    cache->c_curentries = 0;
    /* hit/try statistics are only tracked when slapi counters are enabled;
     * otherwise c_hits/c_tries stay NULL */
    if (config_get_slapi_counters()) {
        cache->c_hits = slapi_counter_new();
        cache->c_tries = slapi_counter_new();
    } else {
        cache->c_hits = NULL;
        cache->c_tries = NULL;
    }
    cache->c_lruhead = cache->c_lrutail = NULL;
    cache_make_hashes(cache);

    if (((cache->c_mutex = PR_NewLock()) == NULL) ||
        ((cache->c_emutexalloc_mutex = PR_NewLock()) == NULL)) {
        LDAPDebug(LDAP_DEBUG_ANY, "ldbm: cache_init: PR_NewLock failed\n",
                  0, 0, 0);
        /* NOTE(review): the counters and hashtables allocated above leak on
         * this path -- presumably callers treat this as fatal; confirm */
        return 0;
    }
    LDAPDebug(LDAP_DEBUG_TRACE, "<= cache_init\n", 0, 0, 0);
    return 1;
}
- #define CACHE_FULL(cache) \
- ((slapi_counter_get_value((cache)->c_cursize) > (cache)->c_maxsize) || \
- (((cache)->c_maxentries > 0) && \
- ((cache)->c_curentries > (cache)->c_maxentries)))
- /* clear out the cache to make room for new entries
- * you must be holding cache->c_mutex !!
- * return a pointer on the list of entries that get kicked out
- * of the cache.
- * These entries should be freed outside of the cache->c_mutex
- */
static struct backentry * cache_flush(struct cache *cache)
{
    struct backentry *e = NULL;

    LOG("=> cache_flush\n", 0, 0, 0);

    /* all entries on the LRU list are guaranteed to have a refcnt = 0
     * (iow, nobody's using them), so just delete from the tail down
     * until the cache is a managable size again.
     * (cache->c_mutex is locked when we enter this)
     */
    while ((cache->c_lrutail != NULL) && CACHE_FULL(cache)) {
        if (e == NULL)
        {
            /* first victim: the least-recently-used entry (the tail) */
            e = cache->c_lrutail;
        }
        else
        {
            /* keep walking toward the head; evicted entries stay chained
             * together on the LRU links so the caller gets them as one list */
            e = e->ep_lruprev;
        }
        ASSERT(e->ep_refcnt == 0);
        /* take a reference so the entry counts as in-use while it is being
         * pulled out of the hashtables */
        e->ep_refcnt++;
        if (cache_remove_int(cache, e) < 0) {
            /* NOTE(review): cache_remove_int returns 0 or 1, never negative,
             * so this error path looks unreachable -- confirm */
            LDAPDebug(LDAP_DEBUG_ANY, "cache flush: unable to delete entry\n",
                      0, 0, 0);
            break;
        }
        if(e == cache->c_lruhead) {
            break;
        }
    }
    /* cut the LRU at 'e': everything from e to the old tail is returned to
     * the caller, who must free it outside c_mutex */
    if (e)
        lru_detach(cache, e);
    LOG("<= cache_flush (down to %lu entries, %lu bytes)\n", cache->c_curentries,
        slapi_counter_get_value(cache->c_cursize), 0);
    return e;
}
- /* remove everything from the cache */
static void cache_clear_int(struct cache *cache)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;
    size_t size = cache->c_maxsize;

    /* temporarily set the byte limit to 0 so cache_flush sees the cache as
     * full and evicts every idle entry */
    cache->c_maxsize = 0;
    eflush = cache_flush(cache);
    /* free the detached chain of evicted entries */
    while (eflush)
    {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
    cache->c_maxsize = size;
    if (cache->c_curentries > 0) {
        /* entries still referenced by callers cannot be evicted here */
        LDAPDebug(LDAP_DEBUG_ANY, "somehow, there are still %ld entries "
                  "in the entry cache. :/\n", cache->c_curentries, 0, 0);
    }
}
- void cache_clear(struct cache *cache)
- {
- PR_Lock(cache->c_mutex);
- cache_clear_int(cache);
- PR_Unlock(cache->c_mutex);
- }
static void erase_cache(struct cache *cache)
{
    /* flush every entry, then throw away the hashtable shells themselves;
     * callers either rebuild them (cache_set_max_size calls
     * cache_make_hashes afterwards) or are tearing the cache down */
    cache_clear_int(cache);
    slapi_ch_free((void **)&cache->c_dntable);
    slapi_ch_free((void **)&cache->c_idtable);
#ifdef UUIDCACHE_ON
    slapi_ch_free((void **)&cache->c_uuidtable);
#endif
}
- /* to be used on shutdown or when destroying a backend instance */
- void cache_destroy_please(struct cache *cache)
- {
- erase_cache(cache);
- PR_DestroyLock(cache->c_mutex);
- PR_DestroyLock(cache->c_emutexalloc_mutex);
- }
- void cache_set_max_size(struct cache *cache, size_t bytes)
- {
- struct backentry *eflush = NULL;
- struct backentry *eflushtemp = NULL;
- if (bytes < MINCACHESIZE) {
- bytes = MINCACHESIZE;
- LDAPDebug(LDAP_DEBUG_ANY,
- "WARNING -- Minimum cache size is %lu -- rounding up\n",
- MINCACHESIZE, 0, 0);
- }
- PR_Lock(cache->c_mutex);
- cache->c_maxsize = bytes;
- LOG("entry cache size set to %lu\n", bytes, 0, 0);
- /* check for full cache, and clear out if necessary */
- if (CACHE_FULL(cache))
- eflush = cache_flush(cache);
- while (eflush)
- {
- eflushtemp = eflush->ep_lrunext;
- backentry_free(&eflush);
- eflush = eflushtemp;
- }
- if (cache->c_curentries < 50) {
- /* there's hardly anything left in the cache -- clear it out and
- * resize the hashtables for efficiency.
- */
- erase_cache(cache);
- cache_make_hashes(cache);
- }
- PR_Unlock(cache->c_mutex);
- if (! dblayer_is_cachesize_sane(&bytes)) {
- LDAPDebug(LDAP_DEBUG_ANY,
- "WARNING -- Possible CONFIGURATION ERROR -- cachesize "
- "(%lu) may be configured to use more than the available "
- "physical memory.\n", bytes, 0, 0);
- }
- }
void cache_set_max_entries(struct cache *cache, long entries)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;

    /* this is a dumb remnant of pre-5.0 servers, where the cache size
     * was given in # entries instead of memory footprint. hopefully,
     * we can eventually drop this.
     */
    PR_Lock(cache->c_mutex);
    cache->c_maxentries = entries;
    /* a limit <= 0 disables the entry-count check (see CACHE_FULL) */
    if (entries >= 0) {
        LOG("entry cache entry-limit set to %lu\n", entries, 0, 0);
    } else {
        LOG("entry cache entry-limit turned off\n", 0, 0, 0);
    }

    /* check for full cache, and clear out if necessary */
    if (CACHE_FULL(cache))
        eflush = cache_flush(cache);
    PR_Unlock(cache->c_mutex);
    /* evicted entries are freed outside the mutex, as cache_flush requires */
    while (eflush)
    {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
}
- size_t cache_get_max_size(struct cache *cache)
- {
- size_t n;
- PR_Lock(cache->c_mutex);
- n = cache->c_maxsize;
- PR_Unlock(cache->c_mutex);
- return n;
- }
- long cache_get_max_entries(struct cache *cache)
- {
- long n;
- PR_Lock(cache->c_mutex);
- n = cache->c_maxentries;
- PR_Unlock(cache->c_mutex);
- return n;
- }
- /* determine the general size of a cache entry */
- static size_t cache_entry_size(struct backentry *e)
- {
- size_t size = 0;
- if (e->ep_entry)
- size += slapi_entry_size(e->ep_entry);
- if (e->ep_vlventry)
- size += slapi_entry_size(e->ep_vlventry);
- /* cannot size ep_mutexp (PRLock) */
- size += sizeof(struct backentry);
- return size;
- }
- /* the monitor code wants to be able to safely fetch the cache stats --
- * if it ever wants to pull out more info, we might want to change all
- * these u_long *'s to a struct
- */
- void cache_get_stats(struct cache *cache, PRUint64 *hits, PRUint64 *tries,
- long *nentries, long *maxentries,
- size_t *size, size_t *maxsize)
- {
- PR_Lock(cache->c_mutex);
- if (hits) *hits = slapi_counter_get_value(cache->c_hits);
- if (tries) *tries = slapi_counter_get_value(cache->c_tries);
- if (nentries) *nentries = cache->c_curentries;
- if (maxentries) *maxentries = cache->c_maxentries;
- if (size) *size = slapi_counter_get_value(cache->c_cursize);
- if (maxsize) *maxsize = cache->c_maxsize;
- PR_Unlock(cache->c_mutex);
- }
- void cache_debug_hash(struct cache *cache, char **out)
- {
- u_long slots;
- int total_entries, max_entries_per_slot, *slot_stats;
- int i, j;
- Hashtable *ht;
- char *name;
- PR_Lock(cache->c_mutex);
- *out = (char *)slapi_ch_malloc(1024);
- **out = 0;
- for (i = 0; i < 3; i++) {
- if (i > 0)
- sprintf(*out + strlen(*out), "; ");
- switch(i) {
- case 0:
- ht = cache->c_dntable;
- name = "dn";
- break;
- case 1:
- ht = cache->c_idtable;
- name = "id";
- break;
- #ifdef UUIDCACHE_ON
- case 2:
- default:
- ht = cache->c_uuidtable;
- name = "uuid";
- break;
- #endif
- }
- hash_stats(ht, &slots, &total_entries, &max_entries_per_slot,
- &slot_stats);
- sprintf(*out + strlen(*out), "%s hash: %lu slots, %d entries (%d max "
- "entries per slot) -- ", name, slots, total_entries,
- max_entries_per_slot);
- for (j = 0; j <= max_entries_per_slot; j++)
- sprintf(*out + strlen(*out), "%d[%d] ", j, slot_stats[j]);
- slapi_ch_free((void **)&slot_stats);
- }
- PR_Unlock(cache->c_mutex);
- }
- /***** general-purpose cache stuff *****/
- /* remove an entry from the cache */
- /* you must be holding c_mutex !! */
- static int cache_remove_int(struct cache *cache, struct backentry *e)
- {
- int ret = 1; /* assume not in cache */
- const char *ndn;
- #ifdef UUIDCACHE_ON
- const char *uuid;
- #endif
- LOG("=> cache_remove (%s)\n", backentry_get_ndn(e), 0, 0);
- if (e->ep_state & ENTRY_STATE_NOTINCACHE)
- {
- return ret;
- }
- /* remove from all hashtables -- this function may be called from places
- * where the entry isn't in all the tables yet, so we don't care if any
- * of these return errors.
- */
- ndn = slapi_sdn_get_ndn(backentry_get_sdn(e));
- if (remove_hash(cache->c_dntable, (void *)ndn, strlen(ndn)))
- {
- ret = 0;
- }
- else
- {
- LOG("remove %s from dn hash failed\n", ndn, 0, 0);
- }
- if (remove_hash(cache->c_idtable, &(e->ep_id), sizeof(ID)))
- {
- ret = 0;
- }
- else
- {
- LOG("remove %d from id hash failed\n", e->ep_id, 0, 0);
- }
- #ifdef UUIDCACHE_ON
- uuid = slapi_entry_get_uniqueid(e->ep_entry);
- if (remove_hash(cache->c_uuidtable, (void *)uuid, strlen(uuid)))
- {
- ret = 0;
- }
- else
- {
- LOG("remove %d from uuid hash failed\n", uuid, 0, 0);
- }
- #endif
- if (ret == 0) {
- /* won't be on the LRU list since it has a refcount on it */
- /* adjust cache size */
- slapi_counter_subtract(cache->c_cursize, e->size);
- cache->c_curentries--;
- LOG("<= cache_remove (size %lu): cache now %lu entries, %lu bytes\n",
- e->size, cache->c_curentries,
- slapi_counter_get_value(cache->c_cursize));
- }
- /* mark for deletion (will be erased when refcount drops to zero) */
- e->ep_state |= ENTRY_STATE_DELETED;
- LOG("<= cache_remove: %d\n", ret, 0, 0);
- return ret;
- }
- /* remove an entry from the cache.
- * you must have a refcount on e (iow, fetched via cache_find_*). the
- * entry is removed from the cache, but NOT freed! you are responsible
- * for freeing the entry yourself when done with it, preferrably via
- * cache_return (called AFTER cache_remove). some code still does this
- * via backentry_free, which is okay, as long as you know you're the only
- * thread holding a reference to the deleted entry.
- * returns: 0 on success
- * 1 if the entry wasn't in the cache at all (not even partially)
- */
- int cache_remove(struct cache *cache, struct backentry *e)
- {
- int ret;
- PR_Lock(cache->c_mutex);
- ASSERT(e->ep_refcnt > 0);
- ret = cache_remove_int(cache, e);
- PR_Unlock(cache->c_mutex);
- return ret;
- }
- /* replace an entry in the cache.
- * returns: 0 on success
- * 1 if the entry wasn't in the cache
- */
int cache_replace(struct cache *cache, struct backentry *olde,
                  struct backentry *newe)
{
    int found;
    const char *oldndn;
    const char *newndn;
#ifdef UUIDCACHE_ON
    const char *olduuid;
    const char *newuuid;
#endif

    LOG("=> cache_replace (%s) -> (%s)\n", backentry_get_ndn(olde),
        backentry_get_ndn(newe), 0);

    /* remove from all hashtables -- this function may be called from places
     * where the entry isn't in all the tables yet, so we don't care if any
     * of these return errors.
     */
    oldndn = slapi_sdn_get_ndn(backentry_get_sdn(olde));
#ifdef UUIDCACHE_ON
    olduuid = slapi_entry_get_uniqueid(olde->ep_entry);
    newuuid = slapi_entry_get_uniqueid(newe->ep_entry);
#endif
    newndn = slapi_sdn_get_ndn(backentry_get_sdn(newe));
    PR_Lock(cache->c_mutex);

    /*
     * First, remove the old entry from all the hashtables.
     * If the old entry is in cache but not in at least one of the
     * cache tables, operation error
     */
    if ( (olde->ep_state & ENTRY_STATE_NOTINCACHE) == 0 ) {
        found = remove_hash(cache->c_dntable, (void *)oldndn, strlen(oldndn));
        found &= remove_hash(cache->c_idtable, &(olde->ep_id), sizeof(ID));
#ifdef UUIDCACHE_ON
        found &= remove_hash(cache->c_uuidtable, (void *)olduuid, strlen(olduuid));
#endif
        if (!found) {
            LOG("cache replace: cache index tables out of sync\n", 0, 0, 0);
            PR_Unlock(cache->c_mutex);
            return 1;
        }
    }
    if (! entry_same_dn(newe, (void *)oldndn) &&
        (newe->ep_state & ENTRY_STATE_NOTINCACHE) == 0) {
        /* if we're doing a modrdn, the new entry can be in the dn table
         * already, so we need to remove that too.
         */
        if (remove_hash(cache->c_dntable, (void *)newndn, strlen(newndn)))
        {
            /* the displaced entry left the cache: fix the accounting */
            slapi_counter_subtract(cache->c_cursize, newe->size);
            cache->c_curentries--;
            LOG("cache replace remove entry size %lu\n", newe->size, 0, 0);
        }
    }
    /* now, add the new entry to the hashtables */
    /* (probably don't need such extensive error handling, once this has been
     * tested enough that we believe it works.)
     */
    if (!add_hash(cache->c_dntable, (void *)newndn, strlen(newndn), newe, NULL)) {
        LOG("cache replace: can't add dn\n", 0, 0, 0);
        PR_Unlock(cache->c_mutex);
        return 1;
    }
    if (!add_hash(cache->c_idtable, &(newe->ep_id), sizeof(ID), newe, NULL)) {
        LOG("cache replace: can't add id\n", 0, 0, 0);
        /* roll back the dn-table insert so the tables stay consistent */
        remove_hash(cache->c_dntable, (void *)newndn, strlen(newndn));
        PR_Unlock(cache->c_mutex);
        return 1;
    }
#ifdef UUIDCACHE_ON
    if (newuuid && !add_hash(cache->c_uuidtable, (void *)newuuid, strlen(newuuid),
                             newe, NULL)) {
        LOG("cache replace: can't add uuid\n", 0, 0, 0);
        /* roll back both earlier inserts */
        remove_hash(cache->c_dntable, (void *)newndn, strlen(newndn));
        remove_hash(cache->c_idtable, &(newe->ep_id), sizeof(ID));
        PR_Unlock(cache->c_mutex);
        return 1;
    }
#endif
    /* adjust cache meta info */
    newe->ep_refcnt = 1;          /* the caller now holds a reference */
    newe->size = cache_entry_size(newe);
    /* account for the size difference between the old and new entries */
    if (newe->size > olde->size) {
        slapi_counter_add(cache->c_cursize, newe->size - olde->size);
    } else if (newe->size < olde->size) {
        slapi_counter_subtract(cache->c_cursize, olde->size - newe->size);
    }
    /* olde is freed when its last reference is returned (cache_return) */
    olde->ep_state = ENTRY_STATE_DELETED;
    newe->ep_state = 0;
    PR_Unlock(cache->c_mutex);
    LOG("<= cache_replace OK, cache size now %lu cache count now %ld\n",
        slapi_counter_get_value(cache->c_cursize), cache->c_curentries, 0);
    return 0;
}
- /* call this when you're done with an entry that was fetched via one of
- * the cache_find_* calls.
- */
void cache_return(struct cache *cache, struct backentry **bep)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;
    struct backentry *e;

    if (NULL == bep || NULL == *bep)
    {
        LOG("=> cache_return (null entry)\n", 0, 0, 0);
        return;
    }
    e = *bep;
    LOG("=> cache_return (%s) entry count: %d, entry in cache:%ld\n", backentry_get_ndn(e), e->ep_refcnt, cache->c_curentries);
    PR_Lock(cache->c_mutex);
    if (e->ep_state & ENTRY_STATE_NOTINCACHE)
    {
        /* entry never made it into the cache; the caller held the only
         * reference, so free it now */
        backentry_free(bep);
    }
    else
    {
        ASSERT(e->ep_refcnt > 0);
        if (! --e->ep_refcnt) {
            if (e->ep_state & ENTRY_STATE_DELETED) {
                /* last reference to a removed entry -- erase it */
                backentry_free(bep);
            } else {
                /* entry is now idle: park it at the MRU end of the LRU */
                lru_add(cache, e);
                /* the cache might be overfull... */
                if (CACHE_FULL(cache))
                    eflush = cache_flush(cache);
            }
        }
    }
    PR_Unlock(cache->c_mutex);
    /* free any evicted chain outside the mutex, as cache_flush requires */
    while (eflush)
    {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
}
/* lookup entry by DN; acquires the cache lock itself (the entry must be
 * given back later via cache_return) */
struct backentry *cache_find_dn(struct cache *cache, const char *dn, unsigned long ndnlen)
{
    struct backentry *e;

    LOG("=> cache_find_dn (%s)\n", dn, 0, 0);

    /*entry normalized by caller (dn2entry.c) */
    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_dntable, (void *)dn, ndnlen, (void **)&e)) {
        /* need to check entry state */
        if (e->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= cache_find_dn (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        /* taking the first reference pulls the entry off the idle LRU */
        if (e->ep_refcnt == 0)
            lru_delete(cache, e);
        e->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        /* NOTE(review): c_hits is NULL when slapi counters are disabled
         * (see cache_init) -- presumably slapi_counter_increment tolerates
         * NULL; confirm */
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);
    /* on a miss find_hash left e == NULL, so this logs/returns NULL */
    LOG("<= cache_find_dn (%sFOUND)\n", e ? "" : "NOT ", 0, 0);
    return e;
}
- /* lookup an entry in the cache by its id# (you must return it later) */
struct backentry *cache_find_id(struct cache *cache, ID id)
{
    struct backentry *e;

    LOG("=> cache_find_id (%lu)\n", (u_long)id, 0, 0);

    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_idtable, &id, sizeof(ID), (void **)&e)) {
        /* need to check entry state */
        if (e->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= cache_find_id (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        /* taking the first reference pulls the entry off the idle LRU */
        if (e->ep_refcnt == 0)
            lru_delete(cache, e);
        e->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);
    /* on a miss find_hash left e == NULL */
    LOG("<= cache_find_id (%sFOUND)\n", e ? "" : "NOT ", 0, 0);
    return e;
}
- #ifdef UUIDCACHE_ON
- /* lookup an entry in the cache by it's uuid (you must return it later) */
struct backentry *cache_find_uuid(struct cache *cache, const char *uuid)
{
    struct backentry *e;

    LOG("=> cache_find_uuid (%s)\n", uuid, 0, 0);

    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_uuidtable, uuid, strlen(uuid), (void **)&e)) {
        /* need to check entry state */
        if (e->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= cache_find_uuid (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        /* taking the first reference pulls the entry off the idle LRU */
        if (e->ep_refcnt == 0)
            lru_delete(cache, e);
        e->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);
    /* on a miss find_hash left e == NULL */
    LOG("<= cache_find_uuid (%sFOUND)\n", e ? "" : "NOT ", 0, 0);
    return e;
}
- #endif
/* add an entry to the cache */
/*
 * Internal worker for cache_add()/cache_add_tentative().
 *
 * Inserts 'e' under the cache lock into the dn table and -- when
 * state == 0 (a "real" add, not a reservation) -- into the id table and,
 * if built with UUIDCACHE_ON, the uuid table.
 *
 * state: 0 for a normal add, ENTRY_STATE_CREATING to reserve the dn with
 *        a stub that other threads cannot fetch until a later state==0
 *        add confirms it.
 * alt:   if non-NULL and a different entry with the same dn already
 *        exists, a refcounted pointer to that entry is stored here.
 *
 * returns:  0 -- added (or a confirmed previous tentative add)
 *           1 -- an entry already existed (see 'alt' handling below)
 *          -1 -- failure; e is marked ENTRY_STATE_NOTINCACHE
 */
static int cache_add_int(struct cache *cache, struct backentry *e, int state,
                         struct backentry **alt)
{
    /* entries evicted by cache_flush(); freed after the lock is dropped */
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;
    const char *ndn = slapi_sdn_get_ndn(backentry_get_sdn(e));
#ifdef UUIDCACHE_ON
    const char *uuid = slapi_entry_get_uniqueid(e->ep_entry);
#endif
    struct backentry *my_alt;
    int already_in = 0;

    LOG("=> cache_add_int( \"%s\", %ld )\n", backentry_get_ndn(e),
        e->ep_id, 0);
    PR_Lock(cache->c_mutex);
    if (! add_hash(cache->c_dntable, (void *)ndn, strlen(ndn), e,
                   (void **)&my_alt)) {
        LOG("entry \"%s\" already in dn cache\n", backentry_get_ndn(e), 0, 0);
        /* add_hash filled in 'my_alt' if necessary */
        if (my_alt == e)
        {
            /* the entry in the table is this very same pointer */
            if ((e->ep_state & ENTRY_STATE_CREATING) && (state == 0))
            {
                /* attempting to "add" an entry that's already in the cache,
                 * and the old entry was a placeholder and the new one isn't?
                 * sounds like a confirmation of a previous add!
                 */
                LOG("confirming a previous add\n", 0, 0, 0);
                already_in = 1;
                /* fall through: finish populating the id/uuid tables below */
            }
            else
            {
                /* the entry already in the cache and either one of these:
                 * 1) ep_state: CREATING && state: CREATING
                 *    ==> keep protecting the entry; increase the refcnt
                 * 2) ep_state: 0 && state: CREATING
                 *    ==> change the state to CREATING (protect it);
                 *        increase the refcnt
                 * 3) ep_state: 0 && state: 0
                 *    ==> increase the refcnt
                 */
                if (e->ep_refcnt == 0)
                    lru_delete(cache, e);
                e->ep_refcnt++;
                e->ep_state = state; /* might be CREATING */
                /* returning 1 (entry already existed), but don't set to alt
                 * to prevent that the caller accidentally thinks the existing
                 * entry is not the same one the caller has and releases it.
                 */
                PR_Unlock(cache->c_mutex);
                return 1;
            }
        }
        else
        {
            /* a DIFFERENT entry with the same dn is already cached */
            if (my_alt->ep_state & ENTRY_STATE_CREATING)
            {
                /* someone else holds a reservation on this dn */
                LOG("the entry is reserved\n", 0, 0, 0);
                e->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            }
            else if (state != 0)
            {
                /* can't reserve a dn that already has a live entry */
                LOG("the entry already exists. cannot reserve it.\n", 0, 0, 0);
                e->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            }
            else
            {
                /* hand the existing entry back to the caller, refcounted */
                if (alt) {
                    *alt = my_alt;
                    if ((*alt)->ep_refcnt == 0)
                        lru_delete(cache, *alt);
                    (*alt)->ep_refcnt++;
                }
                PR_Unlock(cache->c_mutex);
                return 1;
            }
        }
    }
    /* creating an entry with ENTRY_STATE_CREATING just creates a stub
     * which is only stored in the dn table (basically, reserving the dn) --
     * doing an add later with state==0 will "confirm" the add
     */
    if (state == 0) {
        /* neither of these should fail, or something is very wrong. */
        if (! add_hash(cache->c_idtable, &(e->ep_id), sizeof(ID), e, NULL)) {
            LOG("entry %s already in id cache!\n", backentry_get_ndn(e), 0, 0);
            if (already_in) {
                /* there's a bug in the implementatin of 'modify' and 'modrdn'
                 * that i'm working around here. basically they do a
                 * tentative add of the new (modified) entry, which places
                 * the new entry in the cache, indexed only by dn.
                 *
                 * later they call id2entry_add() on the new entry, which
                 * "adds" the new entry to the cache. unfortunately, that
                 * add will fail, since the old entry is still in the cache,
                 * and both the old and new entries have the same ID and UUID.
                 *
                 * i catch that here, and just return 0 for success, without
                 * messing with either entry. a later cache_replace() will
                 * remove the old entry and add the new one, and all will be
                 * fine (i think).
                 */
                LOG("<= cache_add_int (ignoring)\n", 0, 0, 0);
                PR_Unlock(cache->c_mutex);
                return 0;
            }
            /* roll back the dn-table insertion made above */
            remove_hash(cache->c_dntable, (void *)ndn, strlen(ndn));
            e->ep_state |= ENTRY_STATE_NOTINCACHE;
            PR_Unlock(cache->c_mutex);
            return -1;
        }
#ifdef UUIDCACHE_ON
        if (uuid) {
            /* (only insert entries with a uuid) */
            if (! add_hash(cache->c_uuidtable, (void *)uuid, strlen(uuid), e,
                           NULL)) {
                LOG("entry %s already in uuid cache!\n", backentry_get_ndn(e),
                    0, 0);
                /* roll back both insertions made above */
                remove_hash(cache->c_dntable, (void *)ndn, strlen(ndn));
                remove_hash(cache->c_idtable, &(e->ep_id), sizeof(ID));
                e->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            }
        }
#endif
    }
    e->ep_state = state;
    if (! already_in) {
        /* brand-new cache resident: account for its size and entry count */
        e->ep_refcnt = 1;
        e->size = cache_entry_size(e);

        slapi_counter_add(cache->c_cursize, e->size);
        cache->c_curentries++;
        /* don't add to lru since refcnt = 1 */
        LOG("added entry of size %lu -> total now %lu out of max %lu\n",
            e->size, slapi_counter_get_value(cache->c_cursize), cache->c_maxsize);
        if (cache->c_maxentries >= 0) {
            LOG("    total entries %ld out of %ld\n",
                cache->c_curentries, cache->c_maxentries, 0);
        }
        /* check for full cache, and clear out if necessary */
        if (CACHE_FULL(cache))
            eflush = cache_flush(cache);
    }
    PR_Unlock(cache->c_mutex);

    /* free evicted entries only after dropping the cache lock */
    while (eflush)
    {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
    LOG("<= cache_add_int OK\n", 0, 0, 0);
    return 0;
}
/*
 * Insert 'e' into the cache and take a reference on it; the caller must
 * hand the reference back when finished with the entry.
 *
 * returns:  0 -- entry created and locked (refcounted)
 *           1 -- an entry with this dn already existed
 *          -1 -- something bad happened
 *
 * When 'alt' is non-NULL and a duplicate is found, a refcounted pointer
 * to the pre-existing entry is stored through 'alt', which lets callers
 * that race on the same entry recover cleanly.
 */
int cache_add(struct cache *cache, struct backentry *e, struct backentry **alt)
{
    /* state 0 == a normal, immediately visible add */
    return cache_add_int(cache, e, 0, alt);
}
- /* same as above, but add it tentatively: nobody else can use this entry
- * from the cache until you later call cache_add.
- */
- int cache_add_tentative(struct cache *cache, struct backentry *e,
- struct backentry **alt)
- {
- return cache_add_int(cache, e, ENTRY_STATE_CREATING, alt);
- }
/* locks an entry so that it can be modified (you should have gotten the
 * entry via cache_find_*).
 * returns 0 on success, 1 if the entry is scheduled for deletion.
 */
int cache_lock_entry(struct cache *cache, struct backentry *e)
{
    LOG("=> cache_lock_entry (%s)\n", backentry_get_ndn(e), 0, 0);

    /* Lazily allocate the per-entry mutex on first lock.
     * NOTE(review): this is double-checked locking -- the outer read of
     * ep_mutexp is unsynchronized; presumably benign on the supported
     * platforms, but confirm on weakly-ordered architectures.
     * NOTE(review): PR_NewLock() can return NULL on allocation failure
     * and the result is not checked here. */
    if (! e->ep_mutexp) {
        /* make sure only one thread does this */
        PR_Lock(cache->c_emutexalloc_mutex);
        if (! e->ep_mutexp)
            e->ep_mutexp = PR_NewLock();
        PR_Unlock(cache->c_emutexalloc_mutex);
    }
    /* wait on entry lock (done w/o holding the cache lock) */
    PR_Lock(e->ep_mutexp);

    /* make sure entry hasn't been deleted now */
    PR_Lock(cache->c_mutex);
    if (e->ep_state & (ENTRY_STATE_DELETED|ENTRY_STATE_NOTINCACHE)) {
        /* entry went away while we were waiting; release everything */
        PR_Unlock(cache->c_mutex);
        PR_Unlock(e->ep_mutexp);
        LOG("<= cache_lock_entry (DELETED)\n", 0, 0, 0);
        return 1;
    }
    PR_Unlock(cache->c_mutex);
    LOG("<= cache_lock_entry (FOUND)\n", 0, 0, 0);
    return 0;
}
- /* the opposite of above */
- void cache_unlock_entry(struct cache *cache, struct backentry *e)
- {
- LOG("=> cache_unlock_entry\n", 0, 0, 0);
- PR_Unlock(e->ep_mutexp);
- }
|