mempool.c

/** BEGIN COPYRIGHT BLOCK
 * This Program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; version 2 of the License.
 *
 * This Program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * In addition, as a special exception, Red Hat, Inc. gives You the additional
 * right to link the code of this Program with code not covered under the GNU
 * General Public License ("Non-GPL Code") and to distribute linked combinations
 * including the two, subject to the limitations in this paragraph. Non-GPL Code
 * permitted under this exception must only link to the code of this Program
 * through those well defined interfaces identified in the file named EXCEPTION
 * found in the source code files (the "Approved Interfaces"). The files of
 * Non-GPL Code may instantiate templates or use macros or inline functions from
 * the Approved Interfaces without causing the resulting work to be covered by
 * the GNU General Public License. Only Red Hat, Inc. may make changes or
 * additions to the list of Approved Interfaces. You must obey the GNU General
 * Public License in all respects for all of the Program code and other code used
 * in conjunction with the Program except the Non-GPL Code covered by this
 * exception. If you modify this file, you may extend this exception to your
 * version of the file, but you are not obligated to do so. If you do not wish to
 * provide this exception without modification, you must delete this exception
 * statement from your version and license this file solely under the GPL without
 * exception.
 *
 * Copyright (C) 2008 Red Hat, Inc.
 * All rights reserved.
 * END COPYRIGHT BLOCK **/
#ifdef MEMPOOL_EXPERIMENTAL
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#include <slap.h>
#include <prcountr.h>

/* A freed block is threaded onto its size-class free list by reusing its
 * first word as the next pointer. */
struct mempool_object {
    struct mempool_object *mempool_next;
};

typedef int (*mempool_cleanup_callback)(void *object);
/* number of size classes (2K, 4K, ..., 64M); used by both pool variants */
#define MAX_MEMPOOL 16
#ifdef SHARED_MEMPOOL
/*
 * shared mempool among threads
 * contention causes the performance degradation
 * (Warning: SHARED_MEMPOOL code is obsolete)
 */
#define MEMPOOL_END NULL
static struct mempool {
    const char *mempool_name;
    struct mempool_object *mempool_head;
    PRLock *mempool_mutex;
    mempool_cleanup_callback mempool_cleanup_fn;
    unsigned long mempool_count;
} mempool[] = {
    {"2K", NULL, NULL, NULL, 0},
    {"4K", NULL, NULL, NULL, 0},
    {"8K", NULL, NULL, NULL, 0},
    {"16K", NULL, NULL, NULL, 0},
    {"32K", NULL, NULL, NULL, 0},
    {"64K", NULL, NULL, NULL, 0},
    {"128K", NULL, NULL, NULL, 0},
    {"256K", NULL, NULL, NULL, 0},
    {"512K", NULL, NULL, NULL, 0},
    {"1M", NULL, NULL, NULL, 0},
    {"2M", NULL, NULL, NULL, 0},
    {"4M", NULL, NULL, NULL, 0},
    {"8M", NULL, NULL, NULL, 0},
    {"16M", NULL, NULL, NULL, 0},
    {"32M", NULL, NULL, NULL, 0},
    {"64M", NULL, NULL, NULL, 0},
    {MEMPOOL_END, NULL, NULL, NULL, 0}
};
#else
/*
 * mempool per thread; no lock is needed
 */
#define MEMPOOL_END 0
struct mempool {
    const char *mempool_name;
    struct mempool_object *mempool_head;
    mempool_cleanup_callback mempool_cleanup_fn;
    unsigned long mempool_count;
};
char *mempool_names[] =
{
    "2K", "4K", "8K", "16K",
    "32K", "64K", "128K", "256K",
    "512K", "1M", "2M", "4M",
    "8M", "16M", "32M", "64M"
};
#endif
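
/*
 * Illustrative sketch: entry i of the table above serves blocks of 2^(i+11)
 * bytes, so index 0 is the "2K" class and index 15 the "64M" class.
 * MEMPOOL_USAGE_EXAMPLES is a hypothetical guard that no build defines; the
 * helpers below only spell out that relationship.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
/* Bytes served by size class idx (0 => 2K, ..., 15 => 64M). */
static unsigned long
mempool_example_class_size(int idx)
{
    return 1UL << (idx + 11);
}

static void
mempool_example_check_classes(void)
{
    PR_ASSERT(mempool_example_class_size(0) == 2048);                   /* "2K"  */
    PR_ASSERT(mempool_example_class_size(1) == 4096);                   /* "4K"  */
    PR_ASSERT(mempool_example_class_size(MAX_MEMPOOL - 1) == 67108864); /* "64M" */
}
#endif /* MEMPOOL_USAGE_EXAMPLES */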
static PRUintn mempool_index; /* thread private index used to store the
                               * per-thread mempool via the NSPR
                               * thread-private-data API */
static void mempool_destroy();

/*
 * mempool_init creates the NSPR thread private index, then allocates the
 * per-thread array of pools. It is invoked lazily from the first
 * mempool_return call on each thread.
 */
static void
mempool_init(struct mempool **my_mempool)
{
    int i;
    if (NULL == my_mempool) {
        return;
    }
#ifdef SHARED_MEMPOOL
    for (i = 0; MEMPOOL_END != mempool[i].mempool_name; i++) {
        mempool[i].mempool_mutex = PR_NewLock();
        if (NULL == mempool[i].mempool_mutex) {
            PRErrorCode ec = PR_GetError();
            slapi_log_error (SLAPI_LOG_FATAL, "mempool", "mempool_init: "
                "failed to create mutex - (%d - %s); mempool(%s) is disabled",
                ec, slapd_pr_strerror(ec), mempool[i].mempool_name);
            /* mempool_mutex stays NULL; this pool remains disabled */
        }
    }
#else
    PR_NewThreadPrivateIndex (&mempool_index, mempool_destroy);
    *my_mempool = (struct mempool *)slapi_ch_calloc(MAX_MEMPOOL, sizeof(struct mempool));
    for (i = 0; i < MAX_MEMPOOL; i++) {
        (*my_mempool)[i].mempool_name = mempool_names[i];
    }
#endif
}
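
/*
 * As written, mempool_init calls PR_NewThreadPrivateIndex every time a thread
 * finds its pool missing, so each initializing thread allocates a fresh index
 * and overwrites mempool_index. A minimal sketch of a once-only variant built
 * on NSPR's PR_CallOnce follows; MEMPOOL_USAGE_EXAMPLES is a hypothetical
 * guard and the helpers are illustrative, not the module's actual flow.
 */
#if defined(MEMPOOL_USAGE_EXAMPLES) && !defined(SHARED_MEMPOOL)
static PRCallOnceType mempool_example_once;

static PRStatus
mempool_example_create_index(void)
{
    /* Create the thread private index exactly once, process-wide. */
    return PR_NewThreadPrivateIndex(&mempool_index, mempool_destroy);
}

static void
mempool_example_init_once(struct mempool **my_mempool)
{
    int i;

    (void)PR_CallOnce(&mempool_example_once, mempool_example_create_index);
    *my_mempool = (struct mempool *)slapi_ch_calloc(MAX_MEMPOOL, sizeof(struct mempool));
    for (i = 0; i < MAX_MEMPOOL; i++) {
        (*my_mempool)[i].mempool_name = mempool_names[i];
    }
}
#endif /* MEMPOOL_USAGE_EXAMPLES && !SHARED_MEMPOOL */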
/*
 * mempool_destroy is the cleanup callback registered with the NSPR thread
 * private index.
 */
static void
mempool_destroy()
{
    int i = 0;
    struct mempool *my_mempool;
#ifdef SHARED_MEMPOOL
    for (i = 0; MEMPOOL_END != mempool[i].mempool_name; i++) {
        struct mempool_object *object = NULL;
        if (NULL == mempool[i].mempool_mutex) {
            /* mutex is NULL; this mempool is not enabled */
            continue;
        }
        object = mempool[i].mempool_head;
        mempool[i].mempool_head = NULL;
        while (NULL != object) {
            struct mempool_object *next = object->mempool_next;
            if (NULL != mempool[i].mempool_cleanup_fn) {
                (mempool[i].mempool_cleanup_fn)((void *)object);
            }
            /* free the underlying malloc'ed block directly; going through
             * slapi_ch_free would just push it back into the pool */
            free((char *)object - sizeof(unsigned long));
            object = next;
        }
        PR_DestroyLock(mempool[i].mempool_mutex);
        mempool[i].mempool_mutex = NULL;
    }
#else
    my_mempool = (struct mempool *)PR_GetThreadPrivate(mempool_index);
    if (NULL == my_mempool || my_mempool[0].mempool_name != mempool_names[0]) {
        /* mempool is not initialized */
        return;
    }
    for (i = 0; i < MAX_MEMPOOL; i++) {
        struct mempool_object *object = my_mempool[i].mempool_head;
        while (NULL != object) {
            struct mempool_object *next = object->mempool_next;
            if (NULL != my_mempool[i].mempool_cleanup_fn) {
                (my_mempool[i].mempool_cleanup_fn)((void *)object);
            }
            /* free the underlying malloc'ed block directly; going through
             * slapi_ch_free would just push it back into the pool */
            free((char *)object - sizeof(unsigned long));
            object = next;
        }
        my_mempool[i].mempool_head = NULL;
        my_mempool[i].mempool_count = 0;
    }
    slapi_ch_free((void **)&my_mempool);
    PR_SetThreadPrivate (mempool_index, (void *)NULL);
#endif
}
/*
 * return memory to memory pool
 * (The cleanup callback was intended to release memory nested inside the
 * returned area. Originally an object had structure and could point to other
 * memory areas, but the current code (#else) expects a flat block, so the
 * cleanup callback is not needed.)
 * The current code (#else) uses the memory pool stored in the
 * per-thread private data.
 */
int
mempool_return(int type, void *object, mempool_cleanup_callback cleanup)
{
    PR_ASSERT(type >= 0 && type < MAX_MEMPOOL);

    if (!config_get_mempool_switch()) {
        return LDAP_SUCCESS; /* memory pool: off */
    }
#ifdef SHARED_MEMPOOL
    if (NULL == mempool[type].mempool_mutex) {
        /* mutex is NULL; this mempool is not enabled */
        return LDAP_SUCCESS;
    }
    PR_Lock(mempool[type].mempool_mutex);
    ((struct mempool_object *)object)->mempool_next = mempool[type].mempool_head;
    mempool[type].mempool_head = (struct mempool_object *)object;
    mempool[type].mempool_cleanup_fn = cleanup;
    mempool[type].mempool_count++;
    PR_Unlock(mempool[type].mempool_mutex);
    return LDAP_SUCCESS;
#else
    {
        struct mempool *my_mempool;
        int maxfreelist;
        my_mempool = (struct mempool *)PR_GetThreadPrivate(mempool_index);
        if (NULL == my_mempool || my_mempool[0].mempool_name != mempool_names[0]) {
            /* mempool is not initialized */
            mempool_init(&my_mempool);
        }
        maxfreelist = config_get_mempool_maxfreelist();
        if ((maxfreelist > 0) && (my_mempool[type].mempool_count > maxfreelist)) {
            /* this free list is already at its limit; let the caller free the block */
            return LDAP_UNWILLING_TO_PERFORM;
        } else {
            ((struct mempool_object *)object)->mempool_next = my_mempool[type].mempool_head;
            my_mempool[type].mempool_head = (struct mempool_object *)object;
            my_mempool[type].mempool_cleanup_fn = cleanup;
            my_mempool[type].mempool_count++;
            PR_SetThreadPrivate (mempool_index, (void *)my_mempool);
            return LDAP_SUCCESS;
        }
    }
#endif
}
/*
 * get memory from memory pool
 * The current code (#else) uses the memory pool stored in the
 * per-thread private data.
 */
void *
mempool_get(int type)
{
    struct mempool_object *object = NULL;
    struct mempool *my_mempool;

    PR_ASSERT(type >= 0 && type < MAX_MEMPOOL);

    if (!config_get_mempool_switch()) {
        return NULL; /* memory pool: off */
    }
#ifdef SHARED_MEMPOOL
    if (NULL == mempool[type].mempool_mutex) {
        /* mutex is NULL; this mempool is not enabled */
        return NULL;
    }
    PR_Lock(mempool[type].mempool_mutex);
    object = mempool[type].mempool_head;
    if (NULL != object) {
        mempool[type].mempool_head = object->mempool_next;
        mempool[type].mempool_count--;
        object->mempool_next = NULL;
    }
    PR_Unlock(mempool[type].mempool_mutex);
#else
    my_mempool = (struct mempool *)PR_GetThreadPrivate(mempool_index);
    if (NULL == my_mempool || my_mempool[0].mempool_name != mempool_names[0]) {
        /* mempool is not initialized */
        return NULL;
    }
    object = my_mempool[type].mempool_head;
    if (NULL != object) {
        my_mempool[type].mempool_head = object->mempool_next;
        my_mempool[type].mempool_count--;
        object->mempool_next = NULL;
        PR_SetThreadPrivate (mempool_index, (void *)my_mempool);
    }
#endif
    return object;
}
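
/*
 * A usage sketch for the pair above, mirroring what slapi_ch_malloc and
 * slapi_ch_free below actually do. MEMPOOL_USAGE_EXAMPLES is a hypothetical
 * guard; the function is illustrative only.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
static void
mempool_example_round_trip(void)
{
    /* Class 0 holds 2KB blocks (2^11 bytes, hidden size header included). */
    struct mempool_object *block = (struct mempool_object *)mempool_get(0);

    if (NULL == block) {
        /* Pool empty (or disabled) for this thread; the real callers fall
         * back to malloc() and stamp the hidden size header themselves. */
        return;
    }

    /* ... use the block ... */

    /* Hand it back; LDAP_UNWILLING_TO_PERFORM means this class's free list
     * is already at its configured limit, so the caller frees it instead. */
    if (LDAP_SUCCESS != mempool_return(0, block, (mempool_cleanup_callback)NULL)) {
        free((char *)block - sizeof(unsigned long));
    }
}
#endif /* MEMPOOL_USAGE_EXAMPLES */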
/*****************************************************************************
 * The rest is slapi_ch_malloc and its friends, adjusted to use the mempool.
 * The challenge is that mempool_return needs to know the size of the memory,
 * but free does not pass that information. To work around it, malloc
 * allocates extra space in front of the memory it returns and stores the
 * size in that extra space.
 *
 * Also, to simplify the code, it allocates the smallest 2^n size that can
 * hold the requested size. We should make the granularity finer for real use.
 *
 * Above 64MB, the functions call mmap directly. The reason for choosing mmap
 * over the mempool is that pooled memory stays allocated until the server is
 * shut down even if it is never requested again, whereas mmap'ed memory is
 * returned to the system and is guaranteed to shrink the process size.
 *
 * The behavior therefore depends on the requested size plus the size header
 * (an unsigned long):
 *   1B ~ 1KB        : call the system *alloc/free; the size still has to be
 *                     stored so that realloc can tell whether the passed
 *                     address is the real address or shifted by the header.
 *   1KB + 1B ~ 64MB : use the mempool
 *   64MB + 1B ~     : call mmap
 */

#include <sys/mman.h>
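
/*
 * To make the size regimes above concrete: a request of `size` bytes is
 * padded with one unsigned long for the hidden size header, and that padded
 * length (lsize) picks the path. The sketch below mirrors the rounding loop
 * used by slapi_ch_malloc and friends; MEMPOOL_USAGE_EXAMPLES is a
 * hypothetical guard and the helpers are illustrative only.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
/* Smallest n with 2^n >= lsize; the mempool class is then n - 11 (2^11 = 2K). */
static int
mempool_example_log2_roundup(unsigned long lsize)
{
    unsigned long roundup = 1;
    int n = 0;

    while (roundup < lsize) {
        roundup <<= 1;
        n++;
    }
    return n;
}

static void
mempool_example_size_regimes(void)
{
    /* 1000B request: lsize = 1000 + sizeof(unsigned long) <= 1024, plain malloc/free. */
    PR_ASSERT(1000 + sizeof(unsigned long) <= 1024);

    /* 3000B request: lsize rounds up to 4096 = 2^12, mempool class 12 - 11 = 1 ("4K"). */
    PR_ASSERT(mempool_example_log2_roundup(3000 + sizeof(unsigned long)) == 12);

    /* 100MB request: lsize > 67108864 (64MB), so mmap/munmap is used instead. */
    PR_ASSERT(100UL * 1024 * 1024 + sizeof(unsigned long) > 67108864);
}
#endif /* MEMPOOL_USAGE_EXAMPLES */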
static int slapi_ch_munmap_no_roundup(void **start, unsigned long len);
char *slapi_ch_mmap(unsigned long len);

static int counters_created = 0;
PR_DEFINE_COUNTER(slapi_ch_counter_malloc);
PR_DEFINE_COUNTER(slapi_ch_counter_calloc);
PR_DEFINE_COUNTER(slapi_ch_counter_realloc);
PR_DEFINE_COUNTER(slapi_ch_counter_strdup);
PR_DEFINE_COUNTER(slapi_ch_counter_free);
PR_DEFINE_COUNTER(slapi_ch_counter_created);
PR_DEFINE_COUNTER(slapi_ch_counter_exist);

#define OOM_PREALLOC_SIZE 65536
static void *oom_emergency_area = NULL;
static PRLock *oom_emergency_lock = NULL;

#define SLAPD_MODULE "memory allocator"

static const char* const oom_advice =
    "\nThe server has probably allocated all available virtual memory. To solve\n"
    "this problem, make more virtual memory available to your server, or reduce\n"
    "one or more of the following server configuration settings:\n"
    " nsslapd-cachesize (Database Settings - Maximum entries in cache)\n"
    " nsslapd-cachememsize (Database Settings - Memory available for cache)\n"
    " nsslapd-dbcachesize (LDBM Plug-in Settings - Maximum cache size)\n"
    " nsslapd-import-cachesize (LDBM Plug-in Settings - Import cache size).\n"
    "Can't recover; calling exit(1).\n";
static void
create_counters()
{
    PR_CREATE_COUNTER(slapi_ch_counter_malloc,"slapi_ch","malloc","");
    PR_CREATE_COUNTER(slapi_ch_counter_calloc,"slapi_ch","calloc","");
    PR_CREATE_COUNTER(slapi_ch_counter_realloc,"slapi_ch","realloc","");
    PR_CREATE_COUNTER(slapi_ch_counter_strdup,"slapi_ch","strdup","");
    PR_CREATE_COUNTER(slapi_ch_counter_free,"slapi_ch","free","");
    PR_CREATE_COUNTER(slapi_ch_counter_created,"slapi_ch","created","");
    PR_CREATE_COUNTER(slapi_ch_counter_exist,"slapi_ch","exist","");

    /* ensure that we have space to allow for shutdown calls to malloc()
     * should we run out of memory */
    if (oom_emergency_area == NULL) {
        oom_emergency_area = malloc(OOM_PREALLOC_SIZE);
    }
    oom_emergency_lock = PR_NewLock();
}
static void
log_negative_alloc_msg( const char *op, const char *units, unsigned long size )
{
    slapi_log_error( SLAPI_LOG_FATAL, SLAPD_MODULE,
        "cannot %s %lu %s;\n"
        "trying to allocate 0 or a negative number of %s is not portable and\n"
        "gives different results on different platforms.\n",
        op, size, units, units );
}
static char *
slapi_ch_malloc_core( unsigned long lsize )
{
    char *newmem;

    if ( (newmem = (char *) malloc( lsize )) == NULL ) {
        int oserr = errno;
        oom_occurred();
        slapi_log_error( SLAPI_LOG_FATAL, SLAPD_MODULE,
            "malloc of %lu bytes failed; OS error %d (%s)%s\n",
            lsize, oserr, slapd_system_strerror( oserr ), oom_advice );
        exit( 1 );
    }
    *(unsigned long *)newmem = lsize;
    newmem += sizeof(unsigned long);
    return newmem;
}
char *
slapi_ch_malloc( unsigned long size )
{
    char *newmem;
    unsigned long lsize;

    if (size <= 0) {
        log_negative_alloc_msg( "malloc", "bytes", size );
        return 0;
    }
    lsize = size + sizeof(unsigned long);
    if (lsize <= 1024) {
        newmem = slapi_ch_malloc_core( lsize );
    } else if (lsize <= 67108864) {
        /* 2KB ~ 64MB: satisfy the request from the memory pool */
        unsigned long roundup = 1;
        int n = 0;
        /* find the smallest power of two (2^n) that holds lsize */
        while (1) {
            roundup <<= 1;
            n++;
            if (roundup >= lsize) {
                break;
            }
        }
        PR_ASSERT(n >= 11 && n <= 26);
        newmem = (char *)mempool_get(n-11); /* 11: 2^11 = 2K */
        if (NULL == newmem) {
            newmem = slapi_ch_malloc_core( roundup );
        }
    } else {
        newmem = slapi_ch_mmap( size );
    }
    if(!counters_created)
    {
        create_counters();
        counters_created = 1;
    }
    PR_INCREMENT_COUNTER(slapi_ch_counter_malloc);
    PR_INCREMENT_COUNTER(slapi_ch_counter_created);
    PR_INCREMENT_COUNTER(slapi_ch_counter_exist);
#if defined(_WIN32) && defined(DEBUG)
    if(recording)
    {
        add_memory_record(newmem,size);
    }
#endif
    return( newmem );
}
static char *
slapi_ch_realloc_core( char *block, unsigned long lsize )
{
    char *realblock;
    char *newmem;

    realblock = block - sizeof(unsigned long);
    if ( (newmem = (char *) realloc( realblock, lsize )) == NULL ) {
        int oserr = errno;
        oom_occurred();
        slapi_log_error( SLAPI_LOG_FATAL, SLAPD_MODULE,
            "realloc of %lu bytes failed; OS error %d (%s)%s\n",
            lsize, oserr, slapd_system_strerror( oserr ), oom_advice );
        exit( 1 );
    }
    *(unsigned long *)newmem = lsize;
    newmem += sizeof(unsigned long);
    return newmem;
}
char *
slapi_ch_realloc( char *block, unsigned long size )
{
    char *newmem;
    unsigned long lsize;
    unsigned long origsize;
    char *realblock;
    char *realnewmem;

    if ( block == NULL ) {
        return( slapi_ch_malloc( size ) );
    }
    if (size <= 0) {
        log_negative_alloc_msg( "realloc", "bytes", size );
        return block;
    }
    lsize = size + sizeof(unsigned long);
    if (lsize <= 1024) {
        newmem = slapi_ch_realloc_core( block, lsize );
    } else if (lsize <= 67108864) {
        /* 2KB ~ 64MB: satisfy the request from the memory pool */
        unsigned long roundup = 1;
        int n = 0;
        while (1) {
            roundup <<= 1;
            n++;
            if (roundup >= lsize) {
                break;
            }
        }
        PR_ASSERT(n >= 11 && n <= 26);
        realblock = block - sizeof(unsigned long);
        origsize = *(unsigned long *)realblock - sizeof(unsigned long);
        newmem = (char *)mempool_get(n-11); /* 11: 2^11 = 2K */
        if (NULL == newmem) {
            if (*(unsigned long *)realblock > 67108864) {
                /* the original block was mmapped; realloc() must not touch it */
                newmem = slapi_ch_malloc_core( roundup );
                memcpy(newmem, block, (origsize < size) ? origsize : size);
                slapi_ch_free_string(&block);
            } else {
                newmem = slapi_ch_realloc_core( block, roundup );
            }
        } else {
            /* copy no more than the smaller of the old usable size and the new size */
            memcpy(newmem, block, (origsize < size) ? origsize : size);
            slapi_ch_free_string(&block);
        }
    } else {
        realblock = block - sizeof(unsigned long);
        origsize = *(unsigned long *)realblock - sizeof(unsigned long);
        newmem = slapi_ch_mmap( size );
        memcpy(newmem, block, (origsize < size) ? origsize : size);
        realnewmem = newmem - sizeof(unsigned long);
        *(unsigned long *)realnewmem = lsize;
        slapi_ch_free_string(&block);
    }
    if(!counters_created)
    {
        create_counters();
        counters_created = 1;
    }
    PR_INCREMENT_COUNTER(slapi_ch_counter_realloc);
#if defined(_WIN32) && defined(DEBUG)
    if(recording)
    {
        remove_memory_record(block);
        add_memory_record(newmem,size);
    }
#endif
    return( newmem );
}
static char *
slapi_ch_calloc_core( unsigned long lsize )
{
    char *newmem;

    if ( (newmem = (char *) calloc( 1, lsize )) == NULL ) {
        int oserr = errno;
        oom_occurred();
        slapi_log_error( SLAPI_LOG_FATAL, SLAPD_MODULE,
            "calloc of %lu bytes failed; OS error %d (%s)%s\n",
            lsize, oserr, slapd_system_strerror( oserr ), oom_advice );
        exit( 1 );
    }
    *(unsigned long *)newmem = lsize;
    newmem += sizeof(unsigned long);
    return newmem;
}
char *
slapi_ch_calloc( unsigned long nelem, unsigned long size )
{
    char *newmem;
    unsigned long lsize;

    if (size <= 0) {
        log_negative_alloc_msg( "calloc", "bytes", size );
        return 0;
    }
    if (nelem <= 0) {
        log_negative_alloc_msg( "calloc", "elements", nelem );
        return 0;
    }
    lsize = nelem * size + sizeof(unsigned long);
    if (lsize <= 1024) {
        newmem = slapi_ch_calloc_core( lsize );
    } else if (lsize <= 67108864) {
        /* 2KB ~ 64MB: satisfy the request from the memory pool */
        unsigned long roundup = 1;
        int n = 0;
        while (1) {
            roundup <<= 1;
            n++;
            if (roundup >= lsize) {
                break;
            }
        }
        PR_ASSERT(n >= 11 && n <= 26);
        newmem = (char *)mempool_get(n-11); /* 11: 2^11 = 2K */
        if (NULL == newmem) {
            newmem = slapi_ch_calloc_core( roundup );
        } else {
            memset (newmem, 0, size * nelem);
        }
    } else {
        unsigned long mysize = size * nelem;
        newmem = slapi_ch_mmap( mysize );
        memset(newmem, 0, mysize);
    }
    if(!counters_created)
    {
        create_counters();
        counters_created = 1;
    }
    PR_INCREMENT_COUNTER(slapi_ch_counter_calloc);
    PR_INCREMENT_COUNTER(slapi_ch_counter_created);
    PR_INCREMENT_COUNTER(slapi_ch_counter_exist);
#if defined(_WIN32) && defined(DEBUG)
    if(recording)
    {
        add_memory_record(newmem,size);
    }
#endif
    return( newmem );
}
char *
slapi_ch_strdup ( const char* s1 )
{
    char* newmem;
    unsigned long lsize;

    /* strdup pukes on NULL strings...bail out now */
    if(NULL == s1)
        return NULL;
    lsize = strlen(s1) + sizeof(unsigned long) + 1;
    newmem = slapi_ch_malloc( lsize );
    sprintf(newmem, "%s", s1);
    if(!counters_created)
    {
        create_counters();
        counters_created = 1;
    }
    PR_INCREMENT_COUNTER(slapi_ch_counter_strdup);
    PR_INCREMENT_COUNTER(slapi_ch_counter_created);
    PR_INCREMENT_COUNTER(slapi_ch_counter_exist);
#if defined(_WIN32) && defined(DEBUG)
    if(recording)
    {
        add_memory_record(newmem,strlen(s1)+1);
    }
#endif
    return newmem;
}

/*
 * Function: slapi_ch_free
 *
 * Returns: nothing
 *
 * Description: frees the pointer, and then sets it to NULL to
 * prevent free-memory writes.
 * Note: pass in the address of the pointer you want to free.
 * Note: you can pass in null pointers, it's cool.
 *
 * Implementation: get the size from the size space, and determine the behavior
 * based upon the size:
 *   1B ~ 1KB        : call system free
 *   1KB + 1B ~ 64MB : return memory to mempool
 *   64MB + 1B ~     : call munmap
 */
void
slapi_ch_free(void **ptr)
{
    void *realptr;
    unsigned long size;

    if (ptr==NULL || *ptr == NULL){
        return;
    }
#if defined(_WIN32) && defined(DEBUG)
    if(recording)
    {
        remove_memory_record(*ptr);
    }
#endif
    realptr = (void *)((char *)*ptr - sizeof(unsigned long));
    size = *(unsigned long *)realptr;
    if (size <= 1024) {
        free (realptr);
    } else if (size <= 67108864) {
        /* return 2KB ~ 64MB memory to memory pool */
        unsigned long roundup = 1;
        int n = 0;
        int rc = LDAP_SUCCESS;
        while (1) {
            roundup <<= 1;
            n++;
            if (roundup >= size) {
                break;
            }
        }
        PR_ASSERT(n >= 11 && n <= 26);
        rc = mempool_return(n-11, *ptr, (mempool_cleanup_callback)NULL);
        if (LDAP_SUCCESS != rc) {
            free (realptr);
        }
    } else {
        /* the stored size of an mmapped block already covers the hidden
         * header, which slapi_ch_munmap_no_roundup adds back in */
        slapi_ch_munmap_no_roundup( ptr, size - sizeof(unsigned long) );
    }
    *ptr = NULL;
    if(!counters_created)
    {
        create_counters();
        counters_created = 1;
    }
    PR_INCREMENT_COUNTER(slapi_ch_counter_free);
    PR_DECREMENT_COUNTER(slapi_ch_counter_exist);
    return;
}
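
/*
 * A usage sketch for the allocator entry points: slapi_ch_free takes the
 * address of the pointer and NULLs it on the way out. MEMPOOL_USAGE_EXAMPLES
 * is a hypothetical guard; the function is illustrative only.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
static void
slapi_ch_example_alloc_free(void)
{
    /* 3000 bytes lands in the "4K" class; the hidden header records 4096. */
    char *buf = slapi_ch_malloc(3000);

    memcpy(buf, "hello", 6);

    /* Pass the address of the pointer: the block goes back to this thread's
     * mempool (or is freed/unmapped) and buf is reset to NULL. */
    slapi_ch_free((void **)&buf);
    PR_ASSERT(NULL == buf);

    /* Freeing a NULL pointer is a no-op. */
    slapi_ch_free((void **)&buf);
}
#endif /* MEMPOOL_USAGE_EXAMPLES */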
char *
slapi_ch_mmap(unsigned long len)
{
    char *newmem;
    long sc_page_size = config_get_system_page_size();
    int sc_page_bits = config_get_system_page_bits();
    /* round up to whole pages, counting the hidden size header as well so
     * that the caller still gets len usable bytes */
    unsigned long lsize = len + sizeof(unsigned long);
    unsigned long roundup = (lsize&(sc_page_size-1))?(((lsize>>sc_page_bits)+1)<<sc_page_bits):lsize;
    if ( (newmem = (char *)mmap(NULL, roundup, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0/*ignored */)) == MAP_FAILED ) {
        int oserr = errno;
        oom_occurred();
        slapi_log_error( SLAPI_LOG_FATAL, SLAPD_MODULE,
            "mmap of %lu bytes failed; OS error %d (%s)%s\n",
            roundup, oserr, slapd_system_strerror( oserr ), oom_advice );
        exit( 1 );
    }
    *(unsigned long *)newmem = roundup; /* total mapping length, header included */
    newmem += sizeof(unsigned long);
    return( newmem );
}
int
slapi_ch_munmap(void **start, unsigned long len)
{
    long sc_page_size = config_get_system_page_size();
    int sc_page_bits = config_get_system_page_bits();
    /* mirror slapi_ch_mmap: the mapping also covers the hidden size header */
    unsigned long lsize = len + sizeof(unsigned long);
    unsigned long roundup = (lsize&(sc_page_size-1))?(((lsize>>sc_page_bits)+1)<<sc_page_bits):lsize;
    void *realstart = *start - sizeof(unsigned long);
    int rc = munmap(realstart, roundup);
    if (0 != rc) {
        int oserr = errno;
        slapi_log_error( SLAPI_LOG_FATAL, SLAPD_MODULE,
            "munmap of %lu bytes failed; OS error %d (%s)\n",
            roundup, oserr, slapd_system_strerror( oserr ) );
        /* Leaked. This should not happen */
    }
    *start = NULL;
    return rc;
}
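
/*
 * The rounding expression in the two functions above bumps a length to the
 * next multiple of the system page size unless it is already one. A worked
 * check assuming 4096-byte pages (page_bits 12); MEMPOOL_USAGE_EXAMPLES is a
 * hypothetical guard and the helpers are illustrative only.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
static unsigned long
slapi_ch_example_page_roundup(unsigned long len, unsigned long page_size, int page_bits)
{
    return (len & (page_size - 1)) ? (((len >> page_bits) + 1) << page_bits) : len;
}

static void
slapi_ch_example_page_roundup_check(void)
{
    PR_ASSERT(slapi_ch_example_page_roundup(1, 4096, 12) == 4096);
    PR_ASSERT(slapi_ch_example_page_roundup(4096, 4096, 12) == 4096);
    PR_ASSERT(slapi_ch_example_page_roundup(4097, 4096, 12) == 8192);
}
#endif /* MEMPOOL_USAGE_EXAMPLES */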
static char *
slapi_ch_mmap_no_roundup( unsigned long size)
{
    char *newmem;
    unsigned long mysize;
    if ( (newmem = (char *)mmap(NULL, size + sizeof(unsigned long),
              PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
              -1, 0/*ignored */)) == MAP_FAILED ) {
        int oserr = errno;
        oom_occurred();
        slapi_log_error( SLAPI_LOG_FATAL, SLAPD_MODULE,
            "mmap of %lu bytes failed; OS error %d (%s)%s\n",
            size + sizeof(unsigned long), oserr,
            slapd_system_strerror( oserr ), oom_advice );
        exit( 1 );
    }
    *(unsigned long *)newmem = size;
    newmem += sizeof(unsigned long);
    return newmem;
}
static int
slapi_ch_munmap_no_roundup(void **start, unsigned long len)
{
    void *realstart = *start - sizeof(unsigned long);
    unsigned long reallen = len + sizeof(unsigned long);
    int rc = munmap(realstart, reallen);
    if (0 != rc) {
        int oserr = errno;
        slapi_log_error( SLAPI_LOG_FATAL, SLAPD_MODULE,
            "munmap of %lu bytes failed; OS error %d (%s)\n",
            len, oserr, slapd_system_strerror( oserr ) );
        /* Leaked. This should not happen */
    }
    *start = NULL;
    return rc;
}
/*
 * This function is just like PR_smprintf. It works like sprintf except that
 * it allocates enough memory to hold the result string and returns that
 * allocated memory to the caller. The caller must use slapi_ch_free_string
 * to free the memory. It should only be used in situations where the memory
 * will eventually be freed with slapi_ch_free_string, e.g. allocating a
 * string that will be freed as part of pblock cleanup, or passed in to
 * create a Slapi_DN, or things of that nature. If you have control of the
 * flow such that the memory will be allocated and freed in the same scope,
 * it is better to use PR_smprintf and PR_smprintf_free instead because they
 * are likely faster.
 */
/*
 * This implementation is the same as PR_smprintf.
 * The above comment does not apply to this function for now.
 * See [150809] for more details.
 * WARNING - with this fix we are now mixing PR_Malloc with slapi_ch_free,
 * which is OK for now - they both use malloc/free from the operating system.
 * But if this changes in the future, this function will have to change as
 * well.
 */
char *
slapi_ch_smprintf(const char *fmt, ...)
{
    char *p = NULL, *q = NULL;
    va_list ap;

    if (NULL == fmt) {
        return NULL;
    }

    va_start(ap, fmt);
    p = PR_vsmprintf(fmt, ap);
    va_end(ap);

    q = slapi_ch_strdup (p); /* ugly; is there a better way? */
    free(p);

    return q;
}
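
/*
 * A usage sketch for the contract described above: the result must be
 * released with slapi_ch_free_string. MEMPOOL_USAGE_EXAMPLES is a
 * hypothetical guard; the function is illustrative only.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
static void
slapi_ch_example_smprintf(void)
{
    char *msg = slapi_ch_smprintf("pool %s holds %d blocks", "4K", 42);

    if (NULL != msg) {
        /* ... hand msg to something that takes ownership, or ... */
        slapi_ch_free_string(&msg); /* frees the string and NULLs msg */
    }
}
#endif /* MEMPOOL_USAGE_EXAMPLES */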
#endif /* MEMPOOL_EXPERIMENTAL */