/** BEGIN COPYRIGHT BLOCK
 * Copyright (C) 2008 Red Hat, Inc.
 * All rights reserved.
 *
 * License: GPL (version 3 or any later version).
 * See LICENSE for details.
 * END COPYRIGHT BLOCK **/
#ifdef MEMPOOL_EXPERIMENTAL
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <slap.h>
#include <prcountr.h>

struct mempool_object {
    struct mempool_object *mempool_next;
};

typedef int (*mempool_cleanup_callback)(void *object);

/* number of size classes: 2K, 4K, ..., 64M (16 powers of two) */
#define MAX_MEMPOOL 16

#ifdef SHARED_MEMPOOL
/*
 * mempool shared among threads;
 * lock contention degrades performance
 * (Warning: the SHARED_MEMPOOL code is obsolete)
 */
#define MEMPOOL_END NULL
static struct mempool {
    const char *mempool_name;
    struct mempool_object *mempool_head;
    PRLock *mempool_mutex;
    mempool_cleanup_callback mempool_cleanup_fn;
    unsigned long mempool_count;
} mempool[] = {
    {"2K", NULL, NULL, NULL, 0},
    {"4K", NULL, NULL, NULL, 0},
    {"8K", NULL, NULL, NULL, 0},
    {"16K", NULL, NULL, NULL, 0},
    {"32K", NULL, NULL, NULL, 0},
    {"64K", NULL, NULL, NULL, 0},
    {"128K", NULL, NULL, NULL, 0},
    {"256K", NULL, NULL, NULL, 0},
    {"512K", NULL, NULL, NULL, 0},
    {"1M", NULL, NULL, NULL, 0},
    {"2M", NULL, NULL, NULL, 0},
    {"4M", NULL, NULL, NULL, 0},
    {"8M", NULL, NULL, NULL, 0},
    {"16M", NULL, NULL, NULL, 0},
    {"32M", NULL, NULL, NULL, 0},
    {"64M", NULL, NULL, NULL, 0},
    {MEMPOOL_END, NULL, NULL, NULL, 0}
};
#else
/*
 * one mempool per thread; no lock is needed
 */
#define MEMPOOL_END 0
struct mempool {
    const char *mempool_name;
    struct mempool_object *mempool_head;
    mempool_cleanup_callback mempool_cleanup_fn;
    unsigned long mempool_count;
};

char *mempool_names[] = {
    "2K",   "4K",   "8K",   "16K",
    "32K",  "64K",  "128K", "256K",
    "512K", "1M",   "2M",   "4M",
    "8M",   "16M",  "32M",  "64M"
};
#endif
static PRUintn mempool_index; /* NSPR thread-private index under which each
                               * thread's mempool array is stored */
static void mempool_destroy(void *arg);

/*
 * mempool_init creates the NSPR thread-private index, then allocates
 * the per-thread mempool array. The array is attached to the thread
 * (PR_SetThreadPrivate) on the first mempool_return.
 */
static void
mempool_init(struct mempool **my_mempool)
{
    int i;
    if (NULL == my_mempool) {
        return;
    }
#ifdef SHARED_MEMPOOL
    for (i = 0; MEMPOOL_END != mempool[i].mempool_name; i++) {
        mempool[i].mempool_mutex = PR_NewLock();
        if (NULL == mempool[i].mempool_mutex) {
            PRErrorCode ec = PR_GetError();
            slapi_log_error(SLAPI_LOG_FATAL, "mempool", "mempool_init: "
                "failed to create mutex - (%d - %s); mempool(%s) is disabled",
                ec, slapd_pr_strerror(ec), mempool[i].mempool_name);
            /* leave mempool_mutex NULL; this pool stays disabled */
        }
    }
#else
    PR_NewThreadPrivateIndex(&mempool_index, mempool_destroy);
    *my_mempool = (struct mempool *)slapi_ch_calloc(MAX_MEMPOOL, sizeof(struct mempool));
    for (i = 0; i < MAX_MEMPOOL; i++) {
        (*my_mempool)[i].mempool_name = mempool_names[i];
    }
#endif
}
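/*
 * A minimal sketch (guarded by the hypothetical MEMPOOL_USAGE_EXAMPLES
 * macro, never defined in the build) of the NSPR thread-private pattern
 * mempool_init relies on: create an index once, stash a per-thread
 * pointer under it, and let the registered destructor reclaim the value
 * at thread exit.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
static PRUintn example_index;
static int example_index_created = 0;

static void
example_dtor(void *priv)
{
    /* NSPR invokes this with the thread's stored value at thread exit */
    free(priv);
}

static void
example_thread_local_counter(void)
{
    int *slot;
    if (!example_index_created) { /* create the index once, process wide */
        PR_NewThreadPrivateIndex(&example_index, example_dtor);
        example_index_created = 1;
    }
    slot = (int *)PR_GetThreadPrivate(example_index);
    if (NULL == slot) {
        slot = (int *)calloc(1, sizeof(int));
        PR_SetThreadPrivate(example_index, slot); /* visible only to this thread */
    }
    (*slot)++;
}
#endif /* MEMPOOL_USAGE_EXAMPLES */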
/*
 * mempool_destroy is the destructor registered with the NSPR
 * thread-private index; NSPR calls it with the thread's private value
 * when the thread exits.
 */
static void
mempool_destroy(void *arg)
{
    int i = 0;
    struct mempool *my_mempool = (struct mempool *)arg;
#ifdef SHARED_MEMPOOL
    (void)my_mempool; /* the shared pools are global, not thread private */
    for (i = 0; MEMPOOL_END != mempool[i].mempool_name; i++) {
        struct mempool_object *object = NULL;
        if (NULL == mempool[i].mempool_mutex) {
            /* mutex is NULL; this mempool is not enabled */
            continue;
        }
        object = mempool[i].mempool_head;
        mempool[i].mempool_head = NULL;
        while (NULL != object) {
            struct mempool_object *next = object->mempool_next;
            if (NULL != mempool[i].mempool_cleanup_fn) {
                (mempool[i].mempool_cleanup_fn)((void *)object);
            }
            slapi_ch_free((void **)&object);
            object = next;
        }
        PR_DestroyLock(mempool[i].mempool_mutex);
        mempool[i].mempool_mutex = NULL;
    }
#else
    if (NULL == my_mempool || my_mempool[0].mempool_name != mempool_names[0]) {
        /* mempool is not initialized */
        return;
    }
    for (i = 0; i < MAX_MEMPOOL; i++) {
        struct mempool_object *object = my_mempool[i].mempool_head;
        while (NULL != object) {
            struct mempool_object *next = object->mempool_next;
            if (NULL != my_mempool[i].mempool_cleanup_fn) {
                (my_mempool[i].mempool_cleanup_fn)((void *)object);
            }
            slapi_ch_free((void **)&object);
            object = next;
        }
        my_mempool[i].mempool_head = NULL;
        my_mempool[i].mempool_count = 0;
    }
    slapi_ch_free((void **)&my_mempool);
    /* NSPR clears the thread-private slot before invoking this destructor,
     * so there is no need to call PR_SetThreadPrivate here */
#endif
}
/*
 * Return memory to the memory pool.
 * (The cleanup callback was intended to release memory nested inside the
 * returned area. Originally an object could point to other memory areas,
 * but the current code (#else) expects flat objects with no internal
 * structure, so the cleanup callback is effectively unused.)
 * The current code (#else) uses the memory pool stored in the
 * per-thread private data.
 */
int
mempool_return(int type, void *object, mempool_cleanup_callback cleanup)
{
    PR_ASSERT(type >= 0 && type < MAX_MEMPOOL);
    if (!config_get_mempool_switch()) {
        return LDAP_SUCCESS; /* memory pool: off */
    }
#ifdef SHARED_MEMPOOL
    if (NULL == mempool[type].mempool_mutex) {
        /* mutex is NULL; this mempool is not enabled */
        return LDAP_SUCCESS;
    }
    PR_Lock(mempool[type].mempool_mutex);
    ((struct mempool_object *)object)->mempool_next = mempool[type].mempool_head;
    mempool[type].mempool_head = (struct mempool_object *)object;
    mempool[type].mempool_cleanup_fn = cleanup;
    mempool[type].mempool_count++;
    PR_Unlock(mempool[type].mempool_mutex);
    return LDAP_SUCCESS;
#else
    {
        struct mempool *my_mempool;
        int maxfreelist;
        my_mempool = (struct mempool *)PR_GetThreadPrivate(mempool_index);
        if (NULL == my_mempool || my_mempool[0].mempool_name != mempool_names[0]) {
            /* mempool is not initialized */
            mempool_init(&my_mempool);
        }
        maxfreelist = config_get_mempool_maxfreelist();
        if ((maxfreelist > 0) &&
            (my_mempool[type].mempool_count > (unsigned long)maxfreelist)) {
            /* free list is full; let the caller free the memory itself */
            return LDAP_UNWILLING_TO_PERFORM;
        } else {
            ((struct mempool_object *)object)->mempool_next = my_mempool[type].mempool_head;
            my_mempool[type].mempool_head = (struct mempool_object *)object;
            my_mempool[type].mempool_cleanup_fn = cleanup;
            my_mempool[type].mempool_count++;
            PR_SetThreadPrivate(mempool_index, (void *)my_mempool);
            return LDAP_SUCCESS;
        }
    }
#endif
}
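/*
 * A minimal sketch (hypothetical MEMPOOL_USAGE_EXAMPLES guard) of the
 * size-to-pool-index mapping that the slapi_ch_* wrappers below use
 * when they call mempool_return/mempool_get: pool 0 holds 2K (2^11)
 * blocks, pool 15 holds 64M (2^26) blocks.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
static int
example_pool_index(unsigned long lsize)
{
    unsigned long roundup = 1;
    int n = 0;
    while (roundup < lsize) { /* smallest power of two >= lsize */
        roundup <<= 1;
        n++;
    }
    return n - 11; /* 2^11 = 2K is pool 0; caller must keep 11 <= n <= 26 */
}
#endif /* MEMPOOL_USAGE_EXAMPLES */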
/*
 * Get memory from the memory pool.
 * The current code (#else) uses the memory pool stored in the
 * per-thread private data.
 */
void *
mempool_get(int type)
{
    struct mempool_object *object = NULL;
    struct mempool *my_mempool;
    PR_ASSERT(type >= 0 && type < MAX_MEMPOOL);
    if (!config_get_mempool_switch()) {
        return NULL; /* memory pool: off */
    }
#ifdef SHARED_MEMPOOL
    if (NULL == mempool[type].mempool_mutex) {
        /* mutex is NULL; this mempool is not enabled */
        return NULL;
    }
    PR_Lock(mempool[type].mempool_mutex);
    object = mempool[type].mempool_head;
    if (NULL != object) {
        mempool[type].mempool_head = object->mempool_next;
        mempool[type].mempool_count--;
        object->mempool_next = NULL;
    }
    PR_Unlock(mempool[type].mempool_mutex);
#else
    my_mempool = (struct mempool *)PR_GetThreadPrivate(mempool_index);
    if (NULL == my_mempool || my_mempool[0].mempool_name != mempool_names[0]) {
        /* mempool is not initialized */
        return NULL;
    }
    object = my_mempool[type].mempool_head;
    if (NULL != object) {
        my_mempool[type].mempool_head = object->mempool_next;
        my_mempool[type].mempool_count--;
        object->mempool_next = NULL;
        PR_SetThreadPrivate(mempool_index, (void *)my_mempool);
    }
#endif
    return object;
}
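/*
 * A minimal round-trip sketch (hypothetical MEMPOOL_USAGE_EXAMPLES
 * guard): try the per-thread pool first, fall back to the heap, and
 * hand the block back to the pool when done. The slapi_ch_malloc /
 * slapi_ch_free wrappers below follow the same pattern, except that
 * they additionally prepend a hidden size header (omitted here).
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
static void
example_pool_roundtrip(void)
{
    /* pool 0 serves 2K (2^11 byte) blocks */
    char *buf = (char *)mempool_get(0);
    if (NULL == buf) {
        buf = (char *)malloc(2048); /* pool empty: fall back to the heap */
    }
    /* ... use up to 2048 bytes of buf ... */
    if (LDAP_SUCCESS != mempool_return(0, buf, (mempool_cleanup_callback)NULL)) {
        free(buf); /* pool full or disabled: the caller frees */
    }
}
#endif /* MEMPOOL_USAGE_EXAMPLES */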
/*****************************************************************************
 * The rest is slapi_ch_malloc and friends, adjusted to use the mempool.
 * The challenge is that mempool_return needs to know the size of the
 * memory, but free does not pass that info. To work around it, malloc
 * allocates extra space in front of the memory to be returned and stores
 * the size in that extra space.
 *
 * Also, to simplify the code, it allocates the smallest 2^n size that
 * can hold the requested size. We should make the granularity finer for
 * real use.
 *
 * Above 64MB, the functions call mmap directly. The reason for choosing
 * mmap over the mempool is that pooled memory stays allocated until the
 * server shuts down, even if it is never requested again. With mmap,
 * the memory is returned to the system, which is guaranteed to shrink
 * the process size.
 *
 * In this implementation, the behavior depends on the requested size
 * (plus the size space -- an unsigned long):
 * 1B ~ 1KB: call the system *alloc/free; the size must still be stored
 *           to support realloc, since the functions need to know whether
 *           a passed address is the real address or shifted past the
 *           size space.
 * 1KB + 1B ~ 64MB: use the mempool
 * 64MB + 1B ~ : call mmap
 */
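/*
 * Layout sketch of the "size space" trick described above (illustrative,
 * assuming an 8-byte unsigned long):
 *
 *   real allocation:  [ size (unsigned long) | user data ............ ]
 *                     ^ address kept by malloc/free/mmap
 *                                            ^ address handed to callers
 *
 * So for slapi_ch_malloc(3000): lsize = 3008, rounded up to 4096 (pool
 * index 1, the "4K" pool); *(unsigned long *)real == 4096 is what
 * slapi_ch_free later reads to pick the free path.
 */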
#include <sys/mman.h>

static int slapi_ch_munmap_no_roundup(void **start, unsigned long len);
char *slapi_ch_mmap(unsigned long len);

static int counters_created = 0;
PR_DEFINE_COUNTER(slapi_ch_counter_malloc);
PR_DEFINE_COUNTER(slapi_ch_counter_calloc);
PR_DEFINE_COUNTER(slapi_ch_counter_realloc);
PR_DEFINE_COUNTER(slapi_ch_counter_strdup);
PR_DEFINE_COUNTER(slapi_ch_counter_free);
PR_DEFINE_COUNTER(slapi_ch_counter_created);
PR_DEFINE_COUNTER(slapi_ch_counter_exist);

#define OOM_PREALLOC_SIZE 65536
static void *oom_emergency_area = NULL;
static PRLock *oom_emergency_lock = NULL;

#define SLAPD_MODULE "memory allocator"

static const char *const oom_advice =
    "\nThe server has probably allocated all available virtual memory. To solve\n"
    "this problem, make more virtual memory available to your server, or reduce\n"
    "one or more of the following server configuration settings:\n"
    "  nsslapd-cachesize        (Database Settings - Maximum entries in cache)\n"
    "  nsslapd-cachememsize     (Database Settings - Memory available for cache)\n"
    "  nsslapd-dbcachesize      (LDBM Plug-in Settings - Maximum cache size)\n"
    "  nsslapd-import-cachesize (LDBM Plug-in Settings - Import cache size).\n"
    "Can't recover; calling exit(1).\n";
static void
create_counters()
{
    PR_CREATE_COUNTER(slapi_ch_counter_malloc, "slapi_ch", "malloc", "");
    PR_CREATE_COUNTER(slapi_ch_counter_calloc, "slapi_ch", "calloc", "");
    PR_CREATE_COUNTER(slapi_ch_counter_realloc, "slapi_ch", "realloc", "");
    PR_CREATE_COUNTER(slapi_ch_counter_strdup, "slapi_ch", "strdup", "");
    PR_CREATE_COUNTER(slapi_ch_counter_free, "slapi_ch", "free", "");
    PR_CREATE_COUNTER(slapi_ch_counter_created, "slapi_ch", "created", "");
    PR_CREATE_COUNTER(slapi_ch_counter_exist, "slapi_ch", "exist", "");
    /* ensure that we have space to allow for shutdown calls to malloc()
     * should we run out of memory
     */
    if (oom_emergency_area == NULL) {
        oom_emergency_area = malloc(OOM_PREALLOC_SIZE);
    }
    oom_emergency_lock = PR_NewLock();
}
static void
log_negative_alloc_msg(const char *op, const char *units, unsigned long size)
{
    slapi_log_error(SLAPI_LOG_FATAL, SLAPD_MODULE,
        "cannot %s %lu %s;\n"
        "trying to allocate 0 or a negative number of %s is not portable and\n"
        "gives different results on different platforms.\n",
        op, size, units, units);
}
static char *
slapi_ch_malloc_core(unsigned long lsize)
{
    char *newmem;
    if ((newmem = (char *)malloc(lsize)) == NULL) {
        int oserr = errno;
        oom_occurred();
        slapi_log_error(SLAPI_LOG_FATAL, SLAPD_MODULE,
            "malloc of %lu bytes failed; OS error %d (%s)%s\n",
            lsize, oserr, slapd_system_strerror(oserr), oom_advice);
        exit(1);
    }
    /* record the allocated size in the hidden header, then hand the
     * caller the address just past it */
    *(unsigned long *)newmem = lsize;
    newmem += sizeof(unsigned long);
    return newmem;
}
char *
slapi_ch_malloc(unsigned long size)
{
    char *newmem;
    unsigned long lsize;
    if (0 == size) {
        log_negative_alloc_msg("malloc", "bytes", size);
        return 0;
    }
    lsize = size + sizeof(unsigned long); /* requested size + size space */
    if (lsize <= 1024) {
        newmem = slapi_ch_malloc_core(lsize);
    } else if (lsize <= 67108864) {
        /* 2KB ~ 64MB: take the memory from the memory pool */
        unsigned long roundup = 1;
        int n = 0;
        while (1) { /* smallest 2^n >= lsize */
            roundup <<= 1;
            n++;
            if (roundup >= lsize) {
                break;
            }
        }
        PR_ASSERT(n >= 11 && n <= 26);
        newmem = (char *)mempool_get(n - 11); /* 11: 2^11 = 2K */
        if (NULL == newmem) {
            newmem = slapi_ch_malloc_core(roundup);
        }
    } else {
        newmem = slapi_ch_mmap(size);
    }
    if (!counters_created) {
        create_counters();
        counters_created = 1;
    }
    PR_INCREMENT_COUNTER(slapi_ch_counter_malloc);
    PR_INCREMENT_COUNTER(slapi_ch_counter_created);
    PR_INCREMENT_COUNTER(slapi_ch_counter_exist);
    return (newmem);
}
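/*
 * Worked examples of the dispatch above (assuming an 8-byte
 * unsigned long):
 *   slapi_ch_malloc(100)     -> lsize 108   -> system malloc(108)
 *   slapi_ch_malloc(3000)    -> lsize 3008  -> pool 1 ("4K"), roundup 4096
 *   slapi_ch_malloc(100<<20) -> lsize > 64M -> mmap
 */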
static char *
slapi_ch_realloc_core(char *block, unsigned long lsize)
{
    char *realblock;
    char *newmem;
    /* step back to the real allocation, which starts at the size header */
    realblock = block - sizeof(unsigned long);
    if ((newmem = (char *)realloc(realblock, lsize)) == NULL) {
        int oserr = errno;
        oom_occurred();
        slapi_log_error(SLAPI_LOG_FATAL, SLAPD_MODULE,
            "realloc of %lu bytes failed; OS error %d (%s)%s\n",
            lsize, oserr, slapd_system_strerror(oserr), oom_advice);
        exit(1);
    }
    *(unsigned long *)newmem = lsize;
    newmem += sizeof(unsigned long);
    return newmem;
}
char *
slapi_ch_realloc(char *block, unsigned long size)
{
    char *newmem;
    unsigned long lsize;
    unsigned long origsize;
    char *realblock;
    if (block == NULL) {
        return (slapi_ch_malloc(size));
    }
    if (0 == size) {
        log_negative_alloc_msg("realloc", "bytes", size);
        return block;
    }
    lsize = size + sizeof(unsigned long);
    if (lsize <= 1024) {
        newmem = slapi_ch_realloc_core(block, lsize);
    } else if (lsize <= 67108864) {
        /* 2KB ~ 64MB: take the memory from the memory pool */
        unsigned long roundup = 1;
        int n = 0;
        while (1) { /* smallest 2^n >= lsize */
            roundup <<= 1;
            n++;
            if (roundup >= lsize) {
                break;
            }
        }
        PR_ASSERT(n >= 11 && n <= 26);
        newmem = (char *)mempool_get(n - 11); /* 11: 2^11 = 2K */
        if (NULL == newmem) {
            newmem = slapi_ch_realloc_core(block, roundup);
        } else {
            realblock = block - sizeof(unsigned long);
            origsize = *(unsigned long *)realblock - sizeof(unsigned long);
            if (origsize > size) {
                origsize = size; /* shrinking: copy only what fits */
            }
            memcpy(newmem, block, origsize);
            slapi_ch_free_string(&block);
        }
    } else {
        realblock = block - sizeof(unsigned long);
        origsize = *(unsigned long *)realblock - sizeof(unsigned long);
        if (origsize > size) {
            origsize = size; /* shrinking: copy only what fits */
        }
        newmem = slapi_ch_mmap(size); /* records the mapped size in the header */
        memcpy(newmem, block, origsize);
        slapi_ch_free_string(&block);
    }
    if (!counters_created) {
        create_counters();
        counters_created = 1;
    }
    PR_INCREMENT_COUNTER(slapi_ch_counter_realloc);
    return (newmem);
}
static char *
slapi_ch_calloc_core(unsigned long lsize)
{
    char *newmem;
    if ((newmem = (char *)calloc(1, lsize)) == NULL) {
        int oserr = errno;
        oom_occurred();
        slapi_log_error(SLAPI_LOG_FATAL, SLAPD_MODULE,
            "calloc of %lu bytes failed; OS error %d (%s)%s\n",
            lsize, oserr, slapd_system_strerror(oserr), oom_advice);
        exit(1);
    }
    *(unsigned long *)newmem = lsize;
    newmem += sizeof(unsigned long);
    return newmem;
}
char *
slapi_ch_calloc(unsigned long nelem, unsigned long size)
{
    char *newmem;
    unsigned long lsize;
    if (0 == size) {
        log_negative_alloc_msg("calloc", "bytes", size);
        return 0;
    }
    if (0 == nelem) {
        log_negative_alloc_msg("calloc", "elements", nelem);
        return 0;
    }
    lsize = nelem * size + sizeof(unsigned long);
    if (lsize <= 1024) {
        newmem = slapi_ch_calloc_core(lsize);
    } else if (lsize <= 67108864) {
        /* 2KB ~ 64MB: take the memory from the memory pool */
        unsigned long roundup = 1;
        int n = 0;
        while (1) { /* smallest 2^n >= lsize */
            roundup <<= 1;
            n++;
            if (roundup >= lsize) {
                break;
            }
        }
        PR_ASSERT(n >= 11 && n <= 26);
        newmem = (char *)mempool_get(n - 11); /* 11: 2^11 = 2K */
        if (NULL == newmem) {
            newmem = slapi_ch_calloc_core(roundup);
        } else {
            /* pooled memory is recycled, not fresh; zero it ourselves */
            memset(newmem, 0, size * nelem);
        }
    } else {
        unsigned long mysize = size * nelem;
        newmem = slapi_ch_mmap(mysize);
        memset(newmem, 0, mysize);
    }
    if (!counters_created) {
        create_counters();
        counters_created = 1;
    }
    PR_INCREMENT_COUNTER(slapi_ch_counter_calloc);
    PR_INCREMENT_COUNTER(slapi_ch_counter_created);
    PR_INCREMENT_COUNTER(slapi_ch_counter_exist);
    return (newmem);
}
char *
slapi_ch_strdup(const char *s1)
{
    char *newmem;
    unsigned long lsize;
    /* strdup pukes on NULL strings...bail out now */
    if (NULL == s1)
        return NULL;
    lsize = strlen(s1) + 1; /* slapi_ch_malloc adds the size space itself */
    newmem = slapi_ch_malloc(lsize);
    sprintf(newmem, "%s", s1);
    if (!counters_created) {
        create_counters();
        counters_created = 1;
    }
    PR_INCREMENT_COUNTER(slapi_ch_counter_strdup);
    PR_INCREMENT_COUNTER(slapi_ch_counter_created);
    PR_INCREMENT_COUNTER(slapi_ch_counter_exist);
    return newmem;
}
/*
 * Function: slapi_ch_free
 *
 * Returns: nothing
 *
 * Description: frees the pointer, and then sets it to NULL to
 * prevent free-memory writes.
 * Note: pass in the address of the pointer you want to free.
 * Note: you can pass in null pointers, it's cool.
 *
 * Implementation: get the size from the size space, and determine the
 * behavior based upon the size:
 * 1B ~ 1KB: call system free
 * 1KB + 1B ~ 64MB: return memory to mempool
 * 64MB + 1B ~ : call munmap
 */
void
slapi_ch_free(void **ptr)
{
    void *realptr;
    unsigned long size;
    if (ptr == NULL || *ptr == NULL) {
        return;
    }
    realptr = (void *)((char *)*ptr - sizeof(unsigned long));
    size = *(unsigned long *)realptr;
    if (size <= 1024) {
        free(realptr);
    } else if (size <= 67108864) {
        /* 2KB ~ 64MB: return the memory to the memory pool */
        unsigned long roundup = 1;
        int n = 0;
        int rc = LDAP_SUCCESS;
        while (1) { /* smallest 2^n >= size */
            roundup <<= 1;
            n++;
            if (roundup >= size) {
                break;
            }
        }
        PR_ASSERT(n >= 11 && n <= 26);
        rc = mempool_return(n - 11, *ptr, (mempool_cleanup_callback)NULL);
        if (LDAP_SUCCESS != rc) {
            /* pool is full or disabled; release to the system instead */
            free(realptr);
        }
    } else {
        slapi_ch_munmap_no_roundup(ptr, size);
    }
    *ptr = NULL;
    if (!counters_created) {
        create_counters();
        counters_created = 1;
    }
    PR_INCREMENT_COUNTER(slapi_ch_counter_free);
    PR_DECREMENT_COUNTER(slapi_ch_counter_exist);
    return;
}
char *
slapi_ch_mmap(unsigned long len)
{
    char *newmem;
    long sc_page_size = config_get_system_page_size();
    int sc_page_bits = config_get_system_page_bits();
    /* round len plus the size space up to a page boundary; without the
     * header in the computation, a len that is an exact multiple of the
     * page size would leave sizeof(unsigned long) bytes too few for the
     * caller */
    unsigned long total = len + sizeof(unsigned long);
    unsigned long roundup = (total & (sc_page_size - 1))
        ? (((total >> sc_page_bits) + 1) << sc_page_bits) : total;
    if ((newmem = (char *)mmap(NULL, roundup, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 /* ignored */)) == MAP_FAILED) {
        int oserr = errno;
        oom_occurred();
        slapi_log_error(SLAPI_LOG_FATAL, SLAPD_MODULE,
            "mmap of %lu bytes failed; OS error %d (%s)%s\n",
            roundup, oserr, slapd_system_strerror(oserr), oom_advice);
        exit(1);
    }
    *(unsigned long *)newmem = roundup;
    newmem += sizeof(unsigned long);
    return (newmem);
}
int
slapi_ch_munmap(void **start, unsigned long len)
{
    long sc_page_size = config_get_system_page_size();
    int sc_page_bits = config_get_system_page_bits();
    /* mirror the rounding in slapi_ch_mmap: len plus the size space,
     * rounded up to a page boundary */
    unsigned long total = len + sizeof(unsigned long);
    unsigned long roundup = (total & (sc_page_size - 1))
        ? (((total >> sc_page_bits) + 1) << sc_page_bits) : total;
    void *realstart = (char *)*start - sizeof(unsigned long);
    int rc = munmap(realstart, roundup);
    if (0 != rc) {
        int oserr = errno;
        slapi_log_error(SLAPI_LOG_FATAL, SLAPD_MODULE,
            "munmap of %lu bytes failed; OS error %d (%s)\n",
            roundup, oserr, slapd_system_strerror(oserr));
        /* Leaked. This should not happen */
    }
    *start = NULL;
    return rc;
}
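/*
 * Worked example of the page rounding above (assuming a 4096-byte page,
 * so sc_page_bits == 12): len = 70000000 gives total = 70000008;
 * 70000008 & 4095 != 0, so roundup = ((70000008 >> 12) + 1) << 12
 * = 70000640, the next page multiple.
 */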
static char *
slapi_ch_mmap_no_roundup(unsigned long size)
{
    char *newmem;
    if ((newmem = (char *)mmap(NULL, size + sizeof(unsigned long),
            PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
            -1, 0 /* ignored */)) == MAP_FAILED) {
        int oserr = errno;
        oom_occurred();
        slapi_log_error(SLAPI_LOG_FATAL, SLAPD_MODULE,
            "mmap of %lu bytes failed; OS error %d (%s)%s\n",
            size + sizeof(unsigned long), oserr,
            slapd_system_strerror(oserr), oom_advice);
        exit(1);
    }
    *(unsigned long *)newmem = size;
    newmem += sizeof(unsigned long);
    return newmem;
}
static int
slapi_ch_munmap_no_roundup(void **start, unsigned long len)
{
    void *realstart = (char *)*start - sizeof(unsigned long);
    unsigned long reallen = len + sizeof(unsigned long); /* int would truncate
                                                          * large mappings */
    int rc = munmap(realstart, reallen);
    if (0 != rc) {
        int oserr = errno;
        slapi_log_error(SLAPI_LOG_FATAL, SLAPD_MODULE,
            "munmap of %lu bytes failed; OS error %d (%s)\n",
            len, oserr, slapd_system_strerror(oserr));
        /* Leaked. This should not happen */
    }
    *start = NULL;
    return rc;
}
/*
 * This function is just like PR_smprintf. It works like sprintf
 * except that it allocates enough memory to hold the result
 * string and returns that allocated memory to the caller. The
 * caller must use slapi_ch_free_string to free the memory.
 * It should only be used in situations where the memory will
 * eventually be freed with slapi_ch_free_string, e.g., allocating a
 * string that will be freed as part of pblock cleanup, or passed in
 * to create a Slapi_DN, or things of that nature. If you have control
 * of the flow such that the memory will be allocated and freed in the
 * same scope, it is better to use PR_smprintf and PR_smprintf_free
 * instead, because they are likely faster.
 */
/*
 * This implementation is the same as PR_smprintf.
 * The above comment does not apply to this function for now.
 * See [150809] for more details.
 * WARNING - with this fix, we are now mixing PR_Malloc with
 * slapi_ch_free. That is OK for now - they both use malloc/free from
 * the operating system. But if this changes in the future, this
 * function will have to change as well.
 */
char *
slapi_ch_smprintf(const char *fmt, ...)
{
    char *p = NULL, *q = NULL;
    va_list ap;
    if (NULL == fmt) {
        return NULL;
    }
    va_start(ap, fmt);
    p = PR_vsmprintf(fmt, ap);
    va_end(ap);
    q = slapi_ch_strdup(p); /* ugly; copy into slapi_ch-managed memory so
                             * slapi_ch_free can release it */
    free(p);
    return q;
}
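/*
 * A minimal usage sketch (hypothetical MEMPOOL_USAGE_EXAMPLES guard):
 * the result of slapi_ch_smprintf must go back through
 * slapi_ch_free_string, never plain free(), because of the hidden
 * size header in front of the returned string.
 */
#ifdef MEMPOOL_USAGE_EXAMPLES
static void
example_smprintf_usage(const char *host, int port)
{
    char *url = slapi_ch_smprintf("ldap://%s:%d/", host, port);
    /* ... use url ... */
    slapi_ch_free_string(&url); /* also resets url to NULL */
}
#endif /* MEMPOOL_USAGE_EXAMPLES */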
#endif /* MEMPOOL_EXPERIMENTAL */