slapi_counter.c

/** BEGIN COPYRIGHT BLOCK
 * This Program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; version 2 of the License.
 *
 * This Program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * In addition, as a special exception, Red Hat, Inc. gives You the additional
 * right to link the code of this Program with code not covered under the GNU
 * General Public License ("Non-GPL Code") and to distribute linked combinations
 * including the two, subject to the limitations in this paragraph. Non-GPL Code
 * permitted under this exception must only link to the code of this Program
 * through those well defined interfaces identified in the file named EXCEPTION
 * found in the source code files (the "Approved Interfaces"). The files of
 * Non-GPL Code may instantiate templates or use macros or inline functions from
 * the Approved Interfaces without causing the resulting work to be covered by
 * the GNU General Public License. Only Red Hat, Inc. may make changes or
 * additions to the list of Approved Interfaces. You must obey the GNU General
 * Public License in all respects for all of the Program code and other code used
 * in conjunction with the Program except the Non-GPL Code covered by this
 * exception. If you modify this file, you may extend this exception to your
 * version of the file, but you are not obligated to do so. If you do not wish to
 * provide this exception without modification, you must delete this exception
 * statement from your version and license this file solely under the GPL without
 * exception.
 *
 *
 * Copyright (C) 2008 Red Hat, Inc.
 * All rights reserved.
 * END COPYRIGHT BLOCK **/
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "slap.h"

#ifdef SOLARIS
/* Declared to match the _sparcv9_Atomic* call sites below. */
PRUint64 _sparcv9_AtomicSet(PRUint64 *address, PRUint64 newval);
PRUint64 _sparcv9_AtomicAdd(PRUint64 *address, PRUint64 val);
PRUint64 _sparcv9_AtomicSub(PRUint64 *address, PRUint64 val);
#endif

#ifdef HPUX
#ifdef ATOMIC_64BIT_OPERATIONS
#include <machine/sys/inline.h>
#endif
#endif

#if defined LINUX && (defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH)
/* On systems that don't have the 64-bit GCC atomic builtins, we need to
 * implement our own atomic functions using inline assembly code. */
PRUint64 __sync_add_and_fetch_8(PRUint64 *ptr, PRUint64 addval);
PRUint64 __sync_sub_and_fetch_8(PRUint64 *ptr, PRUint64 subval);
#endif

#if defined LINUX && !HAVE_DECL___SYNC_ADD_AND_FETCH
/* Systems that have the atomic builtins defined, but don't have
 * implementations for 64-bit values will automatically try to
 * call the __sync_*_8 versions we provide. If the atomic builtins
 * are not defined at all, we define them here to use our local
 * functions. */
#define __sync_add_and_fetch __sync_add_and_fetch_8
#define __sync_sub_and_fetch __sync_sub_and_fetch_8
#endif
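
/*
 * For illustration, with the defines above in effect a call such as
 *
 *     newvalue = __sync_add_and_fetch(&(counter->value), addvalue);
 *
 * expands to
 *
 *     newvalue = __sync_add_and_fetch_8(&(counter->value), addvalue);
 *
 * which resolves to the inline-assembly implementation at the bottom
 * of this file.
 */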

/*
 * Counter Structure
 */
typedef struct slapi_counter {
    PRUint64 value;
#ifndef ATOMIC_64BIT_OPERATIONS
    Slapi_Mutex *lock;
#endif
} slapi_counter;

/*
 * slapi_counter_new()
 *
 * Allocates and initializes a new Slapi_Counter.
 */
Slapi_Counter *slapi_counter_new()
{
    Slapi_Counter *counter = NULL;

    counter = (Slapi_Counter *)slapi_ch_calloc(1, sizeof(Slapi_Counter));

    if (counter != NULL) {
        slapi_counter_init(counter);
    }

    return counter;
}
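
/*
 * Example usage (an illustrative sketch using only the functions
 * defined in this file):
 *
 *     Slapi_Counter *ops = slapi_counter_new();
 *     slapi_counter_increment(ops);
 *     slapi_counter_add(ops, 10);
 *     PRUint64 total = slapi_counter_get_value(ops);
 *     slapi_counter_destroy(&ops);
 */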

/*
 * slapi_counter_init()
 *
 * Initializes a Slapi_Counter.
 */
void slapi_counter_init(Slapi_Counter *counter)
{
    if (counter != NULL) {
#ifndef ATOMIC_64BIT_OPERATIONS
        /* Create the lock if necessary. */
        if (counter->lock == NULL) {
            counter->lock = slapi_new_mutex();
        }
#endif
        /* Set the value to 0. */
        slapi_counter_set_value(counter, 0);
    }
}

/*
 * slapi_counter_destroy()
 *
 * Destroys a Slapi_Counter and sets the
 * pointer to NULL to prevent reuse.
 */
void slapi_counter_destroy(Slapi_Counter **counter)
{
    if ((counter != NULL) && (*counter != NULL)) {
#ifndef ATOMIC_64BIT_OPERATIONS
        slapi_destroy_mutex((*counter)->lock);
#endif
        slapi_ch_free((void **)counter);
    }
}

/*
 * slapi_counter_increment()
 *
 * Atomically increments a Slapi_Counter.
 */
PRUint64 slapi_counter_increment(Slapi_Counter *counter)
{
    return slapi_counter_add(counter, 1);
}

/*
 * slapi_counter_decrement()
 *
 * Atomically decrements a Slapi_Counter. Note
 * that this will not prevent you from wrapping
 * around 0.
 */
PRUint64 slapi_counter_decrement(Slapi_Counter *counter)
{
    return slapi_counter_subtract(counter, 1);
}
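
/*
 * For illustration: the counter is an unsigned 64-bit value, so
 * decrementing past zero wraps. If the counter currently holds 0,
 * slapi_counter_decrement() returns 0xFFFFFFFFFFFFFFFF (2^64 - 1).
 * Callers that need a floor at zero must enforce it themselves.
 */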

/*
 * slapi_counter_add()
 *
 * Atomically adds a value to a Slapi_Counter.
 */
PRUint64 slapi_counter_add(Slapi_Counter *counter, PRUint64 addvalue)
{
    PRUint64 newvalue = 0;
#ifdef HPUX
    PRUint64 prev = 0;
#endif

    if (counter == NULL) {
        return newvalue;
    }

#ifndef ATOMIC_64BIT_OPERATIONS
    slapi_lock_mutex(counter->lock);
    counter->value += addvalue;
    newvalue = counter->value;
    slapi_unlock_mutex(counter->lock);
#else
#ifdef LINUX
    newvalue = __sync_add_and_fetch(&(counter->value), addvalue);
#elif defined(SOLARIS)
    newvalue = _sparcv9_AtomicAdd(&(counter->value), addvalue);
#elif defined(HPUX)
    /* fetchadd only works with values of 1, 4, 8, and 16. In addition,
     * it requires its argument to be an integer constant. */
    if (addvalue == 1) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), 1, _LDHINT_NONE);
        newvalue += 1;
    } else if (addvalue == 4) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), 4, _LDHINT_NONE);
        newvalue += 4;
    } else if (addvalue == 8) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), 8, _LDHINT_NONE);
        newvalue += 8;
    } else if (addvalue == 16) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), 16, _LDHINT_NONE);
        newvalue += 16;
    } else {
        /* For other values, we have to use cmpxchg. */
        do {
            prev = slapi_counter_get_value(counter);
            newvalue = prev + addvalue;
            /* Put prev in a register for cmpxchg to compare against */
            _Asm_mov_to_ar(_AREG_CCV, prev);
        } while (prev != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), newvalue, _LDHINT_NONE));
    }
#endif
#endif /* ATOMIC_64BIT_OPERATIONS */

    return newvalue;
}
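
/*
 * Background note: the Itanium fetchadd instruction encodes its addend
 * as an immediate, and the architecture only accepts the increments
 * -16, -8, -4, -1, 1, 4, 8, and 16. That is why slapi_counter_add()
 * above and slapi_counter_subtract() below special-case those values
 * and fall back to a cmpxchg retry loop for everything else.
 */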

/*
 * slapi_counter_subtract()
 *
 * Atomically subtracts a value from a Slapi_Counter. Note
 * that this will not prevent you from wrapping around 0.
 */
PRUint64 slapi_counter_subtract(Slapi_Counter *counter, PRUint64 subvalue)
{
    PRUint64 newvalue = 0;
#ifdef HPUX
    PRUint64 prev = 0;
#endif

    if (counter == NULL) {
        return newvalue;
    }

#ifndef ATOMIC_64BIT_OPERATIONS
    slapi_lock_mutex(counter->lock);
    counter->value -= subvalue;
    newvalue = counter->value;
    slapi_unlock_mutex(counter->lock);
#else
#ifdef LINUX
    newvalue = __sync_sub_and_fetch(&(counter->value), subvalue);
#elif defined(SOLARIS)
    newvalue = _sparcv9_AtomicSub(&(counter->value), subvalue);
#elif defined(HPUX)
    /* fetchadd only works with values of -1, -4, -8, and -16. In addition,
     * it requires its argument to be an integer constant. */
    if (subvalue == 1) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), -1, _LDHINT_NONE);
        newvalue -= 1;
    } else if (subvalue == 4) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), -4, _LDHINT_NONE);
        newvalue -= 4;
    } else if (subvalue == 8) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), -8, _LDHINT_NONE);
        newvalue -= 8;
    } else if (subvalue == 16) {
        newvalue = _Asm_fetchadd(_FASZ_D, _SEM_ACQ, &(counter->value), -16, _LDHINT_NONE);
        newvalue -= 16;
    } else {
        /* For other values, we have to use cmpxchg. */
        do {
            prev = slapi_counter_get_value(counter);
            newvalue = prev - subvalue;
            /* Put prev in a register for cmpxchg to compare against */
            _Asm_mov_to_ar(_AREG_CCV, prev);
        } while (prev != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), newvalue, _LDHINT_NONE));
    }
#endif
#endif /* ATOMIC_64BIT_OPERATIONS */

    return newvalue;
}

/*
 * slapi_counter_set_value()
 *
 * Atomically sets the value of a Slapi_Counter.
 */
PRUint64 slapi_counter_set_value(Slapi_Counter *counter, PRUint64 newvalue)
{
    PRUint64 value = 0;

    if (counter == NULL) {
        return value;
    }

#ifndef ATOMIC_64BIT_OPERATIONS
    slapi_lock_mutex(counter->lock);
    counter->value = newvalue;
    slapi_unlock_mutex(counter->lock);
    return newvalue;
#else
#ifdef LINUX
/* Use our own inline assembly for an atomic set if
 * the builtins aren't available. */
#if defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH
    /*
     * %0 = counter->value
     * %1 = newvalue
     */
    __asm__ __volatile__(
#ifdef CPU_x86
        /* Save the PIC register */
        " pushl %%ebx;"
#endif /* CPU_x86 */
        /* Put value of counter->value in EDX:EAX */
        "retryset: movl %0, %%eax;"
        " movl 4%0, %%edx;"
        /* Put newvalue in ECX:EBX */
        " movl %1, %%ebx;"
        " movl 4+%1, %%ecx;"
        /* If EDX:EAX and counter->value are the same,
         * replace counter->value with ECX:EBX */
        " lock; cmpxchg8b %0;"
        " jnz retryset;"
#ifdef CPU_x86
        /* Restore the PIC register */
        " popl %%ebx"
#endif /* CPU_x86 */
        : "+o" (counter->value)
        : "m" (newvalue)
#ifdef CPU_x86
        : "memory", "eax", "ecx", "edx", "cc");
#else
        : "memory", "eax", "ebx", "ecx", "edx", "cc");
#endif
    return newvalue;
#else
    while (1) {
        value = counter->value;
        if (__sync_bool_compare_and_swap(&(counter->value), value, newvalue)) {
            return newvalue;
        }
    }
#endif /* CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH */
#elif defined(SOLARIS)
    _sparcv9_AtomicSet(&(counter->value), newvalue);
    return newvalue;
#elif defined(HPUX)
    do {
        value = counter->value;
        /* Put value in a register for cmpxchg to compare against */
        _Asm_mov_to_ar(_AREG_CCV, value);
    } while (value != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), newvalue, _LDHINT_NONE));
    return newvalue;
#endif
#endif /* ATOMIC_64BIT_OPERATIONS */
}
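
/*
 * Note on the pattern above: a plain 64-bit store is not guaranteed to
 * be atomic on 32-bit x86, so the new value is installed with a
 * compare-and-swap loop (cmpxchg8b / __sync_bool_compare_and_swap)
 * that retries until the full 8-byte write succeeds as a single unit.
 */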

/*
 * slapi_counter_get_value()
 *
 * Returns the value of a Slapi_Counter.
 */
PRUint64 slapi_counter_get_value(Slapi_Counter *counter)
{
    PRUint64 value = 0;

    if (counter == NULL) {
        return value;
    }

#ifndef ATOMIC_64BIT_OPERATIONS
    slapi_lock_mutex(counter->lock);
    value = counter->value;
    slapi_unlock_mutex(counter->lock);
#else
#ifdef LINUX
/* Use our own inline assembly for an atomic get if
 * the builtins aren't available. */
#if defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH
    /*
     * %0 = counter->value
     * %1 = value
     */
    __asm__ __volatile__(
#ifdef CPU_x86
        /* Save the PIC register */
        " pushl %%ebx;"
#endif /* CPU_x86 */
        /* Put value of counter->value in EDX:EAX */
        "retryget: movl %0, %%eax;"
        " movl 4%0, %%edx;"
        /* Copy EDX:EAX to ECX:EBX */
        " movl %%eax, %%ebx;"
        " movl %%edx, %%ecx;"
        /* If EDX:EAX and counter->value are the same,
         * replace counter->value with ECX:EBX */
        " lock; cmpxchg8b %0;"
        " jnz retryget;"
        /* Put retrieved value into value */
        " movl %%ebx, %1;"
        " movl %%ecx, 4%1;"
#ifdef CPU_x86
        /* Restore the PIC register */
        " popl %%ebx"
#endif /* CPU_x86 */
        : "+o" (counter->value), "=m" (value)
        :
#ifdef CPU_x86
        : "memory", "eax", "ecx", "edx", "cc");
#else
        : "memory", "eax", "ebx", "ecx", "edx", "cc");
#endif
#else
    while (1) {
        value = counter->value;
        if (__sync_bool_compare_and_swap(&(counter->value), value, value)) {
            break;
        }
    }
#endif /* CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH */
#elif defined(SOLARIS)
    while (1) {
        value = counter->value;
        if (value == _sparcv9_AtomicSet(&(counter->value), value)) {
            break;
        }
    }
#elif defined(HPUX)
    do {
        value = counter->value;
        /* Put value in a register for cmpxchg to compare against */
        _Asm_mov_to_ar(_AREG_CCV, value);
    } while (value != _Asm_cmpxchg(_FASZ_D, _SEM_ACQ, &(counter->value), value, _LDHINT_NONE));
#endif
#endif /* ATOMIC_64BIT_OPERATIONS */

    return value;
}
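
/*
 * Note on the patterns above: on 32-bit x86 an ordinary 8-byte load can
 * tear, so the read paths "swap" the counter with its own current value.
 * The exchange only succeeds when the value read matches the live 64-bit
 * contents, so a torn read is detected and retried, yielding an atomic
 * get without a dedicated atomic load instruction.
 */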

#if defined LINUX && (defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH)
/* On systems that don't have the 64-bit GCC atomic builtins, we need to
 * implement our own atomic add and subtract functions using inline
 * assembly code. */
PRUint64 __sync_add_and_fetch_8(PRUint64 *ptr, PRUint64 addval)
{
    PRUint64 retval = 0;

    /*
     * %0 = *ptr
     * %1 = retval
     * %2 = addval
     */
    __asm__ __volatile__(
#ifdef CPU_x86
        /* Save the PIC register */
        " pushl %%ebx;"
#endif /* CPU_x86 */
        /* Put value of *ptr in EDX:EAX */
        "retryadd: movl %0, %%eax;"
        " movl 4%0, %%edx;"
        /* Put addval in ECX:EBX */
        " movl %2, %%ebx;"
        " movl 4+%2, %%ecx;"
        /* Add value from EDX:EAX to value in ECX:EBX */
        " addl %%eax, %%ebx;"
        " adcl %%edx, %%ecx;"
        /* If EDX:EAX and *ptr are the same, replace *ptr with ECX:EBX */
        " lock; cmpxchg8b %0;"
        " jnz retryadd;"
        /* Put new value into retval */
        " movl %%ebx, %1;"
        " movl %%ecx, 4%1;"
#ifdef CPU_x86
        /* Restore the PIC register */
        " popl %%ebx"
#endif /* CPU_x86 */
        : "+o" (*ptr), "=m" (retval)
        : "m" (addval)
#ifdef CPU_x86
        : "memory", "eax", "ecx", "edx", "cc");
#else
        : "memory", "eax", "ebx", "ecx", "edx", "cc");
#endif

    return retval;
}

PRUint64 __sync_sub_and_fetch_8(PRUint64 *ptr, PRUint64 subval)
{
    PRUint64 retval = 0;

    /*
     * %0 = *ptr
     * %1 = retval
     * %2 = subval
     */
    __asm__ __volatile__(
#ifdef CPU_x86
        /* Save the PIC register */
        " pushl %%ebx;"
#endif /* CPU_x86 */
        /* Put value of *ptr in EDX:EAX */
        "retrysub: movl %0, %%eax;"
        " movl 4%0, %%edx;"
        /* Copy EDX:EAX to ECX:EBX */
        " movl %%eax, %%ebx;"
        " movl %%edx, %%ecx;"
        /* Subtract subval from value in ECX:EBX */
        " subl %2, %%ebx;"
        " sbbl 4+%2, %%ecx;"
        /* If EDX:EAX and *ptr are the same, replace *ptr with ECX:EBX */
        " lock; cmpxchg8b %0;"
        " jnz retrysub;"
        /* Put new value into retval */
        " movl %%ebx, %1;"
        " movl %%ecx, 4%1;"
#ifdef CPU_x86
        /* Restore the PIC register */
        " popl %%ebx"
#endif /* CPU_x86 */
        : "+o" (*ptr), "=m" (retval)
        : "m" (subval)
#ifdef CPU_x86
        : "memory", "eax", "ecx", "edx", "cc");
#else
        : "memory", "eax", "ebx", "ecx", "edx", "cc");
#endif

    return retval;
}
#endif /* LINUX && (defined CPU_x86 || !HAVE_DECL___SYNC_ADD_AND_FETCH) */