rwlock8.c

/*
 * rwlock8.c
 *
 * Hammer on a bunch of rwlocks to test robustness and fairness.
 * Printed stats should be roughly even for each thread.
 *
 * Yield during each access to exercise lock contention code paths
 * more than rwlock7.c does (particularly on uni-processor systems).
 */
#include "test.h"
#include <sys/timeb.h>

#ifdef __GNUC__
#include <stdlib.h>
#endif

#define THREADS    5
#define DATASIZE   7
#define ITERATIONS 100000
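
/*
 * With these settings each of the 5 threads makes 100,000 passes,
 * i.e. 500,000 lock acquisitions in total, spread round-robin over
 * the 7 shared data elements.
 */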
/*
 * Keep statistics for each thread.
 */
typedef struct thread_tag {
  int        thread_num;
  pthread_t  thread_id;
  int        updates;
  int        reads;
  int        changed;
  int        seed;
} thread_t;

/*
 * Read-write lock and shared data.
 */
typedef struct data_tag {
  pthread_rwlock_t lock;
  int              data;
  int              updates;
} data_t;

static thread_t threads[THREADS];
static data_t data[DATASIZE];
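
/*
 * Each element carries its own rwlock, so contention on one element
 * never blocks access to another; threads walk the array round-robin.
 */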
/*
 * Thread start routine that uses read-write locks.
 */
void *thread_routine (void *arg)
{
  thread_t *self = (thread_t *) arg;
  int iteration;
  int element = 0;
  int seed = self->seed;
  int interval = 1 + rand_r (&seed) % 71;

  self->changed = 0;

  for (iteration = 0; iteration < ITERATIONS; iteration++)
    {
      if (iteration % (ITERATIONS / 10) == 0)
        {
          putchar ('.');
          fflush (stdout);
        }
      /*
       * Every "interval" iterations, perform an update operation
       * (write lock instead of read lock).
       */
      if ((iteration % interval) == 0)
        {
          assert(pthread_rwlock_wrlock (&data[element].lock) == 0);
          data[element].data = self->thread_num;
          data[element].updates++;
          self->updates++;
          interval = 1 + rand_r (&seed) % 71;
          sched_yield ();
          assert(pthread_rwlock_unlock (&data[element].lock) == 0);
        }
      else
        {
          /*
           * Look at the current data element to see whether the
           * current thread last updated it. Count the times, to
           * report later.
           */
          assert(pthread_rwlock_rdlock (&data[element].lock) == 0);
          self->reads++;
          if (data[element].data != self->thread_num)
            {
              self->changed++;
              interval = 1 + self->changed % 71;
            }
          sched_yield ();
          assert(pthread_rwlock_unlock (&data[element].lock) == 0);
        }
      element = (element + 1) % DATASIZE;
    }
  return NULL;
}
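
/*
 * Note that sched_yield() above is called while the lock is still
 * held; this deliberately lets other threads queue up on the lock
 * and exercises the contention paths mentioned in the header comment.
 */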
int
main (int argc, char *argv[])
{
  int count;
  int data_count;
  int thread_updates = 0;
  int data_updates = 0;
  int seed = 1;
  PTW32_STRUCT_TIMEB currSysTime1;
  PTW32_STRUCT_TIMEB currSysTime2;

  /*
   * Initialize the shared data.
   */
  for (data_count = 0; data_count < DATASIZE; data_count++)
    {
      data[data_count].data = 0;
      data[data_count].updates = 0;
      assert(pthread_rwlock_init (&data[data_count].lock, NULL) == 0);
    }

  PTW32_FTIME(&currSysTime1);

  /*
   * Create THREADS threads to access shared data.
   */
  for (count = 0; count < THREADS; count++)
    {
      threads[count].thread_num = count;
      threads[count].updates = 0;
      threads[count].reads = 0;
      threads[count].seed = 1 + rand_r (&seed) % 71;
      assert(pthread_create (&threads[count].thread_id,
                             NULL, thread_routine, (void *) &threads[count]) == 0);
    }

  /*
   * Wait for all threads to complete, and collect statistics.
   */
  for (count = 0; count < THREADS; count++)
    {
      assert(pthread_join (threads[count].thread_id, NULL) == 0);
    }
  putchar ('\n');
  fflush (stdout);

  for (count = 0; count < THREADS; count++)
    {
      if (threads[count].changed > 0)
        {
          printf ("Thread %d found changed elements %d times\n",
                  count, threads[count].changed);
        }
    }
  putchar ('\n');
  fflush (stdout);

  for (count = 0; count < THREADS; count++)
    {
      thread_updates += threads[count].updates;
      printf ("%02d: seed %d, updates %d, reads %d\n",
              count, threads[count].seed,
              threads[count].updates, threads[count].reads);
    }
  putchar ('\n');
  fflush (stdout);

  /*
   * Collect statistics for the data.
   */
  for (data_count = 0; data_count < DATASIZE; data_count++)
    {
      data_updates += data[data_count].updates;
      printf ("data %02d: value %d, %d updates\n",
              data_count, data[data_count].data, data[data_count].updates);
      assert(pthread_rwlock_destroy (&data[data_count].lock) == 0);
    }

  printf ("%d thread updates, %d data updates\n",
          thread_updates, data_updates);

  PTW32_FTIME(&currSysTime2);

  printf ("\nstart: %ld/%d, stop: %ld/%d, duration: %ld\n",
          (long) currSysTime1.time, currSysTime1.millitm,
          (long) currSysTime2.time, currSysTime2.millitm,
          (long) ((currSysTime2.time * 1000 + currSysTime2.millitm) -
                  (currSysTime1.time * 1000 + currSysTime1.millitm)));

  return 0;
}
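
/*
 * How to read the output (a rough guide, not exact): the per-thread
 * update and read counts should come out roughly even across the five
 * threads if the rwlock implementation is fair, and the thread-update
 * total must equal the data-update total, since each write-locked
 * update increments both counters together. The PTW32_STRUCT_TIMEB /
 * PTW32_FTIME wrappers are presumably supplied by the harness's
 * "test.h"; this file is not intended to build standalone.
 */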