profiler.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117
  1. #include <inttypes.h>
  2. #include "profiler.h"
  3. #include "darray.h"
  4. #include "dstr.h"
  5. #include "platform.h"
  6. #include "threading.h"
  7. #include <math.h>
  8. #include <zlib.h>
  9. //#define TRACK_OVERHEAD
/* A point-in-time copy of all profiler data: one snapshot entry per
 * registered root (see profile_snapshot_create()). */
struct profiler_snapshot {
    DARRAY(profiler_snapshot_entry_t) roots;
};
/* Snapshot of a single profiled scope: flattened timing histograms plus
 * aggregate stats.  All *_time values are in microseconds (converted from
 * ns by diff_ns_to_usec()). */
struct profiler_snapshot_entry {
    const char *name;                         /* interned; compared by pointer */
    profiler_time_entries_t times;            /* (time_delta, count) histogram */
    uint64_t min_time;
    uint64_t max_time;
    uint64_t overall_count;                   /* total calls across all buckets */
    profiler_time_entries_t times_between_calls; /* only filled for roots with
                                                  * a registered interval */
    uint64_t expected_time_between_calls;     /* 0 = no interval registered */
    uint64_t min_time_between_calls;
    uint64_t max_time_between_calls;
    uint64_t overall_between_calls_count;
    DARRAY(profiler_snapshot_entry_t) children;
};
/* profiler_time_entry is declared in the public header (profiler.h);
 * profile_call is defined below. */
typedef struct profiler_time_entry profiler_time_entry;
typedef struct profile_call profile_call;
/* One in-flight (or completed, not yet merged) invocation of a profiled
 * scope.  Calls form a tree per thread via parent/children; timestamps are
 * raw ns from os_gettime_ns(). */
struct profile_call {
    const char *name;                 /* interned; compared by pointer */
#ifdef TRACK_OVERHEAD
    uint64_t overhead_start;          /* ns before start_time was taken */
#endif
    uint64_t start_time;              /* ns */
    uint64_t end_time;                /* ns */
#ifdef TRACK_OVERHEAD
    uint64_t overhead_end;            /* ns after end_time was taken */
#endif
    uint64_t expected_time_between_calls;
    DARRAY(profile_call) children;
    profile_call *parent;             /* NULL for a root call */
};
/* One slot of the open-addressing hashmap below.  probes == 0 marks an
 * empty slot; otherwise it is the 1-based probe distance of the resident
 * entry (used for Robin Hood displacement). */
typedef struct profile_times_table_entry profile_times_table_entry;
struct profile_times_table_entry {
    size_t probes;
    profiler_time_entry entry;        /* key: entry.time_delta (µs); value: count */
};
/* Open-addressing hashmap from µs time delta -> call count, with Robin
 * Hood probing.  On growth the previous table is kept in old_entries and
 * migrated incrementally (see migrate_old_entries()) to bound per-call
 * work. */
typedef struct profile_times_table profile_times_table;
struct profile_times_table {
    size_t size;                      /* slot count of entries[] */
    size_t occupied;                  /* filled slots in entries[] */
    size_t max_probe_count;           /* longest probe sequence seen */
    profile_times_table_entry *entries;

    /* Pending migration state from the previous (smaller) table. */
    size_t old_start_index;           /* next old slot to examine */
    size_t old_occupied;              /* entries still awaiting migration */
    profile_times_table_entry *old_entries; /* NULL when no migration pending */
};
/* Accumulated timing data for one scope in the merged (cross-call) tree.
 * All times stored in the maps are µs. */
typedef struct profile_entry profile_entry;
struct profile_entry {
    const char *name;                 /* interned; compared by pointer */
    profile_times_table times;        /* call duration histogram */
#ifdef TRACK_OVERHEAD
    profile_times_table overhead;     /* profiler bookkeeping cost histogram */
#endif
    uint64_t expected_time_between_calls; /* µs; 0 = not registered */
    profile_times_table times_between_calls;
    DARRAY(profile_entry) children;
};
/* A registered profiling root.  Each root has its own mutex so merging
 * one root's data does not block the others; prev_call keeps the last
 * merged call tree alive for time-between-calls measurement. */
typedef struct profile_root_entry profile_root_entry;
struct profile_root_entry {
    pthread_mutex_t *mutex;           /* guards entry and prev_call contents */
    const char *name;                 /* interned; compared by pointer */
    profile_entry *entry;
    profile_call *prev_call;          /* previous merged context, or NULL */
};
  75. static inline uint64_t diff_ns_to_usec(uint64_t prev, uint64_t next)
  76. {
  77. return (next - prev + 500) / 1000;
  78. }
  79. static inline void update_max_probes(profile_times_table *map, size_t val)
  80. {
  81. map->max_probe_count = map->max_probe_count < val ? val : map->max_probe_count;
  82. }
  83. static void migrate_old_entries(profile_times_table *map, bool limit_items);
  84. static void grow_hashmap(profile_times_table *map, uint64_t usec, uint64_t count);
/* Inserts (usec -> count) into the map, or adds count to an existing
 * bucket with the same usec key.  Uses Robin Hood hashing: when the probe
 * distance of the item being inserted exceeds that of a resident entry,
 * the two are swapped and insertion continues with the displaced entry.
 * Grows the table when the load factor would exceed 0.7. */
static void add_hashmap_entry(profile_times_table *map, uint64_t usec, uint64_t count)
{
    size_t probes = 1;
    size_t start = usec % map->size;
    /* NOTE: probing starts at offset 1 from the home slot, so slot
     * `start` itself is never used; harmless, just an offset quirk. */
    for (;; probes += 1) {
        size_t idx = (start + probes) % map->size;
        profile_times_table_entry *entry = &map->entries[idx];
        if (!entry->probes) {
            /* Empty slot: claim it. */
            entry->probes = probes;
            entry->entry.time_delta = usec;
            entry->entry.count = count;
            map->occupied += 1;
            update_max_probes(map, probes);
            return;
        }
        if (entry->entry.time_delta == usec) {
            /* Same key: merge counts. */
            entry->entry.count += count;
            return;
        }
        if (entry->probes >= probes)
            continue; /* resident is at least as far from home; keep probing */
        if (map->occupied / (double)map->size > 0.7) {
            grow_hashmap(map, usec, count);
            return;
        }
        /* Robin Hood swap: displace the "richer" resident entry and
         * continue inserting it from its own home slot. */
        size_t old_probes = entry->probes;
        uint64_t old_count = entry->entry.count;
        uint64_t old_usec = entry->entry.time_delta;
        entry->probes = probes;
        entry->entry.count = count;
        entry->entry.time_delta = usec;
        update_max_probes(map, probes);
        probes = old_probes;
        count = old_count;
        usec = old_usec;
        start = usec % map->size;
    }
}
  123. static void init_hashmap(profile_times_table *map, size_t size)
  124. {
  125. map->size = size;
  126. map->occupied = 0;
  127. map->max_probe_count = 0;
  128. map->entries = bzalloc(sizeof(profile_times_table_entry) * size);
  129. map->old_start_index = 0;
  130. map->old_occupied = 0;
  131. map->old_entries = NULL;
  132. }
/* Moves entries from the pre-growth table (old_entries) into the current
 * one.  With limit_items set, at most 8 slots are examined per call so
 * the migration cost is amortized across inserts; with it clear, the old
 * table is drained completely.  The old buffer itself is only freed when
 * a later call finds old_occupied already 0. */
static void migrate_old_entries(profile_times_table *map, bool limit_items)
{
    if (!map->old_entries)
        return;

    if (!map->old_occupied) {
        bfree(map->old_entries);
        map->old_entries = NULL;
        return;
    }

    /* old_start_index never runs past the old table: the loop stops once
     * old_occupied reaches 0, and all occupied slots lie within it. */
    for (size_t i = 0; !limit_items || i < 8; i++, map->old_start_index++) {
        if (!map->old_occupied)
            return;
        profile_times_table_entry *entry = &map->old_entries[map->old_start_index];
        if (!entry->probes)
            continue; /* empty slot; costs an iteration but migrates nothing */
        add_hashmap_entry(map, entry->entry.time_delta, entry->entry.count);
        map->old_occupied -= 1;
    }
}
  152. static void grow_hashmap(profile_times_table *map, uint64_t usec, uint64_t count)
  153. {
  154. migrate_old_entries(map, false);
  155. size_t old_size = map->size;
  156. size_t old_occupied = map->occupied;
  157. profile_times_table_entry *entries = map->entries;
  158. init_hashmap(map, (old_size * 2 < 16) ? 16 : (old_size * 2));
  159. map->old_occupied = old_occupied;
  160. map->old_entries = entries;
  161. add_hashmap_entry(map, usec, count);
  162. }
  163. static profile_entry *init_entry(profile_entry *entry, const char *name)
  164. {
  165. entry->name = name;
  166. init_hashmap(&entry->times, 1);
  167. #ifdef TRACK_OVERHEAD
  168. init_hashmap(&entry->overhead, 1);
  169. #endif
  170. entry->expected_time_between_calls = 0;
  171. init_hashmap(&entry->times_between_calls, 1);
  172. return entry;
  173. }
  174. static profile_entry *get_child(profile_entry *parent, const char *name)
  175. {
  176. const size_t num = parent->children.num;
  177. for (size_t i = 0; i < num; i++) {
  178. profile_entry *child = &parent->children.array[i];
  179. if (child->name == name)
  180. return child;
  181. }
  182. return init_entry(da_push_back_new(parent->children), name);
  183. }
/* Recursively folds a completed call tree into the persistent entry tree:
 * each call contributes one µs bucket to its entry's duration histogram,
 * and a root call with a registered interval also contributes the time
 * since the previous root call.  prev_call is only non-NULL at the root
 * (children are passed NULL). */
static void merge_call(profile_entry *entry, profile_call *call, profile_call *prev_call)
{
    const size_t num = call->children.num;
    for (size_t i = 0; i < num; i++) {
        profile_call *child = &call->children.array[i];
        merge_call(get_child(entry, child->name), child, NULL);
    }

    if (entry->expected_time_between_calls != 0 && prev_call) {
        /* Amortize pending table migration before inserting. */
        migrate_old_entries(&entry->times_between_calls, true);
        uint64_t usec = diff_ns_to_usec(prev_call->start_time, call->start_time);
        add_hashmap_entry(&entry->times_between_calls, usec, 1);
    }

    migrate_old_entries(&entry->times, true);
    uint64_t usec = diff_ns_to_usec(call->start_time, call->end_time);
    add_hashmap_entry(&entry->times, usec, 1);

#ifdef TRACK_OVERHEAD
    /* Overhead = profiler bookkeeping time outside [start_time, end_time]. */
    migrate_old_entries(&entry->overhead, true);
    usec = diff_ns_to_usec(call->overhead_start, call->start_time);
    usec += diff_ns_to_usec(call->end_time, call->overhead_end);
    add_hashmap_entry(&entry->overhead, usec, 1);
#endif
}
/* Global profiler state; `enabled` and `root_entries` are guarded by
 * root_mutex.  Each thread keeps its own active-call stack (innermost
 * call) and a cached enable flag refreshed via profile_reenable_thread(). */
static bool enabled = false;
static pthread_mutex_t root_mutex = PTHREAD_MUTEX_INITIALIZER;
static DARRAY(profile_root_entry) root_entries;
static THREAD_LOCAL profile_call *thread_context = NULL;
static THREAD_LOCAL bool thread_enabled = true;
/* Globally enables profiling.  Threads that saw it disabled pick the new
 * state up via profile_reenable_thread(). */
void profiler_start(void)
{
    pthread_mutex_lock(&root_mutex);
    enabled = true;
    pthread_mutex_unlock(&root_mutex);
}
/* Globally disables profiling.  Per-thread flags are lazily disabled the
 * next time a thread hits lock_root(). */
void profiler_stop(void)
{
    pthread_mutex_lock(&root_mutex);
    enabled = false;
    pthread_mutex_unlock(&root_mutex);
}
/* Re-syncs this thread's cached enable flag with the global one; only
 * needed after the thread disabled itself in lock_root(). */
void profile_reenable_thread(void)
{
    if (thread_enabled)
        return;

    pthread_mutex_lock(&root_mutex);
    thread_enabled = enabled;
    pthread_mutex_unlock(&root_mutex);
}
/* Acquires root_mutex if profiling is enabled.
 * Returns true WITH root_mutex HELD — the caller must unlock it.
 * Returns false (mutex released) when profiling is off, also caching the
 * disabled state in this thread's flag. */
static bool lock_root(void)
{
    pthread_mutex_lock(&root_mutex);
    if (!enabled) {
        pthread_mutex_unlock(&root_mutex);
        thread_enabled = false;
        return false;
    }
    return true;
}
  241. static profile_root_entry *get_root_entry(const char *name)
  242. {
  243. profile_root_entry *r_entry = NULL;
  244. for (size_t i = 0; i < root_entries.num; i++) {
  245. if (root_entries.array[i].name == name) {
  246. r_entry = &root_entries.array[i];
  247. break;
  248. }
  249. }
  250. if (!r_entry) {
  251. r_entry = da_push_back_new(root_entries);
  252. r_entry->mutex = bmalloc(sizeof(pthread_mutex_t));
  253. pthread_mutex_init(r_entry->mutex, NULL);
  254. r_entry->name = name;
  255. r_entry->entry = bzalloc(sizeof(profile_entry));
  256. init_entry(r_entry->entry, name);
  257. }
  258. return r_entry;
  259. }
  260. void profile_register_root(const char *name, uint64_t expected_time_between_calls)
  261. {
  262. if (!lock_root())
  263. return;
  264. get_root_entry(name)->entry->expected_time_between_calls = (expected_time_between_calls + 500) / 1000;
  265. pthread_mutex_unlock(&root_mutex);
  266. }
  267. static void free_call_context(profile_call *context);
/* Merges a finished root call tree into its root entry.  Lock order is
 * root_mutex -> per-root mutex; root_mutex is dropped once the per-root
 * mutex is held so other roots can proceed during the merge.  `context`
 * is retained as the root's prev_call (for time-between-calls on the
 * next merge); the previous prev_call is freed after being used. */
static void merge_context(profile_call *context)
{
    pthread_mutex_t *mutex = NULL;
    profile_entry *entry = NULL;
    profile_call *prev_call = NULL;

    if (!lock_root()) {
        /* Profiling disabled: nothing to merge into; just free. */
        free_call_context(context);
        return;
    }

    profile_root_entry *r_entry = get_root_entry(context->name);

    mutex = r_entry->mutex;
    entry = r_entry->entry;
    prev_call = r_entry->prev_call;

    r_entry->prev_call = context;

    pthread_mutex_lock(mutex);
    pthread_mutex_unlock(&root_mutex);

    merge_call(entry, context, prev_call);

    pthread_mutex_unlock(mutex);

    free_call_context(prev_call);
}
/* Opens a profiled scope named `name` on the calling thread.  A nested
 * call is stored inside its parent's children darray; a root call is
 * heap-allocated.  start_time is taken last so setup cost is excluded
 * from the measured span. */
void profile_start(const char *name)
{
    if (!thread_enabled)
        return;

    profile_call new_call = {
        .name = name,
#ifdef TRACK_OVERHEAD
        .overhead_start = os_gettime_ns(),
#endif
        .parent = thread_context,
    };

    profile_call *call = NULL;

    if (new_call.parent) {
        /* NOTE(review): `call` points into the parent's children array;
         * this presumably stays valid because only the innermost call
         * pushes further children — confirm against darray growth. */
        size_t idx = da_push_back(new_call.parent->children, &new_call);
        call = &new_call.parent->children.array[idx];
    } else {
        call = bmalloc(sizeof(profile_call));
        memcpy(call, &new_call, sizeof(profile_call));
    }

    thread_context = call;
    call->start_time = os_gettime_ns();
}
/* Closes the innermost profiled scope.  The timestamp is taken first so
 * the error handling below is not counted.  On a start/end name mismatch
 * it logs and, if `name` matches some ancestor, force-closes the
 * intervening scopes; otherwise the end is dropped.  When the root scope
 * closes, the whole call tree is merged via merge_context(). */
void profile_end(const char *name)
{
    uint64_t end = os_gettime_ns();
    if (!thread_enabled)
        return;

    profile_call *call = thread_context;
    if (!call) {
        blog(LOG_ERROR, "Called profile end with no active profile");
        return;
    }

    if (!call->name)
        call->name = name;

    if (call->name != name) { /* pointer-identity compare (interned names) */
        blog(LOG_ERROR,
        "Called profile end with mismatching name: "
        "start(\"%s\"[%p]) <-> end(\"%s\"[%p])",
        call->name, call->name, name, name);

        /* Only recover if `name` is actually an open ancestor scope. */
        profile_call *parent = call->parent;
        while (parent && parent->parent && parent->name != name)
            parent = parent->parent;
        if (!parent || parent->name != name)
            return;

        /* Unwind: close each unclosed inner scope, then re-enter for
         * the matching one via the recursive profile_end() calls. */
        while (call->name != name) {
            profile_end(call->name);
            call = call->parent;
        }
    }

    thread_context = call->parent;

    call->end_time = end;
#ifdef TRACK_OVERHEAD
    call->overhead_end = os_gettime_ns();
#endif

    if (call->parent)
        return;

    merge_context(call);
}
  346. static int profiler_time_entry_compare(const void *first, const void *second)
  347. {
  348. int64_t diff = ((profiler_time_entry *)second)->time_delta - ((profiler_time_entry *)first)->time_delta;
  349. return diff < 0 ? -1 : (diff > 0 ? 1 : 0);
  350. }
  351. static uint64_t copy_map_to_array(profile_times_table *map, profiler_time_entries_t *entry_buffer, uint64_t *min_,
  352. uint64_t *max_)
  353. {
  354. migrate_old_entries(map, false);
  355. da_reserve(*entry_buffer, map->occupied);
  356. da_resize(*entry_buffer, 0);
  357. uint64_t min__ = ~(uint64_t)0;
  358. uint64_t max__ = 0;
  359. uint64_t calls = 0;
  360. for (size_t i = 0; i < map->size; i++) {
  361. if (!map->entries[i].probes)
  362. continue;
  363. profiler_time_entry *entry = &map->entries[i].entry;
  364. da_push_back(*entry_buffer, entry);
  365. calls += entry->count;
  366. min__ = (min__ < entry->time_delta) ? min__ : entry->time_delta;
  367. max__ = (max__ > entry->time_delta) ? max__ : entry->time_delta;
  368. }
  369. if (min_)
  370. *min_ = min__;
  371. if (max_)
  372. *max_ = max__;
  373. return calls;
  374. }
/* Common signature for the two tree printers so profile_print_func() can
 * drive either one. */
typedef void (*profile_entry_print_func)(profiler_snapshot_entry_t *entry, struct dstr *indent_buffer,
                     struct dstr *output_buffer, unsigned indent, uint64_t active,
                     uint64_t parent_calls);

/* UTF-8 characters */
#define VPIPE_RIGHT " \xe2\x94\xa3" /* " ┣" branch with continuation */
#define VPIPE " \xe2\x94\x83"       /* " ┃" vertical continuation */
#define DOWN_RIGHT " \xe2\x94\x97"  /* " ┗" last-child corner */
/* Rebuilds the box-drawing prefix for one tree row into indent_buffer.
 * Bit i of `active` means depth i still has siblings to come, so a
 * vertical fragment is drawn there; the deepest column gets a branch or
 * corner depending on that bit. */
static void make_indent_string(struct dstr *indent_buffer, unsigned indent, uint64_t active)
{
    /* Reset length without freeing; the allocation is reused per row. */
    indent_buffer->len = 0;
    if (!indent) {
        /* Root row: ensure array[] is a valid empty string. */
        dstr_cat_ch(indent_buffer, 0);
        return;
    }

    for (size_t i = 0; i < indent; i++) {
        const char *fragment = "";
        bool last = i + 1 == indent;

        if (active & ((uint64_t)1 << i))
            fragment = last ? VPIPE_RIGHT : VPIPE;
        else
            fragment = last ? DOWN_RIGHT : " ";

        dstr_cat(indent_buffer, fragment);
    }
}
  399. static void gather_stats(uint64_t expected_time_between_calls, profiler_time_entries_t *entries, uint64_t calls,
  400. uint64_t *percentile99, uint64_t *median, double *percent_within_bounds)
  401. {
  402. if (!entries->num) {
  403. *percentile99 = 0;
  404. *median = 0;
  405. *percent_within_bounds = 0.;
  406. return;
  407. }
  408. /*if (entry_buffer->num > 2)
  409. blog(LOG_INFO, "buffer-size %lu, overall count %llu\n"
  410. "map-size %lu, occupied %lu, probes %lu",
  411. entry_buffer->num, calls,
  412. map->size, map->occupied,
  413. map->max_probe_count);*/
  414. uint64_t accu = 0;
  415. for (size_t i = 0; i < entries->num; i++) {
  416. uint64_t old_accu = accu;
  417. accu += entries->array[i].count;
  418. if (old_accu < calls * 0.01 && accu >= calls * 0.01)
  419. *percentile99 = entries->array[i].time_delta;
  420. else if (old_accu < calls * 0.5 && accu >= calls * 0.5) {
  421. *median = entries->array[i].time_delta;
  422. break;
  423. }
  424. }
  425. *percent_within_bounds = 0.;
  426. if (!expected_time_between_calls)
  427. return;
  428. accu = 0;
  429. for (size_t i = 0; i < entries->num; i++) {
  430. profiler_time_entry *entry = &entries->array[i];
  431. if (entry->time_delta < expected_time_between_calls)
  432. break;
  433. accu += entry->count;
  434. }
  435. *percent_within_bounds = (1. - (double)accu / calls) * 100;
  436. }
  437. #define G_MS "g\xC2\xA0ms"
  438. static void profile_print_entry(profiler_snapshot_entry_t *entry, struct dstr *indent_buffer,
  439. struct dstr *output_buffer, unsigned indent, uint64_t active, uint64_t parent_calls)
  440. {
  441. uint64_t calls = entry->overall_count;
  442. uint64_t min_ = entry->min_time;
  443. uint64_t max_ = entry->max_time;
  444. uint64_t percentile99 = 0;
  445. uint64_t median = 0;
  446. double percent_within_bounds = 0.;
  447. gather_stats(entry->expected_time_between_calls, &entry->times, calls, &percentile99, &median,
  448. &percent_within_bounds);
  449. make_indent_string(indent_buffer, indent, active);
  450. if (min_ == max_) {
  451. dstr_printf(output_buffer, "%s%s: %" G_MS, indent_buffer->array, entry->name, min_ / 1000.);
  452. } else {
  453. dstr_printf(output_buffer,
  454. "%s%s: min=%" G_MS ", median=%" G_MS ", "
  455. "max=%" G_MS ", 99th percentile=%" G_MS,
  456. indent_buffer->array, entry->name, min_ / 1000., median / 1000., max_ / 1000.,
  457. percentile99 / 1000.);
  458. if (entry->expected_time_between_calls) {
  459. double expected_ms = entry->expected_time_between_calls / 1000.;
  460. dstr_catf(output_buffer, ", %g%% below %" G_MS, percent_within_bounds, expected_ms);
  461. }
  462. }
  463. if (parent_calls && calls != parent_calls) {
  464. double calls_per_parent = (double)calls / parent_calls;
  465. if (lround(calls_per_parent * 10) != 10)
  466. dstr_catf(output_buffer, ", %g calls per parent call", calls_per_parent);
  467. }
  468. blog(LOG_INFO, "%s", output_buffer->array);
  469. active |= (uint64_t)1 << indent;
  470. for (size_t i = 0; i < entry->children.num; i++) {
  471. if ((i + 1) == entry->children.num)
  472. active &= (1 << indent) - 1;
  473. profile_print_entry(&entry->children.array[i], indent_buffer, output_buffer, indent + 1, active, calls);
  474. }
  475. }
/* Computes, from a histogram sorted by time_delta descending, the median
 * interval plus the percentage of intervals below lower_bound (*lower),
 * within [lower_bound, upper_bound] (*percent), and above upper_bound
 * (*higher).  min_/max_ are precomputed extremes used for early exits. */
static void gather_stats_between(profiler_time_entries_t *entries, uint64_t calls, uint64_t lower_bound,
                 uint64_t upper_bound, uint64_t min_, uint64_t max_, uint64_t *median, double *percent,
                 double *lower, double *higher)
{
    *median = 0;
    *percent = 0.;
    *lower = 0.;
    *higher = 0.;

    if (!entries->num)
        return;

    /* Median: first bucket whose cumulative count reaches 50%. */
    uint64_t accu = 0;
    for (size_t i = 0; i < entries->num; i++) {
        accu += entries->array[i].count;
        if (accu < calls * 0.5)
            continue;
        *median = entries->array[i].time_delta;
        break;
    }

    bool found_upper_bound = max_ <= upper_bound;
    bool found_lower_bound = false;

    if (min_ >= upper_bound) {
        /* Everything is at/above the upper bound. */
        *higher = 100.;
        return;
    }

    if (found_upper_bound && min_ >= lower_bound) {
        /* Everything falls inside the bounds. */
        *percent = 100.;
        return;
    }

    /* Descending walk: accumulate counts until each bound is crossed,
     * attributing the accumulated run to the bucket above that bound. */
    accu = 0;
    for (size_t i = 0; i < entries->num; i++) {
        uint64_t delta = entries->array[i].time_delta;

        if (!found_upper_bound && delta <= upper_bound) {
            *higher = (double)accu / calls * 100;
            accu = 0;
            found_upper_bound = true;
        }

        if (!found_lower_bound && delta < lower_bound) {
            *percent = (double)accu / calls * 100;
            accu = 0;
            found_lower_bound = true;
        }

        accu += entries->array[i].count;
    }

    /* Whatever remained accumulated belongs to the last region entered. */
    if (!found_upper_bound) {
        *higher = 100.;
    } else if (!found_lower_bound) {
        *percent = (double)accu / calls * 100;
    } else {
        *lower = (double)accu / calls * 100;
    }
}
  527. static void profile_print_entry_expected(profiler_snapshot_entry_t *entry, struct dstr *indent_buffer,
  528. struct dstr *output_buffer, unsigned indent, uint64_t active,
  529. uint64_t parent_calls)
  530. {
  531. UNUSED_PARAMETER(parent_calls);
  532. if (!entry->expected_time_between_calls)
  533. return;
  534. uint64_t expected_time = entry->expected_time_between_calls;
  535. uint64_t min_ = entry->min_time_between_calls;
  536. uint64_t max_ = entry->max_time_between_calls;
  537. uint64_t median = 0;
  538. double percent = 0.;
  539. double lower = 0.;
  540. double higher = 0.;
  541. gather_stats_between(&entry->times_between_calls, entry->overall_between_calls_count,
  542. (uint64_t)(expected_time * 0.98), (uint64_t)(expected_time * 1.02 + 0.5), min_, max_,
  543. &median, &percent, &lower, &higher);
  544. make_indent_string(indent_buffer, indent, active);
  545. blog(LOG_INFO,
  546. "%s%s: min=%" G_MS ", median=%" G_MS ", max=%" G_MS ", %g%% "
  547. "within ±2%% of %" G_MS " (%g%% lower, %g%% higher)",
  548. indent_buffer->array, entry->name, min_ / 1000., median / 1000., max_ / 1000., percent,
  549. expected_time / 1000., lower, higher);
  550. active |= (uint64_t)1 << indent;
  551. for (size_t i = 0; i < entry->children.num; i++) {
  552. if ((i + 1) == entry->children.num)
  553. active &= (1 << indent) - 1;
  554. profile_print_entry_expected(&entry->children.array[i], indent_buffer, output_buffer, indent + 1,
  555. active, 0);
  556. }
  557. }
/* Shared driver for the two printers: logs `intro`, prints every root
 * with the given printer, then a separator.  When `snap` is NULL a
 * temporary snapshot is created and freed here. */
void profile_print_func(const char *intro, profile_entry_print_func print, profiler_snapshot_t *snap)
{
    struct dstr indent_buffer = {0};
    struct dstr output_buffer = {0};

    bool free_snapshot = !snap;
    if (!snap)
        snap = profile_snapshot_create();

    blog(LOG_INFO, "%s", intro);
    for (size_t i = 0; i < snap->roots.num; i++) {
        print(&snap->roots.array[i], &indent_buffer, &output_buffer, 0, 0, 0);
    }
    blog(LOG_INFO, "=================================================");

    if (free_snapshot)
        profile_snapshot_free(snap);

    dstr_free(&output_buffer);
    dstr_free(&indent_buffer);
}
/* Logs per-scope timing statistics; NULL snap means "snapshot now". */
void profiler_print(profiler_snapshot_t *snap)
{
    profile_print_func("== Profiler Results =============================", profile_print_entry, snap);
}
/* Logs time-between-calls statistics; NULL snap means "snapshot now". */
void profiler_print_time_between_calls(profiler_snapshot_t *snap)
{
    profile_print_func("== Profiler Time Between Calls ==================", profile_print_entry_expected, snap);
}
  583. static void free_call_children(profile_call *call)
  584. {
  585. if (!call)
  586. return;
  587. const size_t num = call->children.num;
  588. for (size_t i = 0; i < num; i++)
  589. free_call_children(&call->children.array[i]);
  590. da_free(call->children);
  591. }
/* Frees a heap-allocated root call tree; safe on NULL (both helpers
 * tolerate it). */
static void free_call_context(profile_call *context)
{
    free_call_children(context);
    bfree(context);
}
  597. static void free_hashmap(profile_times_table *map)
  598. {
  599. map->size = 0;
  600. bfree(map->entries);
  601. map->entries = NULL;
  602. bfree(map->old_entries);
  603. map->old_entries = NULL;
  604. }
  605. static void free_profile_entry(profile_entry *entry)
  606. {
  607. for (size_t i = 0; i < entry->children.num; i++)
  608. free_profile_entry(&entry->children.array[i]);
  609. free_hashmap(&entry->times);
  610. #ifdef TRACK_OVERHEAD
  611. free_hashmap(&entry->overhead);
  612. #endif
  613. free_hashmap(&entry->times_between_calls);
  614. da_free(entry->children);
  615. }
/* Tears down all profiler state.  Roots are moved out under root_mutex
 * first so no new merges can find them; each root's mutex is then
 * lock/unlocked once to drain any in-flight merge before destroying it.
 * NOTE(review): root_mutex (statically initialized) is destroyed at the
 * end — the profiler must not be used again afterwards; confirm callers
 * treat this as final shutdown. */
void profiler_free(void)
{
    DARRAY(profile_root_entry) old_root_entries = {0};

    pthread_mutex_lock(&root_mutex);
    enabled = false;
    da_move(old_root_entries, root_entries);
    pthread_mutex_unlock(&root_mutex);

    for (size_t i = 0; i < old_root_entries.num; i++) {
        profile_root_entry *entry = &old_root_entries.array[i];

        /* Wait for any merge currently holding this root's mutex. */
        pthread_mutex_lock(entry->mutex);
        pthread_mutex_unlock(entry->mutex);

        pthread_mutex_destroy(entry->mutex);
        bfree(entry->mutex);
        entry->mutex = NULL;

        free_call_context(entry->prev_call);
        free_profile_entry(entry->entry);
        bfree(entry->entry);
    }

    da_free(old_root_entries);

    pthread_mutex_destroy(&root_mutex);
}
  637. /* ------------------------------------------------------------------------- */
  638. /* Profiler name storage */
/* Owns the formatted name strings handed out by profile_store_name();
 * the mutex guards the names darray for concurrent registration. */
struct profiler_name_store {
    pthread_mutex_t mutex;
    DARRAY(char *) names; /* each element freed in profiler_name_store_free() */
};
  643. profiler_name_store_t *profiler_name_store_create(void)
  644. {
  645. profiler_name_store_t *store = bzalloc(sizeof(profiler_name_store_t));
  646. if (pthread_mutex_init(&store->mutex, NULL))
  647. goto error;
  648. return store;
  649. error:
  650. bfree(store);
  651. return NULL;
  652. }
  653. void profiler_name_store_free(profiler_name_store_t *store)
  654. {
  655. if (!store)
  656. return;
  657. for (size_t i = 0; i < store->names.num; i++)
  658. bfree(store->names.array[i]);
  659. da_free(store->names);
  660. pthread_mutex_destroy(&store->mutex);
  661. bfree(store);
  662. }
/* Formats a name into a newly allocated string, records it in the store
 * (which owns it until profiler_name_store_free()), and returns it.  The
 * stable pointer makes the result suitable for the profiler's
 * pointer-identity name comparisons. */
const char *profile_store_name(profiler_name_store_t *store, const char *format, ...)
{
    va_list args;
    va_start(args, format);

    struct dstr str = {0};
    dstr_vprintf(&str, format, args);

    va_end(args);

    const char *result = NULL;

    pthread_mutex_lock(&store->mutex);
    /* Ownership of str.array transfers to the names darray here. */
    size_t idx = da_push_back(store->names, &str.array);
    result = store->names.array[idx];
    pthread_mutex_unlock(&store->mutex);

    return result;
}
  677. /* ------------------------------------------------------------------------- */
  678. /* Profiler data access */
/* Deep-copies a live profile_entry subtree into snapshot form,
 * flattening each hashmap into an array (sorted later by
 * sort_snapshot_entry()). */
static void add_entry_to_snapshot(profile_entry *entry, profiler_snapshot_entry_t *s_entry)
{
    s_entry->name = entry->name;
    s_entry->overall_count =
        copy_map_to_array(&entry->times, &s_entry->times, &s_entry->min_time, &s_entry->max_time);

    /* Assignment-in-condition is intentional: the between-calls map is
     * only copied for entries that registered an expected interval. */
    if ((s_entry->expected_time_between_calls = entry->expected_time_between_calls))
        s_entry->overall_between_calls_count =
            copy_map_to_array(&entry->times_between_calls, &s_entry->times_between_calls,
                      &s_entry->min_time_between_calls, &s_entry->max_time_between_calls);

    da_reserve(s_entry->children, entry->children.num);
    for (size_t i = 0; i < entry->children.num; i++)
        add_entry_to_snapshot(&entry->children.array[i], da_push_back_new(s_entry->children));
}
  692. static void sort_snapshot_entry(profiler_snapshot_entry_t *entry)
  693. {
  694. qsort(entry->times.array, entry->times.num, sizeof(profiler_time_entry), profiler_time_entry_compare);
  695. if (entry->expected_time_between_calls)
  696. qsort(entry->times_between_calls.array, entry->times_between_calls.num, sizeof(profiler_time_entry),
  697. profiler_time_entry_compare);
  698. for (size_t i = 0; i < entry->children.num; i++)
  699. sort_snapshot_entry(&entry->children.array[i]);
  700. }
/* Captures all current profiler data into a new snapshot.  Each root is
 * copied under its own mutex (held briefly, inside root_mutex); sorting
 * happens afterwards, outside all locks.  Caller owns the result and
 * frees it with profile_snapshot_free(). */
profiler_snapshot_t *profile_snapshot_create(void)
{
    profiler_snapshot_t *snap = bzalloc(sizeof(profiler_snapshot_t));

    pthread_mutex_lock(&root_mutex);
    da_reserve(snap->roots, root_entries.num);
    for (size_t i = 0; i < root_entries.num; i++) {
        pthread_mutex_lock(root_entries.array[i].mutex);
        add_entry_to_snapshot(root_entries.array[i].entry, da_push_back_new(snap->roots));
        pthread_mutex_unlock(root_entries.array[i].mutex);
    }
    pthread_mutex_unlock(&root_mutex);

    for (size_t i = 0; i < snap->roots.num; i++)
        sort_snapshot_entry(&snap->roots.array[i]);

    return snap;
}
  716. static void free_snapshot_entry(profiler_snapshot_entry_t *entry)
  717. {
  718. for (size_t i = 0; i < entry->children.num; i++)
  719. free_snapshot_entry(&entry->children.array[i]);
  720. da_free(entry->children);
  721. da_free(entry->times_between_calls);
  722. da_free(entry->times);
  723. }
  724. void profile_snapshot_free(profiler_snapshot_t *snap)
  725. {
  726. if (!snap)
  727. return;
  728. for (size_t i = 0; i < snap->roots.num; i++)
  729. free_snapshot_entry(&snap->roots.array[i]);
  730. da_free(snap->roots);
  731. bfree(snap);
  732. }
  733. typedef void (*dump_csv_func)(void *data, struct dstr *buffer);
/* Emits one CSV row per histogram bucket of this entry (durations first,
 * a "0" in the time_between_calls column; then between-calls buckets),
 * then recurses into children.  Entry/name pointers are printed as ids
 * so rows can be joined back into a tree.
 * NOTE(review): passing const char * for %p is technically mismatched
 * (the standard wants void *); works on common ABIs — consider casts. */
static void entry_dump_csv(struct dstr *buffer, const profiler_snapshot_entry_t *parent,
               const profiler_snapshot_entry_t *entry, dump_csv_func func, void *data)
{
    const char *parent_name = parent ? parent->name : NULL;

    for (size_t i = 0; i < entry->times.num; i++) {
        dstr_printf(buffer,
        "%p,%p,%p,%p,%s,0,"
        "%" PRIu64 ",%" PRIu64 "\n",
        entry, parent, entry->name, parent_name, entry->name, entry->times.array[i].time_delta,
        entry->times.array[i].count);
        func(data, buffer);
    }

    for (size_t i = 0; i < entry->times_between_calls.num; i++) {
        dstr_printf(buffer,
        "%p,%p,%p,%p,%s,"
        "%" PRIu64 ",%" PRIu64 ",%" PRIu64 "\n",
        entry, parent, entry->name, parent_name, entry->name, entry->expected_time_between_calls,
        entry->times_between_calls.array[i].time_delta, entry->times_between_calls.array[i].count);
        func(data, buffer);
    }

    for (size_t i = 0; i < entry->children.num; i++)
        entry_dump_csv(buffer, entry, &entry->children.array[i], func, data);
}
/* Streams the whole snapshot as CSV through `func` (header row first),
 * reusing a single dstr buffer for every row. */
static void profiler_snapshot_dump(const profiler_snapshot_t *snap, dump_csv_func func, void *data)
{
    struct dstr buffer = {0};

    dstr_init_copy(&buffer, "id,parent_id,name_id,parent_name_id,name,"
                "time_between_calls,time_delta_µs,count\n");
    func(data, &buffer);

    for (size_t i = 0; i < snap->roots.num; i++)
        entry_dump_csv(&buffer, NULL, &snap->roots.array[i], func, data);

    dstr_free(&buffer);
}
  767. static void dump_csv_fwrite(void *data, struct dstr *buffer)
  768. {
  769. fwrite(buffer->array, 1, buffer->len, data);
  770. }
  771. bool profiler_snapshot_dump_csv(const profiler_snapshot_t *snap, const char *filename)
  772. {
  773. FILE *f = os_fopen(filename, "wb+");
  774. if (!f)
  775. return false;
  776. profiler_snapshot_dump(snap, dump_csv_fwrite, f);
  777. fclose(f);
  778. return true;
  779. }
  780. static void dump_csv_gzwrite(void *data, struct dstr *buffer)
  781. {
  782. gzwrite(data, buffer->array, (unsigned)buffer->len);
  783. }
/* Writes the snapshot as a gzip-compressed CSV file.
 *
 * On Windows the UTF-8 filename is converted to a wide string so that
 * gzopen_w() can handle non-ASCII paths.  Returns false only when the
 * file cannot be opened.
 *
 * NOTE(review): gzwrite/gzclose results are not checked, so write failures
 * still return true — consider propagating them like the plain-CSV path. */
bool profiler_snapshot_dump_csv_gz(const profiler_snapshot_t *snap, const char *filename)
{
gzFile gz;
#ifdef _WIN32
wchar_t *filename_w = NULL;
os_utf8_to_wcs_ptr(filename, 0, &filename_w);
if (!filename_w)
return false;
gz = gzopen_w(filename_w, "wb");
bfree(filename_w);
#else
gz = gzopen(filename, "wb");
#endif
if (!gz)
return false;
profiler_snapshot_dump(snap, dump_csv_gzwrite, gz);
#ifdef _WIN32
gzclose_w(gz);
#else
gzclose(gz);
#endif
return true;
}
  807. size_t profiler_snapshot_num_roots(profiler_snapshot_t *snap)
  808. {
  809. return snap ? snap->roots.num : 0;
  810. }
  811. void profiler_snapshot_enumerate_roots(profiler_snapshot_t *snap, profiler_entry_enum_func func, void *context)
  812. {
  813. if (!snap)
  814. return;
  815. for (size_t i = 0; i < snap->roots.num; i++)
  816. if (!func(context, &snap->roots.array[i]))
  817. break;
  818. }
  819. void profiler_snapshot_filter_roots(profiler_snapshot_t *snap, profiler_name_filter_func func, void *data)
  820. {
  821. for (size_t i = 0; i < snap->roots.num;) {
  822. bool remove = false;
  823. bool res = func(data, snap->roots.array[i].name, &remove);
  824. if (remove) {
  825. free_snapshot_entry(&snap->roots.array[i]);
  826. da_erase(snap->roots, i);
  827. }
  828. if (!res)
  829. break;
  830. if (!remove)
  831. i += 1;
  832. }
  833. }
  834. size_t profiler_snapshot_num_children(profiler_snapshot_entry_t *entry)
  835. {
  836. return entry ? entry->children.num : 0;
  837. }
  838. void profiler_snapshot_enumerate_children(profiler_snapshot_entry_t *entry, profiler_entry_enum_func func,
  839. void *context)
  840. {
  841. if (!entry)
  842. return;
  843. for (size_t i = 0; i < entry->children.num; i++)
  844. if (!func(context, &entry->children.array[i]))
  845. break;
  846. }
  847. const char *profiler_snapshot_entry_name(profiler_snapshot_entry_t *entry)
  848. {
  849. return entry ? entry->name : NULL;
  850. }
  851. profiler_time_entries_t *profiler_snapshot_entry_times(profiler_snapshot_entry_t *entry)
  852. {
  853. return entry ? &entry->times : NULL;
  854. }
  855. uint64_t profiler_snapshot_entry_overall_count(profiler_snapshot_entry_t *entry)
  856. {
  857. return entry ? entry->overall_count : 0;
  858. }
  859. uint64_t profiler_snapshot_entry_min_time(profiler_snapshot_entry_t *entry)
  860. {
  861. return entry ? entry->min_time : 0;
  862. }
  863. uint64_t profiler_snapshot_entry_max_time(profiler_snapshot_entry_t *entry)
  864. {
  865. return entry ? entry->max_time : 0;
  866. }
  867. profiler_time_entries_t *profiler_snapshot_entry_times_between_calls(profiler_snapshot_entry_t *entry)
  868. {
  869. return entry ? &entry->times_between_calls : NULL;
  870. }
  871. uint64_t profiler_snapshot_entry_expected_time_between_calls(profiler_snapshot_entry_t *entry)
  872. {
  873. return entry ? entry->expected_time_between_calls : 0;
  874. }
  875. uint64_t profiler_snapshot_entry_min_time_between_calls(profiler_snapshot_entry_t *entry)
  876. {
  877. return entry ? entry->min_time_between_calls : 0;
  878. }
  879. uint64_t profiler_snapshot_entry_max_time_between_calls(profiler_snapshot_entry_t *entry)
  880. {
  881. return entry ? entry->max_time_between_calls : 0;
  882. }
  883. uint64_t profiler_snapshot_entry_overall_between_calls_count(profiler_snapshot_entry_t *entry)
  884. {
  885. return entry ? entry->overall_between_calls_count : 0;
  886. }