/******************************************************************************
    Copyright (C) 2023 by Dennis Sädtler <[email protected]>

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/

#include <math.h> /* ceil() in source_profiler_reset_video() */

#include "source-profiler.h"
#include "darray.h"
#include "obs-internal.h"
#include "platform.h"
#include "threading.h"
#include "uthash.h"

struct frame_sample {
	uint64_t tick;
	DARRAY(uint64_t) render_cpu;
	DARRAY(gs_timer_t *) render_timers;
};

/* Buffer frame data collection to give GPU time to finish rendering.
 * Set to the same as the rendering buffer (NUM_TEXTURES) */
#define FRAME_BUFFER_SIZE NUM_TEXTURES

struct source_samples {
	/* the pointer address of the source is the hashtable key */
	uintptr_t key;
	uint8_t frame_idx;
	struct frame_sample *frames[FRAME_BUFFER_SIZE];

	UT_hash_handle hh;
};

/* Basic fixed-size circular buffer to hold the most recent N uint64_t values
 * (older items will be overwritten). */
struct ucirclebuf {
	size_t idx;
	size_t capacity;
	size_t num;
	uint64_t *array;
};

struct profiler_entry {
	/* the pointer address of the source is the hashtable key */
	uintptr_t key;
	/* Tick times for last N frames */
	struct ucirclebuf tick;
	/* Time of first render pass in a frame, for last N frames */
	struct ucirclebuf render_cpu;
	struct ucirclebuf render_gpu;
	/* Sum of all render passes in a frame, for last N frames */
	struct ucirclebuf render_cpu_sum;
	struct ucirclebuf render_gpu_sum;
	/* Timestamps of last N async frame submissions */
	struct ucirclebuf async_frame_ts;
	/* Timestamps of last N async frames rendered */
	struct ucirclebuf async_rendered_ts;

	UT_hash_handle hh;
};

/* Hashmaps */
struct source_samples *hm_samples = NULL;
struct profiler_entry *hm_entries = NULL;

/* GPU timer ranges (only required for DirectX) */
static uint8_t timer_idx = 0;
static gs_timer_range_t *timer_ranges[FRAME_BUFFER_SIZE] = {0};

static uint64_t profiler_samples = 0;

/* Sources can be rendered more than once per frame; to avoid reallocating
 * memory in the majority of cases, reserve at least two. */
static const size_t render_times_reservation = 2;

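/* Protects hm_entries, which is read from other threads via
 * source_profiler_fill_result(). hm_samples is only ever touched from the
 * graphics thread and therefore needs no locking. */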
pthread_rwlock_t hm_rwlock = PTHREAD_RWLOCK_INITIALIZER;

static bool enabled = false;
static bool gpu_enabled = false;
/* These can be set from other threads, mark them volatile */
static volatile bool enable_next = false;
static volatile bool gpu_enable_next = false;

void ucirclebuf_init(struct ucirclebuf *buf, size_t capacity)
{
	if (!capacity)
		return;

	memset(buf, 0, sizeof(struct ucirclebuf));
	buf->capacity = capacity;
	buf->array = bmalloc(sizeof(uint64_t) * capacity);
}

void ucirclebuf_free(struct ucirclebuf *buf)
{
	bfree(buf->array);
	memset(buf, 0, sizeof(struct ucirclebuf));
}

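/* Append a value; once the buffer is full, the oldest value is overwritten. */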
void ucirclebuf_push(struct ucirclebuf *buf, uint64_t val)
{
	if (buf->num == buf->capacity) {
		buf->idx %= buf->capacity;
		buf->array[buf->idx++] = val;
		return;
	}

	buf->array[buf->idx++] = val;
	buf->num++;
}

static struct frame_sample *frame_sample_create(void)
{
	struct frame_sample *smp = bzalloc(sizeof(struct frame_sample));
	da_reserve(smp->render_cpu, render_times_reservation);
	da_reserve(smp->render_timers, render_times_reservation);
	return smp;
}

static void frame_sample_destroy(struct frame_sample *sample)
{
	if (sample->render_timers.num) {
		gs_enter_context(obs->video.graphics);
		for (size_t i = 0; i < sample->render_timers.num; i++)
			gs_timer_destroy(sample->render_timers.array[i]);
		gs_leave_context();
	}

	da_free(sample->render_cpu);
	da_free(sample->render_timers);
	bfree(sample);
}

struct source_samples *source_samples_create(const uintptr_t key)
{
	struct source_samples *smps = bzalloc(sizeof(struct source_samples));
	smps->key = key;

	for (size_t i = 0; i < FRAME_BUFFER_SIZE; i++)
		smps->frames[i] = frame_sample_create();

	return smps;
}

static void source_samples_destroy(struct source_samples *sample)
{
	for (size_t i = 0; i < FRAME_BUFFER_SIZE; i++)
		frame_sample_destroy(sample->frames[i]);
	bfree(sample);
}

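/* Allocate a profiler entry whose circular buffers hold profiler_samples
 * values, i.e. roughly five seconds of frames (see
 * source_profiler_reset_video). */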
static struct profiler_entry *entry_create(const uintptr_t key)
{
	struct profiler_entry *ent = bzalloc(sizeof(struct profiler_entry));
	ent->key = key;

	ucirclebuf_init(&ent->tick, profiler_samples);
	ucirclebuf_init(&ent->render_cpu, profiler_samples);
	ucirclebuf_init(&ent->render_gpu, profiler_samples);
	ucirclebuf_init(&ent->render_cpu_sum, profiler_samples);
	ucirclebuf_init(&ent->render_gpu_sum, profiler_samples);
	ucirclebuf_init(&ent->async_frame_ts, profiler_samples);
	ucirclebuf_init(&ent->async_rendered_ts, profiler_samples);

	return ent;
}

static void entry_destroy(struct profiler_entry *entry)
{
	ucirclebuf_free(&entry->tick);
	ucirclebuf_free(&entry->render_cpu);
	ucirclebuf_free(&entry->render_gpu);
	ucirclebuf_free(&entry->render_cpu_sum);
	ucirclebuf_free(&entry->render_gpu_sum);
	ucirclebuf_free(&entry->async_frame_ts);
	ucirclebuf_free(&entry->async_rendered_ts);
	bfree(entry);
}

static void reset_gpu_timers(void)
{
	gs_enter_context(obs->video.graphics);
	for (int i = 0; i < FRAME_BUFFER_SIZE; i++) {
		if (timer_ranges[i]) {
			gs_timer_range_destroy(timer_ranges[i]);
			timer_ranges[i] = NULL;
		}
	}
	gs_leave_context();
}

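/* Free all collected samples, entries, and GPU timer ranges. Called from the
 * graphics thread when profiling is disabled, or during a video reset while
 * the video thread is stopped. */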
static void profiler_shutdown(void)
{
	struct source_samples *smp, *tmp;
	HASH_ITER (hh, hm_samples, smp, tmp) {
		HASH_DEL(hm_samples, smp);
		source_samples_destroy(smp);
	}

	pthread_rwlock_wrlock(&hm_rwlock);
	struct profiler_entry *ent, *etmp;
	HASH_ITER (hh, hm_entries, ent, etmp) {
		HASH_DEL(hm_entries, ent);
		entry_destroy(ent);
	}
	pthread_rwlock_unlock(&hm_rwlock);

	reset_gpu_timers();
}

void source_profiler_enable(bool enable)
{
	enable_next = enable;
}

void source_profiler_gpu_enable(bool enable)
{
	gpu_enable_next = enable && enable_next;
}

void source_profiler_reset_video(struct obs_video_info *ovi)
{
	double fps = ceil((double)ovi->fps_num / (double)ovi->fps_den);
	profiler_samples = (uint64_t)(fps * 5);

	/* This is fine because the video thread won't be running at this point */
	profiler_shutdown();
}

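/* Begin the GPU timer range covering a frame's entire render pass. On
 * Direct3D, per-source timer queries must be enclosed in a disjoint timer
 * range to obtain the tick frequency and detect invalid (disjoint) results;
 * other backends may not need the range (see the timer_ranges comment). */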
void source_profiler_render_begin(void)
{
	if (!gpu_enabled)
		return;

	gs_enter_context(obs->video.graphics);
	if (!timer_ranges[timer_idx])
		timer_ranges[timer_idx] = gs_timer_range_create();
	gs_timer_range_begin(timer_ranges[timer_idx]);
	gs_leave_context();
}

void source_profiler_render_end(void)
{
	if (!gpu_enabled || !timer_ranges[timer_idx])
		return;

	gs_enter_context(obs->video.graphics);
	gs_timer_range_end(timer_ranges[timer_idx]);
	gs_leave_context();
}

void source_profiler_frame_begin(void)
{
	if (!enabled && enable_next)
		enabled = true;

	if (!gpu_enabled && enabled && gpu_enable_next) {
		gpu_enabled = true;
	} else if (gpu_enabled) {
		/* Advance timer idx if gpu enabled */
		timer_idx = (timer_idx + 1) % FRAME_BUFFER_SIZE;
	}
}

static inline bool is_async_video_source(const struct obs_source *source)
{
	return (source->info.output_flags & OBS_SOURCE_ASYNC_VIDEO) == OBS_SOURCE_ASYNC_VIDEO;
}

static const char *source_profiler_frame_collect_name = "source_profiler_frame_collect";

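/* Fold the samples gathered FRAME_BUFFER_SIZE - 1 frames ago into the
 * per-source profiler entries. Delaying collection gives the GPU time to
 * finish rendering, so reading timer results does not stall the pipeline. */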
void source_profiler_frame_collect(void)
{
	if (!enabled)
		return;

	profile_start(source_profiler_frame_collect_name);

	bool gpu_disjoint = false;
	bool gpu_ready = false;
	uint64_t freq = 0;

	if (gpu_enabled) {
		uint8_t timer_range_idx = (timer_idx + 1) % FRAME_BUFFER_SIZE;
		if (timer_ranges[timer_range_idx]) {
			gpu_ready = true;
			gs_enter_context(obs->video.graphics);
			gs_timer_range_get_data(timer_ranges[timer_range_idx], &gpu_disjoint, &freq);
		}

		if (gpu_disjoint) {
			blog(LOG_WARNING, "GPU Timers were disjoint, discarding samples.");
		}
	}

	pthread_rwlock_wrlock(&hm_rwlock);

	struct source_samples *smps = hm_samples;
	while (smps) {
		/* processing is delayed by FRAME_BUFFER_SIZE - 1 frames */
		uint8_t frame_idx = (smps->frame_idx + 1) % FRAME_BUFFER_SIZE;
		struct frame_sample *smp = smps->frames[frame_idx];

		if (!smp->tick) {
			/* No data yet */
			smps = smps->hh.next;
			continue;
		}

		struct profiler_entry *ent;
		HASH_FIND_PTR(hm_entries, &smps->key, ent);
		if (!ent) {
			ent = entry_create(smps->key);
			HASH_ADD_PTR(hm_entries, key, ent);
		}

		ucirclebuf_push(&ent->tick, smp->tick);

		if (smp->render_cpu.num) {
			uint64_t sum = 0;
			for (size_t idx = 0; idx < smp->render_cpu.num; idx++) {
				sum += smp->render_cpu.array[idx];
			}

			ucirclebuf_push(&ent->render_cpu, smp->render_cpu.array[0]);
			ucirclebuf_push(&ent->render_cpu_sum, sum);
			da_clear(smp->render_cpu);
		} else {
			ucirclebuf_push(&ent->render_cpu, 0);
			ucirclebuf_push(&ent->render_cpu_sum, 0);
		}

		/* Note that we still check this even if GPU profiling has been
		 * disabled to destroy leftover timers. */
		if (smp->render_timers.num) {
			uint64_t sum = 0, first = 0, ticks = 0;

			for (size_t i = 0; i < smp->render_timers.num; i++) {
				gs_timer_t *timer = smp->render_timers.array[i];
				if (gpu_ready && !gpu_disjoint && gs_timer_get_data(timer, &ticks)) {
					/* Convert ticks to ns */
					sum += util_mul_div64(ticks, 1000000000ULL, freq);
					if (!first)
						first = sum;
				}

				gs_timer_destroy(timer);
			}

			if (first) {
				ucirclebuf_push(&ent->render_gpu, first);
				ucirclebuf_push(&ent->render_gpu_sum, sum);
			}

			da_clear(smp->render_timers);
		} else {
			ucirclebuf_push(&ent->render_gpu, 0);
			ucirclebuf_push(&ent->render_gpu_sum, 0);
		}

		const obs_source_t *src = *(const obs_source_t **)smps->hh.key;
		if (is_async_video_source(src)) {
			uint64_t ts = obs_source_get_last_async_ts(src);
			ucirclebuf_push(&ent->async_rendered_ts, ts);
		}

		smps = smps->hh.next;
	}

	pthread_rwlock_unlock(&hm_rwlock);

	if (gpu_enabled && gpu_ready)
		gs_leave_context();

	/* Apply updated states for next frame */
	if (!enable_next) {
		enabled = gpu_enabled = false;
		profiler_shutdown();
	} else if (!gpu_enable_next) {
		gpu_enabled = false;
		reset_gpu_timers();
	}

	profile_end(source_profiler_frame_collect_name);
}

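/* Record the arrival timestamp of an async video frame. May be called from
 * whichever thread submits frames for the source, hence the write lock. */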
void source_profiler_async_frame_received(obs_source_t *source)
{
	if (!enabled)
		return;

	uint64_t ts = os_gettime_ns();
	pthread_rwlock_wrlock(&hm_rwlock);
	struct profiler_entry *ent;
	HASH_FIND_PTR(hm_entries, &source, ent);
	if (ent)
		ucirclebuf_push(&ent->async_frame_ts, ts);
	pthread_rwlock_unlock(&hm_rwlock);
}

uint64_t source_profiler_source_tick_start(void)
{
	if (!enabled)
		return 0;
	return os_gettime_ns();
}

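/* Store the tick duration for this frame. Runs on the graphics thread, the
 * only thread that modifies hm_samples, so no lock is required here. */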
void source_profiler_source_tick_end(obs_source_t *source, uint64_t start)
{
	if (!enabled)
		return;

	const uint64_t delta = os_gettime_ns() - start;

	struct source_samples *smp = NULL;
	HASH_FIND_PTR(hm_samples, &source, smp);
	if (!smp) {
		smp = source_samples_create((uintptr_t)source);
		HASH_ADD_PTR(hm_samples, key, smp);
	} else {
		/* Advance index here since tick happens first and only once
		 * at the start of each frame. */
		smp->frame_idx = (smp->frame_idx + 1) % FRAME_BUFFER_SIZE;
	}

	smp->frames[smp->frame_idx]->tick = delta;
}

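/* Start CPU (and, if enabled, GPU) timing for a single render pass of a
 * source. The returned timestamp and *timer are handed back to
 * source_profiler_source_render_end() once the pass is complete. */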
uint64_t source_profiler_source_render_begin(gs_timer_t **timer)
{
	if (!enabled)
		return 0;

	if (gpu_enabled) {
		*timer = gs_timer_create();
		gs_timer_begin(*timer);
	} else {
		*timer = NULL;
	}

	return os_gettime_ns();
}

void source_profiler_source_render_end(obs_source_t *source, uint64_t start, gs_timer_t *timer)
{
	if (!enabled)
		return;

	if (timer)
		gs_timer_end(timer);

	const uint64_t delta = os_gettime_ns() - start;

	struct source_samples *smp;
	HASH_FIND_PTR(hm_samples, &source, smp);
	if (smp) {
		da_push_back(smp->frames[smp->frame_idx]->render_cpu, &delta);
		if (timer) {
			da_push_back(smp->frames[smp->frame_idx]->render_timers, &timer);
		}
	} else if (timer) {
		gs_timer_destroy(timer);
	}
}

static void task_delete_source(void *key)
{
	struct source_samples *smp;
	HASH_FIND_PTR(hm_samples, &key, smp);
	if (smp) {
		HASH_DEL(hm_samples, smp);
		source_samples_destroy(smp);
	}

	/* Deleting from hm_entries modifies the map, so take the write lock */
	pthread_rwlock_wrlock(&hm_rwlock);
	struct profiler_entry *ent = NULL;
	HASH_FIND_PTR(hm_entries, &key, ent);
	if (ent) {
		HASH_DEL(hm_entries, ent);
		entry_destroy(ent);
	}
	pthread_rwlock_unlock(&hm_rwlock);
}

void source_profiler_remove_source(obs_source_t *source)
{
	if (!enabled)
		return;
	/* Schedule deletion task on graphics thread */
	obs_queue_task(OBS_TASK_GRAPHICS, task_delete_source, source, false);
}

static inline void calculate_tick(struct profiler_entry *ent, struct profiler_result *result)
{
	size_t idx = 0;
	uint64_t sum = 0;

	for (; idx < ent->tick.num; idx++) {
		const uint64_t delta = ent->tick.array[idx];
		if (delta > result->tick_max)
			result->tick_max = delta;
		sum += delta;
	}

	if (idx)
		result->tick_avg = sum / idx;
}

static inline void calculate_render(struct profiler_entry *ent, struct profiler_result *result)
{
	size_t idx;
	uint64_t sum = 0, sum_sum = 0;

	for (idx = 0; idx < ent->render_cpu.num; idx++) {
		const uint64_t delta = ent->render_cpu.array[idx];
		if (delta > result->render_max)
			result->render_max = delta;
		sum += delta;
		sum_sum += ent->render_cpu_sum.array[idx];
	}

	if (idx) {
		result->render_avg = sum / idx;
		result->render_sum = sum_sum / idx;
	}

	if (!gpu_enabled)
		return;

	sum = sum_sum = 0;
	for (idx = 0; idx < ent->render_gpu.num; idx++) {
		const uint64_t delta = ent->render_gpu.array[idx];
		if (delta > result->render_gpu_max)
			result->render_gpu_max = delta;
		sum += delta;
		sum_sum += ent->render_gpu_sum.array[idx];
	}

	if (idx) {
		result->render_gpu_avg = sum / idx;
		result->render_gpu_sum = sum_sum / idx;
	}
}

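/* Derive average/best/worst FPS from a ring buffer of timestamps by summing
 * the deltas between consecutive entries. Pairs that straddle the wrap-around
 * point (a newer timestamp followed by an older one) are skipped. */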
static inline void calculate_fps(const struct ucirclebuf *frames, double *avg, uint64_t *best, uint64_t *worst)
{
	uint64_t deltas = 0, delta_sum = 0, best_delta = 0, worst_delta = 0;

	for (size_t idx = 0; idx < frames->num; idx++) {
		const uint64_t ts = frames->array[idx];
		if (!ts)
			break;

		size_t prev_idx = idx ? idx - 1 : frames->num - 1;
		const uint64_t prev_ts = frames->array[prev_idx];
		if (!prev_ts || prev_ts >= ts)
			continue;

		uint64_t delta = (ts - prev_ts);
		if (delta < best_delta || !best_delta)
			best_delta = delta;
		if (delta > worst_delta)
			worst_delta = delta;

		delta_sum += delta;
		deltas++;
	}

	if (deltas && delta_sum) {
		*avg = 1.0E9 / ((double)delta_sum / (double)deltas);
		*best = best_delta;
		*worst = worst_delta;
	}
}

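/* Fill *result with the aggregated statistics for a source. Returns false if
 * the profiler is disabled or no data has been collected for the source yet.
 *
 * Typical usage (sketch):
 *
 *   struct profiler_result res;
 *   if (source_profiler_fill_result(source, &res))
 *       blog(LOG_INFO, "avg tick: %llu ns",
 *            (unsigned long long)res.tick_avg);
 */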
bool source_profiler_fill_result(obs_source_t *source, struct profiler_result *result)
{
	if (!enabled || !result)
		return false;

	memset(result, 0, sizeof(struct profiler_result));

	pthread_rwlock_rdlock(&hm_rwlock);
	struct profiler_entry *ent = NULL;
	HASH_FIND_PTR(hm_entries, &source, ent);
	if (ent) {
		calculate_tick(ent, result);
		calculate_render(ent, result);
		if (is_async_video_source(source)) {
			calculate_fps(&ent->async_frame_ts, &result->async_input, &result->async_input_best,
				      &result->async_input_worst);
			calculate_fps(&ent->async_rendered_ts, &result->async_rendered, &result->async_rendered_best,
				      &result->async_rendered_worst);
		}
	}
	pthread_rwlock_unlock(&hm_rwlock);

	return !!ent;
}

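/* Heap-allocating variant of source_profiler_fill_result(); returns NULL on
 * failure. The caller owns the result and releases it with bfree(). */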
profiler_result_t *source_profiler_get_result(obs_source_t *source)
{
	profiler_result_t *ret = bmalloc(sizeof(profiler_result_t));
	if (!source_profiler_fill_result(source, ret)) {
		bfree(ret);
		return NULL;
	}
	return ret;
}