bpm.c

#include "obs.h"
#include "bpm-internal.h"

static void render_metrics_time(struct metrics_time *m_time)
{
        /* Generate the RFC3339 time string from the timespec struct, for example:
         *
         * "2024-05-31T12:26:03.591Z"
         */
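        /* Note: the fixed-width format above is 24 characters plus the NUL
         * terminator, so rfc3339_str is assumed (see bpm-internal.h) to be at
         * least 25 bytes. */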
        memset(&m_time->rfc3339_str, 0, sizeof(m_time->rfc3339_str));
        strftime(m_time->rfc3339_str, sizeof(m_time->rfc3339_str), "%Y-%m-%dT%T", gmtime(&m_time->tspec.tv_sec));
        sprintf(m_time->rfc3339_str + strlen(m_time->rfc3339_str), ".%03ldZ", m_time->tspec.tv_nsec / 1000000);
        m_time->valid = true;
}

static bool update_metrics(obs_output_t *output, const struct encoder_packet *pkt,
                           const struct encoder_packet_time *ept, struct metrics_data *m_track)
{
        if (!pkt) {
                blog(LOG_DEBUG, "%s: Null encoder_packet pointer", __FUNCTION__);
                return false;
        }
        if (!output || !ept || !m_track) {
                blog(LOG_DEBUG, "%s: Null arguments for track %zu", __FUNCTION__, pkt->track_idx);
                return false;
        }
        // Perform reads on all the counters as close together as possible
        m_track->session_frames_output.curr = obs_output_get_total_frames(output);
        m_track->session_frames_dropped.curr = obs_output_get_frames_dropped(output);
        m_track->session_frames_rendered.curr = obs_get_total_frames();
        m_track->session_frames_lagged.curr = obs_get_lagged_frames();

        const video_t *video = obs_encoder_video(pkt->encoder);
        if (video) {
                /* video_output_get_total_frames() returns the number of frames
                 * before the framerate decimator. For example, if the OBS session
                 * is rendering at 60 fps, and the rendition is set for 30 fps,
                 * the counter will increment by 60 per second, not 30 per second.
                 * For metrics we will consider this value to be the number of
                 * frames input to the obs_encoder_t instance.
                 */
                m_track->rendition_frames_input.curr = video_output_get_total_frames(video);
                m_track->rendition_frames_skipped.curr = video_output_get_skipped_frames(video);
                /* obs_encoder_get_encoded_frames() returns the number of frames
                 * successfully encoded by the obs_encoder_t instance.
                 */
                m_track->rendition_frames_output.curr = obs_encoder_get_encoded_frames(pkt->encoder);
        } else {
                m_track->rendition_frames_input.curr = 0;
                m_track->rendition_frames_skipped.curr = 0;
                m_track->rendition_frames_output.curr = 0;
                blog(LOG_ERROR, "update_metrics(): *video_t==null");
        }
        // Set the diff values to 0 if PTS is 0
        if (pkt->pts == 0) {
                m_track->session_frames_output.diff = 0;
                m_track->session_frames_dropped.diff = 0;
                m_track->session_frames_rendered.diff = 0;
                m_track->session_frames_lagged.diff = 0;
                m_track->rendition_frames_input.diff = 0;
                m_track->rendition_frames_skipped.diff = 0;
                m_track->rendition_frames_output.diff = 0;
                blog(LOG_DEBUG, "update_metrics(): Setting diffs to 0");
        } else {
                // Calculate the diffs
                m_track->session_frames_output.diff =
                        m_track->session_frames_output.curr - m_track->session_frames_output.ref;
                m_track->session_frames_dropped.diff =
                        m_track->session_frames_dropped.curr - m_track->session_frames_dropped.ref;
                m_track->session_frames_rendered.diff =
                        m_track->session_frames_rendered.curr - m_track->session_frames_rendered.ref;
                m_track->session_frames_lagged.diff =
                        m_track->session_frames_lagged.curr - m_track->session_frames_lagged.ref;
                m_track->rendition_frames_input.diff =
                        m_track->rendition_frames_input.curr - m_track->rendition_frames_input.ref;
                m_track->rendition_frames_skipped.diff =
                        m_track->rendition_frames_skipped.curr - m_track->rendition_frames_skipped.ref;
                m_track->rendition_frames_output.diff =
                        m_track->rendition_frames_output.curr - m_track->rendition_frames_output.ref;
        }
        // Update the reference values
        m_track->session_frames_output.ref = m_track->session_frames_output.curr;
        m_track->session_frames_dropped.ref = m_track->session_frames_dropped.curr;
        m_track->session_frames_rendered.ref = m_track->session_frames_rendered.curr;
        m_track->session_frames_lagged.ref = m_track->session_frames_lagged.curr;
        m_track->rendition_frames_input.ref = m_track->rendition_frames_input.curr;
        m_track->rendition_frames_skipped.ref = m_track->rendition_frames_skipped.curr;
        m_track->rendition_frames_output.ref = m_track->rendition_frames_output.curr;
        /* BPM Timestamp Message */
        m_track->cts.valid = false;
        m_track->ferts.valid = false;
        m_track->fercts.valid = false;

        /* Generate the timestamp representations for CTS, FER, and FERC.
         * Check that each is non-zero and that the temporal ordering is correct:
         * FERC > FER > CTS
         * FER and FERC depend on CTS, and FERC depends on FER, so ensure
         * we only signal an integral set of timestamps.
         */
        os_nstime_to_timespec(ept->cts, &m_track->cts.tspec);
        render_metrics_time(&m_track->cts);
        if (ept->fer && (ept->fer > ept->cts)) {
                os_nstime_to_timespec(ept->fer, &m_track->ferts.tspec);
                render_metrics_time(&m_track->ferts);
                if (ept->ferc && (ept->ferc > ept->fer)) {
                        os_nstime_to_timespec(ept->ferc, &m_track->fercts.tspec);
                        render_metrics_time(&m_track->fercts);
                }
        }

        // Always generate the timestamp representation for PIR
        m_track->pirts.valid = false;
        os_nstime_to_timespec(ept->pir, &m_track->pirts.tspec);
        render_metrics_time(&m_track->pirts);

        /* Log the BPM timestamp and frame counter information. This
         * provides visibility into the metrics when OBS is started
         * with "--verbose" and "--unfiltered_log".
         */
        blog(LOG_DEBUG,
             "BPM: %s, trk %zu: [CTS|FER-CTS|FERC-FER|PIR-CTS]:[%" PRIu64 " ms|%" PRIu64 " ms|%" PRIu64 " us|%" PRIu64
             " ms], [dts|pts]:[%" PRId64 "|%" PRId64 "], S[R:O:D:L],R[I:S:O]:%d:%d:%d:%d:%d:%d:%d",
             obs_encoder_get_name(pkt->encoder), pkt->track_idx, ept->cts / 1000000, (ept->fer - ept->cts) / 1000000,
             (ept->ferc - ept->fer) / 1000, (ept->pir - ept->cts) / 1000000, pkt->dts, pkt->pts,
             m_track->session_frames_rendered.diff, m_track->session_frames_output.diff,
             m_track->session_frames_dropped.diff, m_track->session_frames_lagged.diff,
             m_track->rendition_frames_input.diff, m_track->rendition_frames_skipped.diff,
             m_track->rendition_frames_output.diff);
        return true;
}

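/* bpm_ts_sei_render() serializes the BPM Timestamp message. A sketch of the
 * payload layout produced by the writes below:
 *
 *   sizeof(bpm_ts_uuid) bytes : message UUID
 *   1 byte                    : reserved (upper 4 bits) | num_timestamps - 1
 *   per valid timestamp       : 1-byte type (BPM_TS_RFC3339),
 *                               1-byte event tag (CTS/FER/FERC/PIR),
 *                               NUL-terminated RFC3339 string
 */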
void bpm_ts_sei_render(struct metrics_data *m_track)
{
        uint8_t num_timestamps = 0;
        struct serializer s;

        m_track->sei_rendered[BPM_TS_SEI] = false;
        // Initialize the output array here; the caller is responsible for freeing it
        array_output_serializer_init(&s, &m_track->sei_payload[BPM_TS_SEI]);
        // Write the UUID for this SEI message
        s_write(&s, bpm_ts_uuid, sizeof(bpm_ts_uuid));

        // Determine how many timestamps are valid
        if (m_track->cts.valid)
                num_timestamps++;
        if (m_track->ferts.valid)
                num_timestamps++;
        if (m_track->fercts.valid)
                num_timestamps++;
        if (m_track->pirts.valid)
                num_timestamps++;

        /* Encode the number of timestamps for this SEI. The upper 4 bits are
         * set to b0000 (reserved); the lower 4 bits carry num_timestamps - 1.
         */
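        /* For example, with all four timestamps valid, num_timestamps == 4 and
         * the byte written here is 0x03. */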
        s_w8(&s, (num_timestamps - 1) & 0x0F);

        if (m_track->cts.valid) {
                // Timestamp type
                s_w8(&s, BPM_TS_RFC3339);
                // Write the timestamp event tag (Composition Time Event)
                s_w8(&s, BPM_TS_EVENT_CTS);
                // Write the RFC3339-formatted string, including the null terminator
                s_write(&s, m_track->cts.rfc3339_str, strlen(m_track->cts.rfc3339_str) + 1);
        }
        if (m_track->ferts.valid) {
                // Timestamp type
                s_w8(&s, BPM_TS_RFC3339);
                // Write the timestamp event tag (Frame Encode Request Event)
                s_w8(&s, BPM_TS_EVENT_FER);
                // Write the RFC3339-formatted string, including the null terminator
                s_write(&s, m_track->ferts.rfc3339_str, strlen(m_track->ferts.rfc3339_str) + 1);
        }
        if (m_track->fercts.valid) {
                // Timestamp type
                s_w8(&s, BPM_TS_RFC3339);
                // Write the timestamp event tag (Frame Encode Request Complete Event)
                s_w8(&s, BPM_TS_EVENT_FERC);
                // Write the RFC3339-formatted string, including the null terminator
                s_write(&s, m_track->fercts.rfc3339_str, strlen(m_track->fercts.rfc3339_str) + 1);
        }
        if (m_track->pirts.valid) {
                // Timestamp type
                s_w8(&s, BPM_TS_RFC3339);
                // Write the timestamp event tag (Packet Interleave Request Event)
                s_w8(&s, BPM_TS_EVENT_PIR);
                // Write the RFC3339-formatted string, including the null terminator
                s_write(&s, m_track->pirts.rfc3339_str, strlen(m_track->pirts.rfc3339_str) + 1);
        }

        m_track->sei_rendered[BPM_TS_SEI] = true;
}

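/* bpm_sm_sei_render() and bpm_erm_sei_render() below serialize the session
 * and encoder-rendition metrics messages. Both share a common layout sketch:
 *
 *   sizeof(uuid) bytes : message UUID (bpm_sm_uuid or bpm_erm_uuid)
 *   1 byte             : reserved (upper 4 bits) | num_timestamps - 1
 *   1 byte             : timestamp type (BPM_TS_RFC3339)
 *   1 byte             : timestamp event tag (BPM_TS_EVENT_PIR)
 *   N bytes            : NUL-terminated RFC3339 string
 *   1 byte             : reserved (upper 4 bits) | num_counters - 1
 *   per counter        : 1-byte tag followed by a 32-bit big-endian value
 */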
void bpm_sm_sei_render(struct metrics_data *m_track)
{
        uint8_t num_timestamps = 0;
        uint8_t num_counters = 0;
        struct serializer s;

        m_track->sei_rendered[BPM_SM_SEI] = false;
        // Initialize the output array here; the caller is responsible for freeing it
        array_output_serializer_init(&s, &m_track->sei_payload[BPM_SM_SEI]);
        // Write the UUID for this SEI message
        s_write(&s, bpm_sm_uuid, sizeof(bpm_sm_uuid));

        // Encode the number of timestamps for this SEI
        num_timestamps = 1;
        // The upper 4 bits are set to b0000 (reserved); the lower 4 bits carry num_timestamps - 1
        s_w8(&s, (num_timestamps - 1) & 0x0F);
        // Timestamp type
        s_w8(&s, BPM_TS_RFC3339);
        /* Write the timestamp event tag (Packet Interleave Request Event).
         * Use the PIR timestamp because the data was all collected at that time.
         */
        s_w8(&s, BPM_TS_EVENT_PIR);
        // Write the RFC3339-formatted string, including the null terminator
        s_write(&s, m_track->pirts.rfc3339_str, strlen(m_track->pirts.rfc3339_str) + 1);

        // The session metrics message has 4 counters
        num_counters = 4;
        /* Send all the counters with a tag(8-bit):value(32-bit) configuration.
         * The upper 4 bits are set to b0000 (reserved); the lower 4 bits carry num_counters - 1.
         */
        s_w8(&s, (num_counters - 1) & 0x0F);
        s_w8(&s, BPM_SM_FRAMES_RENDERED);
        s_wb32(&s, m_track->session_frames_rendered.diff);
        s_w8(&s, BPM_SM_FRAMES_LAGGED);
        s_wb32(&s, m_track->session_frames_lagged.diff);
        s_w8(&s, BPM_SM_FRAMES_DROPPED);
        s_wb32(&s, m_track->session_frames_dropped.diff);
        s_w8(&s, BPM_SM_FRAMES_OUTPUT);
        s_wb32(&s, m_track->session_frames_output.diff);

        m_track->sei_rendered[BPM_SM_SEI] = true;
}

void bpm_erm_sei_render(struct metrics_data *m_track)
{
        uint8_t num_timestamps = 0;
        uint8_t num_counters = 0;
        struct serializer s;

        m_track->sei_rendered[BPM_ERM_SEI] = false;
        // Initialize the output array here; the caller is responsible for freeing it
        array_output_serializer_init(&s, &m_track->sei_payload[BPM_ERM_SEI]);
        // Write the UUID for this SEI message
        s_write(&s, bpm_erm_uuid, sizeof(bpm_erm_uuid));

        // Encode the number of timestamps for this SEI
        num_timestamps = 1;
        // The upper 4 bits are set to b0000 (reserved); the lower 4 bits carry num_timestamps - 1
        s_w8(&s, (num_timestamps - 1) & 0x0F);
        // Timestamp type
        s_w8(&s, BPM_TS_RFC3339);
        /* Write the timestamp event tag (Packet Interleave Request Event).
         * Use the PIR timestamp because the data was all collected at that time.
         */
        s_w8(&s, BPM_TS_EVENT_PIR);
        // Write the RFC3339-formatted string, including the null terminator
        s_write(&s, m_track->pirts.rfc3339_str, strlen(m_track->pirts.rfc3339_str) + 1);

        // The encoder rendition metrics message has 3 counters
        num_counters = 3;
        /* Send all the counters with a tag(8-bit):value(32-bit) configuration.
         * The upper 4 bits are set to b0000 (reserved); the lower 4 bits carry num_counters - 1.
         */
        s_w8(&s, (num_counters - 1) & 0x0F);
        s_w8(&s, BPM_ERM_FRAMES_INPUT);
        s_wb32(&s, m_track->rendition_frames_input.diff);
        s_w8(&s, BPM_ERM_FRAMES_SKIPPED);
        s_wb32(&s, m_track->rendition_frames_skipped.diff);
        s_w8(&s, BPM_ERM_FRAMES_OUTPUT);
        s_wb32(&s, m_track->rendition_frames_output.diff);

        m_track->sei_rendered[BPM_ERM_SEI] = true;
}

/* Note: extract_buffer_from_sei() and nal_start are also defined
 * in obs-output.c; however, they are not public APIs. When the caption
 * library is re-worked, this code should be refactored into that.
 */
static size_t extract_buffer_from_sei(sei_t *sei, uint8_t **data_out)
{
        if (!sei || !sei->head) {
                return 0;
        }
        /* We should only need to get one payload, because the SEI that was
         * generated should only have one message, so there is no need to iterate.
         * If we did iterate, we would need to generate multiple OBUs. */
        sei_message_t *msg = sei_message_head(sei);
        int payload_size = (int)sei_message_size(msg);
        uint8_t *payload_data = sei_message_data(msg);

        *data_out = bmalloc(payload_size);
        memcpy(*data_out, payload_data, payload_size);
        return payload_size;
}

static const uint8_t nal_start[4] = {0, 0, 0, 1};

/* process_metrics() will update and insert unregistered
 * SEI (AVC/HEVC) or OBU (AV1) messages into the encoded
 * video bitstream.
 */
static bool process_metrics(obs_output_t *output, struct encoder_packet *out, struct encoder_packet_time *ept,
                            struct metrics_data *m_track)
{
        struct encoder_packet backup = *out;
        sei_t sei;
        uint8_t *data = NULL;
        size_t size;
        long ref = 1;
        bool avc = false;
        bool hevc = false;
        bool av1 = false;

        if (!m_track) {
                blog(LOG_DEBUG, "Metrics track for index %zu has not been initialized", out->track_idx);
                return false;
        }
        // Update the metrics for this track
        if (!update_metrics(output, out, ept, m_track)) {
                // Something went wrong; log it and return
                blog(LOG_DEBUG, "update_metrics() for track index %zu failed", out->track_idx);
                return false;
        }

        if (strcmp(obs_encoder_get_codec(out->encoder), "h264") == 0) {
                avc = true;
        } else if (strcmp(obs_encoder_get_codec(out->encoder), "av1") == 0) {
                av1 = true;
#ifdef ENABLE_HEVC
        } else if (strcmp(obs_encoder_get_codec(out->encoder), "hevc") == 0) {
                hevc = true;
#endif
        }

#ifdef ENABLE_HEVC
        uint8_t hevc_nal_header[2];
        if (hevc) {
                size_t nal_header_index_start = 4;
                // Skip past the Annex B start code
                if (memcmp(out->data, nal_start + 1, 3) == 0) {
                        nal_header_index_start = 3;
                } else if (memcmp(out->data, nal_start, 4) == 0) {
                        nal_header_index_start = 4;
                } else {
                        /* We shouldn't ever see this unless we start getting
                         * packets without Annex B start codes. */
                        blog(LOG_DEBUG, "Annex-B start code not found, we may not "
                                        "generate a valid hevc nal unit header "
                                        "for our caption");
                        return false;
                }
                /* We will use the same 2-byte NAL unit header for the SEI,
                 * but swap the NAL types out. */
                hevc_nal_header[0] = out->data[nal_header_index_start];
                hevc_nal_header[1] = out->data[nal_header_index_start + 1];
        }
#endif

        // Create an array for the original packet data plus the appended SEI data
        DARRAY(uint8_t) out_data;
        da_init(out_data);
        // Prepend space for the packet reference count, then copy the original packet data
        da_push_back_array(out_data, (uint8_t *)&ref, sizeof(ref));
        da_push_back_array(out_data, out->data, out->size);

        // Build the SEI metrics message payloads
        bpm_ts_sei_render(m_track);
        bpm_sm_sei_render(m_track);
        bpm_erm_sei_render(m_track);

        // Iterate over all the BPM SEI types
        for (uint8_t i = 0; i < BPM_MAX_SEI; ++i) {
                // Create and inject the syntax-specific SEI messages into the bitstream if rendering succeeded
                if (m_track->sei_rendered[i]) {
                        // Send one SEI message per NALU or OBU
                        sei_init(&sei, 0.0);
                        // Generate the formatted SEI message
                        sei_message_t *msg = sei_message_new(sei_type_user_data_unregistered,
                                                             m_track->sei_payload[i].bytes.array,
                                                             m_track->sei_payload[i].bytes.num);
                        sei_message_append(&sei, msg);
                        // Free the SEI payload buffer in the metrics track
                        array_output_serializer_free(&m_track->sei_payload[i]);

                        // Update for any codec-specific syntax and add to the output bitstream
                        if (avc || hevc || av1) {
                                if (avc || hevc) {
                                        data = bmalloc(sei_render_size(&sei));
                                        size = sei_render(&sei, data);
                                }
                                /* In each of these specs there is an identical structure that
                                 * carries user private metadata. We have an AVC SEI-wrapped
                                 * version of that here. We will strip it out and repackage
                                 * it slightly to fit the different codec carrying mechanisms:
                                 * a slightly modified SEI for HEVC and a metadata OBU for AV1.
                                 */
                                if (avc) {
                                        /* TODO: SEI should come after AUD/SPS/PPS,
                                         * but before any VCL */
                                        da_push_back_array(out_data, nal_start, 4);
                                        da_push_back_array(out_data, data, size);
#ifdef ENABLE_HEVC
                                } else if (hevc) {
                                        /* Only the first NAL (VPS/PPS/SPS) should use the 4-byte
                                         * start code. SEIs use the 3-byte version. */
                                        da_push_back_array(out_data, nal_start + 1, 3);
                                        /* nal_unit_header( ) {
                                         *     forbidden_zero_bit      f(1)
                                         *     nal_unit_type           u(6)
                                         *     nuh_layer_id            u(6)
                                         *     nuh_temporal_id_plus1   u(3)
                                         * }
                                         */
                                        const uint8_t prefix_sei_nal_type = 39;
                                        /* The first bit is always 0, so we just need to
                                         * save the last bit of the original header and
                                         * add the SEI NAL type. */
                                        uint8_t first_byte = (prefix_sei_nal_type << 1) | (0x01 & hevc_nal_header[0]);
                                        hevc_nal_header[0] = first_byte;
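                                        /* For example, assuming the packet begins with a typical
                                         * VPS header of 0x40 0x01 (nal_unit_type 32, nuh_layer_id 0,
                                         * nuh_temporal_id_plus1 1), the rewritten header becomes
                                         * 0x4E 0x01, i.e. (39 << 1) == 0x4E, the prefix SEI NAL type. */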
                                        /* The HEVC NAL unit header is 2 bytes instead of
                                         * one; otherwise everything else is the
                                         * same. */
                                        da_push_back_array(out_data, hevc_nal_header, 2);
                                        da_push_back_array(out_data, &data[1], size - 1);
#endif
                                } else if (av1) {
                                        uint8_t *obu_buffer = NULL;
                                        size_t obu_buffer_size = 0;
                                        size = extract_buffer_from_sei(&sei, &data);
                                        metadata_obu(data, size, &obu_buffer, &obu_buffer_size,
                                                     METADATA_TYPE_USER_PRIVATE_6);
                                        if (obu_buffer) {
                                                da_push_back_array(out_data, obu_buffer, obu_buffer_size);
                                                bfree(obu_buffer);
                                        }
                                }
                                if (data) {
                                        bfree(data);
                                }
                        }
                        sei_free(&sei);
                }
        }

        obs_encoder_packet_release(out);
        *out = backup;
        out->data = (uint8_t *)out_data.array + sizeof(ref);
        out->size = out_data.num - sizeof(ref);

        if (avc || hevc || av1) {
                return true;
        }
        return false;
}

static struct metrics_data *bpm_create_metrics_track(void)
{
        struct metrics_data *rval = bzalloc(sizeof(struct metrics_data));

        pthread_mutex_init_value(&rval->metrics_mutex);
        if (pthread_mutex_init(&rval->metrics_mutex, NULL) != 0) {
                bfree(rval);
                rval = NULL;
        }
        return rval;
}

static bool bpm_get_track(obs_output_t *output, size_t track, struct metrics_data **m_track)
{
        bool found = false;

        // Walk the DARRAY looking for the output pointer
        pthread_mutex_lock(&bpm_metrics_mutex);
        for (size_t i = bpm_metrics.num; i > 0; i--) {
                if (output == bpm_metrics.array[i - 1].output) {
                        *m_track = bpm_metrics.array[i - 1].metrics_tracks[track];
                        found = true;
                        break;
                }
        }
        if (!found) {
                // Create the new BPM metrics entries
                struct output_metrics_link *oml = da_push_back_new(bpm_metrics);
                oml->output = output;
                for (size_t i = 0; i < MAX_OUTPUT_VIDEO_ENCODERS; ++i) {
                        oml->metrics_tracks[i] = bpm_create_metrics_track();
                }
                *m_track = oml->metrics_tracks[track];
                found = true;
        }
        pthread_mutex_unlock(&bpm_metrics_mutex);
        return found;
}

static void bpm_init(void)
{
        pthread_mutex_init_value(&bpm_metrics_mutex);
        da_init(bpm_metrics);
}

void bpm_destroy(obs_output_t *output)
{
        int64_t idx = -1;

        pthread_once(&bpm_once, bpm_init);
        pthread_mutex_lock(&bpm_metrics_mutex);
        // Walk the DARRAY looking for the index that matches the output
        for (size_t i = bpm_metrics.num; i > 0; i--) {
                if (output == bpm_metrics.array[i - 1].output) {
                        idx = i - 1;
                        break;
                }
        }
        if (idx >= 0) {
                struct output_metrics_link *oml = &bpm_metrics.array[idx];
                for (size_t i = 0; i < MAX_OUTPUT_VIDEO_ENCODERS; i++) {
                        if (oml->metrics_tracks[i]) {
                                struct metrics_data *m_track = oml->metrics_tracks[i];
                                for (uint8_t j = 0; j < BPM_MAX_SEI; ++j) {
                                        array_output_serializer_free(&m_track->sei_payload[j]);
                                }
                                pthread_mutex_destroy(&m_track->metrics_mutex);
                                bfree(m_track);
                                m_track = NULL;
                        }
                }
                da_erase(bpm_metrics, idx);
                if (bpm_metrics.num == 0)
                        da_free(bpm_metrics);
        }
        pthread_mutex_unlock(&bpm_metrics_mutex);
}

/* bpm_inject() is the callback function that needs to be registered
 * with each output that should have Broadcast Performance Metrics injected
 * into its video bitstream, using SEI (AVC/HEVC) and OBU (AV1) syntax.
 */
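/* A minimal registration sketch (an assumption for illustration; verify the
 * packet callback entry points exposed by obs.h before relying on it):
 *
 *     obs_output_add_packet_callback(output, bpm_inject, NULL);
 *     ...
 *     obs_output_remove_packet_callback(output, bpm_inject, NULL);
 *     bpm_destroy(output);
 */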
void bpm_inject(obs_output_t *output, struct encoder_packet *pkt, struct encoder_packet_time *pkt_time, void *param)
{
        UNUSED_PARAMETER(param);

        pthread_once(&bpm_once, bpm_init);
        if (!output || !pkt) {
                blog(LOG_DEBUG, "%s: Null pointer arguments supplied, returning", __FUNCTION__);
                return;
        }
        /* Insert BPM only on video packets, and only when a keyframe
         * is detected.
         */
        if (pkt->type == OBS_ENCODER_VIDEO && pkt->keyframe) {
                /* The video packet must have pkt_time supplied for BPM */
                if (!pkt_time) {
                        blog(LOG_DEBUG, "%s: Packet timing missing for track %zu, PTS %" PRId64, __FUNCTION__,
                             pkt->track_idx, pkt->pts);
                        return;
                }
                /* Get the metrics track associated with the output.
                 * Allocate BPM metrics structures for the output if needed.
                 */
                struct metrics_data *m_track = NULL;
                if (!bpm_get_track(output, pkt->track_idx, &m_track)) {
                        blog(LOG_DEBUG, "%s: BPM metrics track not found!", __FUNCTION__);
                        return;
                }

                pthread_mutex_lock(&m_track->metrics_mutex);
                // Update the metrics and generate the BPM messages
                if (!process_metrics(output, pkt, pkt_time, m_track)) {
                        blog(LOG_DEBUG, "%s: BPM injection processing failed", __FUNCTION__);
                }
                pthread_mutex_unlock(&m_track->metrics_mutex);
        }
}