
(API Change) Rename 'source_frame' + related

For the sake of naming consistency with the rest of obs.h, prefix this
structure and associated functions with obs_.

Renamed structures:
- struct source_frame (now obs_source_frame)

Renamed functions:
- source_frame_init (now obs_source_frame_init)
- source_frame_free (now obs_source_frame_free)
- source_frame_create (now obs_source_frame_create)
- source_frame_destroy (now obs_source_frame_destroy)

Affected functions:
- obs_source_output_video
- obs_source_get_frame
- obs_source_release_frame
jp9000 committed 11 years ago
Commit 4122a5b9b5
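
For downstream code that pushed frames with the old names, the change is purely mechanical. The sketch below is illustrative only (push_test_frame, the 640x480 size, and the white fill are made up for the example); it relies on the fact, visible in obs_source_output_video() further down, that the frame is copied internally, so the caller still frees its own allocation.

#include <obs.h>
#include <util/platform.h>
#include <string.h>

/* Sketch: push one asynchronous video frame using the renamed API.
 * Old names:  source_frame_create / source_frame_destroy
 * New names:  obs_source_frame_create / obs_source_frame_destroy */
static void push_test_frame(obs_source_t source)
{
	struct obs_source_frame *frame =
		obs_source_frame_create(VIDEO_FORMAT_RGBA, 640, 480);

	/* Fill the single RGBA plane with white pixels. */
	memset(frame->data[0], 0xFF, (size_t)frame->linesize[0] * 480);
	frame->timestamp = os_gettime_ns();

	/* obs_source_output_video() caches a copy of the frame, so the
	 * caller keeps ownership of this one. */
	obs_source_output_video(source, frame);
	obs_source_frame_destroy(frame);
}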

+ 2 - 2
libobs/obs-internal.h

@@ -139,7 +139,7 @@ struct obs_core_video {
 	bool                            textures_output[NUM_TEXTURES];
 	bool                            textures_copied[NUM_TEXTURES];
 	bool                            textures_converted[NUM_TEXTURES];
-	struct source_frame             convert_frames[NUM_TEXTURES];
+	struct obs_source_frame         convert_frames[NUM_TEXTURES];
 	effect_t                        default_effect;
 	effect_t                        solid_effect;
 	effect_t                        conversion_effect;
@@ -350,7 +350,7 @@ struct obs_source {
 	float                           async_color_range_max[3];
 	int                             async_plane_offset[2];
 	bool                            async_flip;
-	DARRAY(struct source_frame*)    video_frames;
+	DARRAY(struct obs_source_frame*)video_frames;
 	pthread_mutex_t                 video_mutex;
 	uint32_t                        async_width;
 	uint32_t                        async_height;

+ 38 - 34
libobs/obs-source.c

@@ -194,8 +194,8 @@ fail:
 	return NULL;
 }
 
-void source_frame_init(struct source_frame *frame, enum video_format format,
-		uint32_t width, uint32_t height)
+void obs_source_frame_init(struct obs_source_frame *frame,
+		enum video_format format, uint32_t width, uint32_t height)
 {
 	struct video_frame vid_frame;
 
@@ -238,7 +238,7 @@ void obs_source_destroy(struct obs_source *source)
 		obs_source_release(source->filters.array[i]);
 
 	for (i = 0; i < source->video_frames.num; i++)
-		source_frame_destroy(source->video_frames.array[i]);
+		obs_source_frame_destroy(source->video_frames.array[i]);
 
 	gs_entercontext(obs->video.graphics);
 	texrender_destroy(source->async_convert_texrender);
@@ -699,7 +699,7 @@ static inline enum convert_type get_convert_type(enum video_format format)
 }
 
 static inline bool set_packed422_sizes(struct obs_source *source,
-		struct source_frame *frame)
+		struct obs_source_frame *frame)
 {
 	source->async_convert_height = frame->height;
 	source->async_convert_width  = frame->width / 2;
@@ -708,7 +708,7 @@ static inline bool set_packed422_sizes(struct obs_source *source,
 }
 
 static inline bool set_planar420_sizes(struct obs_source *source,
-		struct source_frame *frame)
+		struct obs_source_frame *frame)
 {
 	uint32_t size = frame->width * frame->height;
 	size += size/2;
@@ -723,7 +723,7 @@ static inline bool set_planar420_sizes(struct obs_source *source,
 }
 
 static inline bool init_gpu_conversion(struct obs_source *source,
-		struct source_frame *frame)
+		struct obs_source_frame *frame)
 {
 	switch (get_convert_type(frame->format)) {
 		case CONVERT_422_Y:
@@ -758,7 +758,7 @@ static inline enum gs_color_format convert_video_format(
 }
 
 static inline bool set_async_texture_size(struct obs_source *source,
-		struct source_frame *frame)
+		struct obs_source_frame *frame)
 {
 	enum convert_type prev, cur;
 	prev = get_convert_type(source->async_format);
@@ -804,7 +804,8 @@ static inline bool set_async_texture_size(struct obs_source *source,
 	return true;
 }
 
-static void upload_raw_frame(texture_t tex, const struct source_frame *frame)
+static void upload_raw_frame(texture_t tex,
+		const struct obs_source_frame *frame)
 {
 	switch (get_convert_type(frame->format)) {
 		case CONVERT_422_U:
@@ -864,7 +865,7 @@ static inline void set_eparam(effect_t effect, const char *name, float val)
 }
 
 static bool update_async_texrender(struct obs_source *source,
-		const struct source_frame *frame)
+		const struct obs_source_frame *frame)
 {
 	texture_t   tex       = source->async_texture;
 	texrender_t texrender = source->async_convert_texrender;
@@ -922,7 +923,7 @@ static bool update_async_texrender(struct obs_source *source,
 }
 
 static bool update_async_texture(struct obs_source *source,
-		const struct source_frame *frame)
+		const struct obs_source_frame *frame)
 {
 	texture_t         tex       = source->async_texture;
 	texrender_t       texrender = source->async_convert_texrender;
@@ -1036,7 +1037,7 @@ static void obs_source_draw_async_texture(struct obs_source *source)
 
 static void obs_source_render_async_video(obs_source_t source)
 {
-	struct source_frame *frame = obs_source_get_frame(source);
+	struct obs_source_frame *frame = obs_source_get_frame(source);
 	if (frame) {
 		if (!set_async_texture_size(source, frame))
 			return;
@@ -1237,8 +1238,8 @@ obs_data_t obs_source_getsettings(obs_source_t source)
 	return source->context.settings;
 }
 
-static inline struct source_frame *filter_async_video(obs_source_t source,
-		struct source_frame *in)
+static inline struct obs_source_frame *filter_async_video(obs_source_t source,
+		struct obs_source_frame *in)
 {
 	size_t i;
 	for (i = source->filters.num; i > 0; i--) {
@@ -1255,8 +1256,8 @@ static inline struct source_frame *filter_async_video(obs_source_t source,
 	return in;
 }
 
-static inline void copy_frame_data_line(struct source_frame *dst,
-		const struct source_frame *src, uint32_t plane, uint32_t y)
+static inline void copy_frame_data_line(struct obs_source_frame *dst,
+		const struct obs_source_frame *src, uint32_t plane, uint32_t y)
 {
 	uint32_t pos_src = y * src->linesize[plane];
 	uint32_t pos_dst = y * dst->linesize[plane];
@@ -1266,8 +1267,9 @@ static inline void copy_frame_data_line(struct source_frame *dst,
 	memcpy(dst->data[plane] + pos_dst, src->data[plane] + pos_src, bytes);
 }
 
-static inline void copy_frame_data_plane(struct source_frame *dst,
-		const struct source_frame *src, uint32_t plane, uint32_t lines)
+static inline void copy_frame_data_plane(struct obs_source_frame *dst,
+		const struct obs_source_frame *src,
+		uint32_t plane, uint32_t lines)
 {
 	if (dst->linesize[plane] != src->linesize[plane])
 		for (uint32_t y = 0; y < lines; y++)
@@ -1277,8 +1279,8 @@ static inline void copy_frame_data_plane(struct source_frame *dst,
 				dst->linesize[plane] * lines);
 }
 
-static void copy_frame_data(struct source_frame *dst,
-		const struct source_frame *src)
+static void copy_frame_data(struct obs_source_frame *dst,
+		const struct obs_source_frame *src)
 {
 	dst->flip         = src->flip;
 	dst->full_range   = src->full_range;
@@ -1313,11 +1315,12 @@ static void copy_frame_data(struct source_frame *dst,
 	}
 }
 
-static inline struct source_frame *cache_video(const struct source_frame *frame)
+static inline struct obs_source_frame *cache_video(
+		const struct obs_source_frame *frame)
 {
 	/* TODO: use an actual cache */
-	struct source_frame *new_frame = source_frame_create(frame->format,
-			frame->width, frame->height);
+	struct obs_source_frame *new_frame = obs_source_frame_create(
+			frame->format, frame->width, frame->height);
 
 	copy_frame_data(new_frame, frame);
 	return new_frame;
@@ -1332,12 +1335,12 @@ static inline void cycle_frames(struct obs_source *source)
 }
 
 void obs_source_output_video(obs_source_t source,
-		const struct source_frame *frame)
+		const struct obs_source_frame *frame)
 {
 	if (!source || !frame)
 		return;
 
-	struct source_frame *output = cache_video(frame);
+	struct obs_source_frame *output = cache_video(frame);
 
 	pthread_mutex_lock(&source->filter_mutex);
 	output = filter_async_video(source, output);
@@ -1502,8 +1505,8 @@ static inline bool frame_out_of_bounds(obs_source_t source, uint64_t ts)
 
 static bool ready_async_frame(obs_source_t source, uint64_t sys_time)
 {
-	struct source_frame *next_frame = source->video_frames.array[0];
-	struct source_frame *frame      = NULL;
+	struct obs_source_frame *next_frame = source->video_frames.array[0];
+	struct obs_source_frame *frame      = NULL;
 	uint64_t sys_offset = sys_time - source->last_sys_timestamp;
 	uint64_t frame_time = next_frame->timestamp;
 	uint64_t frame_offset = 0;
@@ -1518,7 +1521,7 @@ static bool ready_async_frame(obs_source_t source, uint64_t sys_time)
 	}
 
 	while (frame_offset <= sys_offset) {
-		source_frame_destroy(frame);
+		obs_source_frame_destroy(frame);
 
 		if (source->video_frames.num == 1)
 			return true;
@@ -1538,16 +1541,16 @@ static bool ready_async_frame(obs_source_t source, uint64_t sys_time)
 		frame_offset = frame_time - source->last_frame_ts;
 	}
 
-	source_frame_destroy(frame);
+	obs_source_frame_destroy(frame);
 
 	return frame != NULL;
 }
 
-static inline struct source_frame *get_closest_frame(obs_source_t source,
+static inline struct obs_source_frame *get_closest_frame(obs_source_t source,
 		uint64_t sys_time)
 {
 	if (ready_async_frame(source, sys_time)) {
-		struct source_frame *frame = source->video_frames.array[0];
+		struct obs_source_frame *frame = source->video_frames.array[0];
 		da_erase(source->video_frames, 0);
 		return frame;
 	}
@@ -1561,9 +1564,9 @@ static inline struct source_frame *get_closest_frame(obs_source_t source,
  * the frame with the closest timing to ensure sync.  Also ensures that timing
  * with audio is synchronized.
  */
-struct source_frame *obs_source_get_frame(obs_source_t source)
+struct obs_source_frame *obs_source_get_frame(obs_source_t source)
 {
-	struct source_frame *frame = NULL;
+	struct obs_source_frame *frame = NULL;
 	uint64_t sys_time;
 
 	if (!source)
@@ -1602,10 +1605,11 @@ unlock:
 	return frame;
 }
 
-void obs_source_release_frame(obs_source_t source, struct source_frame *frame)
+void obs_source_release_frame(obs_source_t source,
+		struct obs_source_frame *frame)
 {
 	if (source && frame) {
-		source_frame_destroy(frame);
+		obs_source_frame_destroy(frame);
 		obs_source_release(source);
 	}
 }
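
For completeness, a minimal consumer of the renamed getter/release pair might look like the sketch below (consume_one_frame is a hypothetical helper and the texture-upload step is elided). As the body of obs_source_release_frame() above shows, releasing the frame also calls obs_source_release() on the source.

#include <obs.h>

/* Sketch: drain one frame from a source's async video queue. */
static void consume_one_frame(obs_source_t source)
{
	struct obs_source_frame *frame = obs_source_get_frame(source);
	if (!frame)
		return;  /* no frame ready yet */

	/* ... upload frame->data[] / frame->linesize[] to a texture ... */

	/* Frees the frame and releases the source reference (see
	 * obs_source_release_frame() above). */
	obs_source_release_frame(source, frame);
}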

+ 2 - 2
libobs/obs-source.h

@@ -234,8 +234,8 @@ struct obs_source_info {
 	 * @return        New video frame data.  This can defer video data to
 	 *                be drawn later if time is needed for processing
 	 */
-	struct source_frame *(*filter_video)(void *data,
-			const struct source_frame *frame);
+	struct obs_source_frame *(*filter_video)(void *data,
+			const struct obs_source_frame *frame);
 
 	/**
 	 * Called to filter raw audio data.
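
The only change for async video filters is the type in the callback signature. A skeletal pass-through under the new names might look like this (my_filter_video is a hypothetical name; the cast exists only to match the declared return type, and a real filter would process or replace the frame):

#include <obs.h>

static struct obs_source_frame *my_filter_video(void *data,
		const struct obs_source_frame *frame)
{
	UNUSED_PARAMETER(data);

	/* Pass the frame through unchanged. */
	return (struct obs_source_frame *)frame;
}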

+ 4 - 2
libobs/obs-video.c

@@ -305,7 +305,8 @@ static inline uint32_t make_aligned_linesize_offset(uint32_t offset,
 static void fix_gpu_converted_alignment(struct obs_core_video *video,
 		struct video_data *frame, int cur_texture)
 {
-	struct source_frame *new_frame = &video->convert_frames[cur_texture];
+	struct obs_source_frame *new_frame =
+		&video->convert_frames[cur_texture];
 	uint32_t src_linesize = frame->linesize[0];
 	uint32_t dst_linesize = video->output_width * 4;
 	uint32_t src_pos      = 0;
@@ -353,7 +354,8 @@ static bool convert_frame(struct obs_core_video *video,
 		struct video_data *frame,
 		const struct video_output_info *info, int cur_texture)
 {
-	struct source_frame *new_frame = &video->convert_frames[cur_texture];
+	struct obs_source_frame *new_frame =
+		&video->convert_frames[cur_texture];
 
 	if (info->format == VIDEO_FORMAT_I420) {
 		compress_uyvx_to_i420(

+ 3 - 3
libobs/obs.c

@@ -187,9 +187,9 @@ static bool obs_init_textures(struct obs_video_info *ovi)
 			return false;
 
 		if (yuv)
-			source_frame_init(&video->convert_frames[i],
+			obs_source_frame_init(&video->convert_frames[i],
 					ovi->output_format,
-					ovi->output_width, ovi->output_height);
+					ovi->output_width,ovi->output_height);
 	}
 
 	return true;
@@ -349,7 +349,7 @@ static void obs_free_video(void)
 			texture_destroy(video->render_textures[i]);
 			texture_destroy(video->convert_textures[i]);
 			texture_destroy(video->output_textures[i]);
-			source_frame_free(&video->convert_frames[i]);
+			obs_source_frame_free(&video->convert_frames[i]);
 
 			video->copy_surfaces[i]    = NULL;
 			video->render_textures[i]  = NULL;

+ 12 - 12
libobs/obs.h

@@ -182,7 +182,7 @@ struct source_audio {
  * If a YUV format is specified, it will be automatically upsampled and
  * converted to RGB via shader on the graphics processor.
  */
-struct source_frame {
+struct obs_source_frame {
 	uint8_t             *data[MAX_AV_PLANES];
 	uint32_t            linesize[MAX_AV_PLANES];
 	uint32_t            width;
@@ -708,18 +708,18 @@ EXPORT void obs_source_load(obs_source_t source);
 
 /** Outputs asynchronous video data */
 EXPORT void obs_source_output_video(obs_source_t source,
-		const struct source_frame *frame);
+		const struct obs_source_frame *frame);
 
 /** Outputs audio data (always asynchronous) */
 EXPORT void obs_source_output_audio(obs_source_t source,
 		const struct source_audio *audio);
 
 /** Gets the current async video frame */
-EXPORT struct source_frame *obs_source_get_frame(obs_source_t source);
+EXPORT struct obs_source_frame *obs_source_get_frame(obs_source_t source);
 
 /** Releases the current async video frame */
 EXPORT void obs_source_release_frame(obs_source_t source,
-		struct source_frame *frame);
+		struct obs_source_frame *frame);
 
 /** Default RGB filter handler for generic effect filters */
 EXPORT void obs_source_process_filter(obs_source_t filter, effect_t effect,
@@ -1127,28 +1127,28 @@ EXPORT const char *obs_service_get_password(obs_service_t service);
 
 /* ------------------------------------------------------------------------- */
 /* Source frame allocation functions */
-EXPORT void source_frame_init(struct source_frame *frame,
+EXPORT void obs_source_frame_init(struct obs_source_frame *frame,
 		enum video_format format, uint32_t width, uint32_t height);
 
-static inline void source_frame_free(struct source_frame *frame)
+static inline void obs_source_frame_free(struct obs_source_frame *frame)
 {
 	if (frame) {
 		bfree(frame->data[0]);
-		memset(frame, 0, sizeof(struct source_frame));
+		memset(frame, 0, sizeof(*frame));
 	}
 }
 
-static inline struct source_frame *source_frame_create(
+static inline struct obs_source_frame *obs_source_frame_create(
 		enum video_format format, uint32_t width, uint32_t height)
 {
-	struct source_frame *frame;
+	struct obs_source_frame *frame;
 
-	frame = (struct source_frame*)bzalloc(sizeof(struct source_frame));
-	source_frame_init(frame, format, width, height);
+	frame = (struct obs_source_frame*)bzalloc(sizeof(*frame));
+	obs_source_frame_init(frame, format, width, height);
 	return frame;
 }
 
-static inline void source_frame_destroy(struct source_frame *frame)
+static inline void obs_source_frame_destroy(struct obs_source_frame *frame)
 {
 	if (frame) {
 		bfree(frame->data[0]);

+ 5 - 5
plugins/linux-v4l2/v4l2-input.c

@@ -276,17 +276,17 @@ static void v4l2_destroy_mmap(struct v4l2_data *data)
  * Prepare the output frame structure for obs and compute plane offsets
  *
  * Basically all data apart from memory pointers and the timestamp is known
- * before the capture starts. This function prepares the source_frame struct
- * with all the data that is already known.
+ * before the capture starts. This function prepares the obs_source_frame
+ * struct with all the data that is already known.
  *
  * v4l2 uses a continuous memory segment for all planes so we simply compute
  * offsets to add to the start address in order to give obs the correct data
  * pointers for the individual planes.
  */
 static void v4l2_prep_obs_frame(struct v4l2_data *data,
-	struct source_frame *frame, size_t *plane_offsets)
+	struct obs_source_frame *frame, size_t *plane_offsets)
 {
-	memset(frame, 0, sizeof(struct source_frame));
+	memset(frame, 0, sizeof(struct obs_source_frame));
 	memset(plane_offsets, 0, sizeof(size_t) * MAX_AV_PLANES);
 
 	frame->width = data->width;
@@ -333,7 +333,7 @@ static void *v4l2_thread(void *vptr)
 	uint8_t *start;
 	struct timeval tv;
 	struct v4l2_buffer buf;
-	struct source_frame out;
+	struct obs_source_frame out;
 	size_t plane_offsets[MAX_AV_PLANES];
 
 	if (v4l2_start_capture(data) < 0)
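
As a rough illustration of the plane-offset scheme described in the comment above (map_planes is illustrative, not code from the plugin), each plane pointer of the prepared obs_source_frame is simply the start of the single mmap'ed capture buffer plus its precomputed offset:

#include <obs.h>

/* Illustrative only: map one continuous capture buffer onto the planes of
 * an obs_source_frame that v4l2_prep_obs_frame() already set up. */
static void map_planes(struct obs_source_frame *frame, uint8_t *start,
		const size_t plane_offsets[MAX_AV_PLANES])
{
	for (size_t i = 0; i < MAX_AV_PLANES; i++) {
		if (frame->linesize[i])  /* only planes used by the format */
			frame->data[i] = start + plane_offsets[i];
	}
}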

+ 4 - 4
plugins/mac-avcapture/av-capture.m

@@ -71,7 +71,7 @@ struct av_capture {
 
 	obs_source_t source;
 
-	struct source_frame frame;
+	struct obs_source_frame frame;
 };
 
 static inline enum video_format format_from_subtype(FourCharCode subtype)
@@ -124,7 +124,7 @@ static inline enum video_colorspace get_colorspace(CMFormatDescriptionRef desc)
 }
 
 static inline bool update_colorspace(struct av_capture *capture,
-		struct source_frame *frame, CMFormatDescriptionRef desc,
+		struct obs_source_frame *frame, CMFormatDescriptionRef desc,
 		bool full_range)
 {
 	enum video_colorspace colorspace = get_colorspace(desc);
@@ -151,7 +151,7 @@ static inline bool update_colorspace(struct av_capture *capture,
 }
 
 static inline bool update_frame(struct av_capture *capture,
-		struct source_frame *frame, CMSampleBufferRef sample_buffer)
+		struct obs_source_frame *frame, CMSampleBufferRef sample_buffer)
 {
 	CMFormatDescriptionRef desc =
 		CMSampleBufferGetFormatDescription(sample_buffer);
@@ -225,7 +225,7 @@ static inline bool update_frame(struct av_capture *capture,
 	if (count < 1 || !capture)
 		return;
 
-	struct source_frame *frame = &capture->frame;
+	struct obs_source_frame *frame = &capture->frame;
 
 	CMTime target_pts =
 		CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer);

+ 1 - 1
plugins/win-dshow/win-dshow.cpp

@@ -60,7 +60,7 @@ struct DShowInput {
 	VideoConfig  videoConfig;
 	AudioConfig  audioConfig;
 
-	source_frame frame;
+	obs_source_frame frame;
 
 	inline DShowInput(obs_source_t source_)
 		: source         (source_),

+ 1 - 1
test/test-input/test-random.c

@@ -53,7 +53,7 @@ static void *video_thread(void *data)
 	uint32_t            pixels[20*20];
 	uint64_t            cur_time = os_gettime_ns();
 
-	struct source_frame frame = {
+	struct obs_source_frame frame = {
 		.data     = {[0] = (uint8_t*)pixels},
 		.linesize = {[0] = 20*4},
 		.width    = 20,