| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666 | 
							- /*
 
-  * CVideoHandler.cpp, part of VCMI engine
 
-  *
 
-  * Authors: listed in file AUTHORS in main folder
 
-  *
 
-  * License: GNU General Public License v2.0 or later
 
-  * Full text of license available in license.txt file, in main folder
 
-  *
 
-  */
 
- #include "StdInc.h"
 
- #include "CVideoHandler.h"
 
- #ifndef DISABLE_VIDEO
 
- #include "ISoundPlayer.h"
 
- #include "../CGameInfo.h"
 
- #include "../CMT.h"
 
- #include "../eventsSDL/InputHandler.h"
 
- #include "../gui/CGuiHandler.h"
 
- #include "../render/Canvas.h"
 
- #include "../render/IScreenHandler.h"
 
- #include "../renderSDL/SDL_Extensions.h"
 
- #include "../../lib/filesystem/CInputStream.h"
 
- #include "../../lib/filesystem/Filesystem.h"
 
- #include "../../lib/texts/CGeneralTextHandler.h"
 
- #include "../../lib/texts/Languages.h"
 
- #include <SDL_render.h>
 
- extern "C" {
 
- #include <libavformat/avformat.h>
 
- #include <libavcodec/avcodec.h>
 
- #include <libavutil/imgutils.h>
 
- #include <libswscale/swscale.h>
 
- }
 
- // Define a set of functions to read data
 
- static int lodRead(void * opaque, uint8_t * buf, int size)
 
- {
 
- 	auto * data = static_cast<CInputStream *>(opaque);
 
- 	auto bytesRead = data->read(buf, size);
 
- 	if(bytesRead == 0)
 
- 		return AVERROR_EOF;
 
- 	return bytesRead;
 
- }
 
- static si64 lodSeek(void * opaque, si64 pos, int whence)
 
- {
 
- 	auto * data = static_cast<CInputStream *>(opaque);
 
- 	if(whence & AVSEEK_SIZE)
 
- 		return data->getSize();
 
- 	return data->seek(pos);
 
- }
 
- [[noreturn]] static void throwFFmpegError(int errorCode)
 
- {
 
- 	std::array<char, AV_ERROR_MAX_STRING_SIZE> errorMessage{};
 
- 	av_strerror(errorCode, errorMessage.data(), errorMessage.size());
 
- 	throw std::runtime_error(errorMessage.data());
 
- }
 
- static std::unique_ptr<CInputStream> findVideoData(const VideoPath & videoToOpen)
 
- {
 
- 	if(CResourceHandler::get()->existsResource(videoToOpen))
 
- 		return CResourceHandler::get()->load(videoToOpen);
 
- 	auto highQualityVideoToOpenWithDir = videoToOpen.addPrefix("VIDEO/");
 
- 	auto lowQualityVideo = videoToOpen.toType<EResType::VIDEO_LOW_QUALITY>();
 
- 	auto lowQualityVideoWithDir = highQualityVideoToOpenWithDir.toType<EResType::VIDEO_LOW_QUALITY>();
 
- 	if(CResourceHandler::get()->existsResource(highQualityVideoToOpenWithDir))
 
- 		return CResourceHandler::get()->load(highQualityVideoToOpenWithDir);
 
- 	if(CResourceHandler::get()->existsResource(lowQualityVideo))
 
- 		return CResourceHandler::get()->load(lowQualityVideo);
 
- 	if(CResourceHandler::get()->existsResource(lowQualityVideoWithDir))
 
- 		return CResourceHandler::get()->load(lowQualityVideoWithDir);
 
- 	return nullptr;
 
- }
 
/// Locates and opens the data stream for the given video resource.
/// @return true if the video data was found, false otherwise
bool FFMpegStream::openInput(const VideoPath & videoToOpen)
{
	input = findVideoData(videoToOpen);

	return input != nullptr;
}
 
- void FFMpegStream::openContext()
 
- {
 
- 	static const int BUFFER_SIZE = 4096;
 
- 	input->seek(0);
 
- 	auto * buffer = static_cast<unsigned char *>(av_malloc(BUFFER_SIZE)); // will be freed by ffmpeg
 
- 	context = avio_alloc_context(buffer, BUFFER_SIZE, 0, input.get(), lodRead, nullptr, lodSeek);
 
- 	formatContext = avformat_alloc_context();
 
- 	formatContext->pb = context;
 
- 	// filename is not needed - file was already open and stored in this->data;
 
- 	int avfopen = avformat_open_input(&formatContext, "dummyFilename", nullptr, nullptr);
 
- 	if(avfopen != 0)
 
- 		throwFFmpegError(avfopen);
 
- 	// Retrieve stream information
 
- 	int findStreamInfo = avformat_find_stream_info(formatContext, nullptr);
 
- 	if(avfopen < 0)
 
- 		throwFFmpegError(findStreamInfo);
 
- }
 
/// Initializes the decoder for the selected stream and allocates the working frame.
/// @param desiredStreamIndex index (within formatContext->streams) of the stream to decode
/// @throws std::runtime_error if no decoder exists for the stream's codec,
///         if the codec context cannot be allocated, or if the codec fails to open
void FFMpegStream::openCodec(int desiredStreamIndex)
{
	streamIndex = desiredStreamIndex;

	// Find the decoder for the stream
	codec = avcodec_find_decoder(formatContext->streams[streamIndex]->codecpar->codec_id);

	if(codec == nullptr)
		throw std::runtime_error("Unsupported codec");

	codecContext = avcodec_alloc_context3(codec);
	if(codecContext == nullptr)
		throw std::runtime_error("Failed to create codec context");

	// Get a pointer to the codec context for the video stream
	int ret = avcodec_parameters_to_context(codecContext, formatContext->streams[streamIndex]->codecpar);
	if(ret < 0)
	{
		//We cannot get codec from parameters
		// free the context here so the destructor does not touch a half-initialized one
		avcodec_free_context(&codecContext);
		throwFFmpegError(ret);
	}

	// Open codec
	ret = avcodec_open2(codecContext, codec, nullptr);
	if(ret < 0)
	{
		// Could not open codec
		codec = nullptr;
		throwFFmpegError(ret);
	}
	// Allocate video frame
	frame = av_frame_alloc();
}
 
/// Returns codec parameters of the currently opened stream (owned by formatContext).
const AVCodecParameters * FFMpegStream::getCodecParameters() const
{
	return formatContext->streams[streamIndex]->codecpar;
}
 
/// Returns the decoder context created by openCodec() (owned by this object).
const AVCodecContext * FFMpegStream::getCodecContext() const
{
	return codecContext;
}
 
/// Returns the most recently decoded frame, or nullptr once the stream has ended.
const AVFrame * FFMpegStream::getCurrentFrame() const
{
	return frame;
}
 
/// Opens the container and the decoder for the first video stream found.
/// Requires a successful openInput() beforehand.
void CVideoInstance::openVideo()
{
	openContext();
	openCodec(findVideoStream());
}
 
/// Allocates the scaling context and the output target for decoded frames.
/// @param scaleToScreenSize if true, output dimensions match the full screen;
///        otherwise the source video size scaled by the UI scaling factor
/// @param useTextureOutput if true, frames go to an SDL streaming texture
///        (YUV or RGB depending on the source pixel format); otherwise to a
///        software SDL surface converted to RGB32
/// @throws std::runtime_error if the swscale context cannot be created
void CVideoInstance::prepareOutput(bool scaleToScreenSize, bool useTextureOutput)
{
	//setup scaling
	if(scaleToScreenSize)
	{
		dimensions.x = screen->w;
		dimensions.y = screen->h;
	}
	else
	{
		dimensions = Point(getCodecContext()->width, getCodecContext()->height) * GH.screenHandler().getScalingFactor();
	}

	// Allocate a place to put our YUV image on that screen
	if (useTextureOutput)
	{
		std::array potentialFormats = {
			AV_PIX_FMT_YUV420P, // -> SDL_PIXELFORMAT_IYUV - most of H3 videos use YUV format, so it is preferred to save some space & conversion time
			AV_PIX_FMT_RGB32,   // -> SDL_PIXELFORMAT_ARGB8888 - some .smk videos actually use palette, so RGB > YUV. This is also our screen texture format
			AV_PIX_FMT_NONE
		};

		// pick output format closest to the source to minimize conversion losses
		auto preferredFormat = avcodec_find_best_pix_fmt_of_list(potentialFormats.data(), getCodecContext()->pix_fmt, false, nullptr);

		if (preferredFormat == AV_PIX_FMT_YUV420P)
			textureYUV = SDL_CreateTexture( mainRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, dimensions.x, dimensions.y);
		else
			textureRGB = SDL_CreateTexture( mainRenderer, SDL_PIXELFORMAT_ARGB8888, SDL_TEXTUREACCESS_STREAMING, dimensions.x, dimensions.y);

		sws = sws_getContext(getCodecContext()->width, getCodecContext()->height, getCodecContext()->pix_fmt,
							dimensions.x, dimensions.y, preferredFormat,
							 SWS_BICUBIC, nullptr, nullptr, nullptr);
	}
	else
	{
		surface = CSDL_Ext::newSurface(dimensions);
		sws = sws_getContext(getCodecContext()->width, getCodecContext()->height, getCodecContext()->pix_fmt,
							 dimensions.x, dimensions.y, AV_PIX_FMT_RGB32,
							 SWS_BICUBIC, nullptr, nullptr, nullptr);
	}

	if (sws == nullptr)
		throw std::runtime_error("Failed to create sws");
}
 
/// Decodes the next frame of the opened stream into the 'frame' member.
/// On end of stream the frame is freed and set to nullptr, so callers detect
/// EOF via getCurrentFrame() == nullptr.
/// @throws std::runtime_error (via throwFFmpegError) on any decoding error
void FFMpegStream::decodeNextFrame()
{
	int rc = avcodec_receive_frame(codecContext, frame);

	// frame extracted - data that was sent to codecContext before was sufficient
	if (rc == 0)
		return;

	// returning AVERROR(EAGAIN) is legal - this indicates that codec requires more data from input stream to decode next frame
	if(rc != AVERROR(EAGAIN))
		throwFFmpegError(rc);

	for(;;)
	{
		AVPacket packet;

		// codecContext does not have enough input data - read next packet from input stream
		int ret = av_read_frame(formatContext, &packet);
		if(ret < 0)
		{
			if(ret == AVERROR_EOF)
			{
				// end of input - drop the frame to signal EOF to callers
				av_packet_unref(&packet);
				av_frame_free(&frame);
				frame = nullptr;
				return;
			}
			throwFFmpegError(ret);
		}

		// Is this a packet from the stream that needs decoding?
		if(packet.stream_index == streamIndex)
		{
			// Decode read packet
			// Note: this method may return AVERROR(EAGAIN). However this should never happen with ffmpeg API
			// since there is guaranteed call to avcodec_receive_frame and ffmpeg API promises that *both* of these methods will never return AVERROR(EAGAIN).
			int rc = avcodec_send_packet(codecContext, &packet);
			if(rc < 0)
				throwFFmpegError(rc);

			rc = avcodec_receive_frame(codecContext, frame);
			if(rc == AVERROR(EAGAIN))
			{
				// still need more data - read next packet
				av_packet_unref(&packet);
				continue;
			}
			else if(rc < 0)
			{
				throwFFmpegError(rc);
			}
			else
			{
				// read succesful. Exit the loop
				av_packet_unref(&packet);
				return;
			}
		}
		// packet belongs to some other stream (e.g. audio) - skip it
		av_packet_unref(&packet);
	}
}
 
- bool CVideoInstance::loadNextFrame()
 
- {
 
- 	decodeNextFrame();
 
- 	const AVFrame * frame = getCurrentFrame();
 
- 	if(!frame)
 
- 		return false;
 
- 	uint8_t * data[4] = {};
 
- 	int linesize[4] = {};
 
- 	if(textureYUV)
 
- 	{
 
- 		av_image_alloc(data, linesize, dimensions.x, dimensions.y, AV_PIX_FMT_YUV420P, 1);
 
- 		sws_scale(sws, frame->data, frame->linesize, 0, getCodecContext()->height, data, linesize);
 
- 		SDL_UpdateYUVTexture(textureYUV, nullptr, data[0], linesize[0], data[1], linesize[1], data[2], linesize[2]);
 
- 		av_freep(&data[0]);
 
- 	}
 
- 	if(textureRGB)
 
- 	{
 
- 		av_image_alloc(data, linesize, dimensions.x, dimensions.y, AV_PIX_FMT_RGB32, 1);
 
- 		sws_scale(sws, frame->data, frame->linesize, 0, getCodecContext()->height, data, linesize);
 
- 		SDL_UpdateTexture(textureRGB, nullptr, data[0], linesize[0]);
 
- 		av_freep(&data[0]);
 
- 	}
 
- 	if(surface)
 
- 	{
 
- 		// Avoid buffer overflow caused by sws_scale():
 
- 		// http://trac.ffmpeg.org/ticket/9254
 
- 		size_t pic_bytes = surface->pitch * surface->h;
 
- 		size_t ffmped_pad = 1024; /* a few bytes of overflow will go here */
 
- 		void * for_sws = av_malloc(pic_bytes + ffmped_pad);
 
- 		data[0] = (ui8 *)for_sws;
 
- 		linesize[0] = surface->pitch;
 
- 		sws_scale(sws, frame->data, frame->linesize, 0, getCodecContext()->height, data, linesize);
 
- 		memcpy(surface->pixels, for_sws, pic_bytes);
 
- 		av_free(for_sws);
 
- 	}
 
- 	return true;
 
- }
 
/// Returns true once the last frame has been decoded (see decodeNextFrame's EOF handling).
bool CVideoInstance::videoEnded()
{
	return getCurrentFrame() == nullptr;
}
 
/// Releases scaling context and all output targets created by prepareOutput().
/// Unused members are null; the SDL/sws free functions tolerate null arguments here.
CVideoInstance::~CVideoInstance()
{
	sws_freeContext(sws);
	SDL_DestroyTexture(textureYUV);
	SDL_DestroyTexture(textureRGB);
	SDL_FreeSurface(surface);
}
 
/// Tears down decoding state in reverse order of construction:
/// frame, codec context, format context, then the custom AVIO context.
FFMpegStream::~FFMpegStream()
{
	av_frame_free(&frame);

#if (LIBAVCODEC_VERSION_MAJOR < 61 )
	// deprecated, apparently no longer necessary - avcodec_free_context should suffice
	avcodec_close(codecContext);
#endif

	avcodec_free_context(&codecContext);
	avformat_close_input(&formatContext);
	av_free(context);
}
 
- Point CVideoInstance::size()
 
- {
 
- 	if(!getCurrentFrame())
 
- 		throw std::runtime_error("Invalid video frame!");
 
- 	return Point(getCurrentFrame()->width, getCurrentFrame()->height);
 
- }
 
/// Blits the current frame (software-surface mode only) onto the target canvas.
/// @param position top-left corner, in logical coordinates (scaled internally)
/// @throws std::runtime_error if prepareOutput() was never called
void CVideoInstance::show(const Point & position, Canvas & canvas)
{
	if(sws == nullptr)
		throw std::runtime_error("No video to show!");

	CSDL_Ext::blitSurface(surface, canvas.getInternalSurface(), position * GH.screenHandler().getScalingFactor());
}
 
/// Returns the presentation end time of the current frame, in seconds since video start.
double FFMpegStream::getCurrentFrameEndTime() const
{
#if(LIBAVUTIL_VERSION_MAJOR < 58)
	// older ffmpeg exposes the duration under a different (now deprecated) name
	auto packet_duration = frame->pkt_duration;
#else
	auto packet_duration = frame->duration;
#endif
	// pts and duration are expressed in stream time_base units; convert to seconds
	return (frame->pts + packet_duration) * av_q2d(formatContext->streams[streamIndex]->time_base);
}
 
/// Returns the display duration of the current frame, in seconds.
double FFMpegStream::getCurrentFrameDuration() const
{
#if(LIBAVUTIL_VERSION_MAJOR < 58)
	// older ffmpeg exposes the duration under a different (now deprecated) name
	auto packet_duration = frame->pkt_duration;
#else
	auto packet_duration = frame->duration;
#endif
	// duration is expressed in stream time_base units; convert to seconds
	return packet_duration * av_q2d(formatContext->streams[streamIndex]->time_base);
}
 
/// Advances the playback clock by msPassed milliseconds and loads the next
/// frame once the current frame's presentation interval has elapsed.
/// @throws std::runtime_error if no video is loaded or it has already ended
void CVideoInstance::tick(uint32_t msPassed)
{
	if(sws == nullptr)
		throw std::runtime_error("No video to show!");

	if(videoEnded())
		throw std::runtime_error("Video already ended!");

	frameTime += msPassed / 1000.0;

	if(frameTime >= getCurrentFrameEndTime())
		loadNextFrame();
}
 
/// Properties of a decoded audio sample format, as needed for WAV export.
struct FFMpegFormatDescription
{
	uint8_t sampleSizeBytes; // size of a single (mono) sample, in bytes
	uint8_t wavFormatID;     // WAV 'AudioFormat' tag: 1 = PCM, 3 = IEEE float
	bool isPlanar;           // true if channels are stored in separate planes (ffmpeg *P formats)
};
 
- static FFMpegFormatDescription getAudioFormatProperties(int audioFormat)
 
- {
 
- 	switch (audioFormat)
 
- 	{
 
- 		case AV_SAMPLE_FMT_U8:   return { 1, 1, false};
 
- 		case AV_SAMPLE_FMT_U8P:  return { 1, 1, true};
 
- 		case AV_SAMPLE_FMT_S16:  return { 2, 1, false};
 
- 		case AV_SAMPLE_FMT_S16P: return { 2, 1, true};
 
- 		case AV_SAMPLE_FMT_S32:  return { 4, 1, false};
 
- 		case AV_SAMPLE_FMT_S32P: return { 4, 1, true};
 
- 		case AV_SAMPLE_FMT_S64:  return { 8, 1, false};
 
- 		case AV_SAMPLE_FMT_S64P: return { 8, 1, true};
 
- 		case AV_SAMPLE_FMT_FLT:  return { 4, 3, false};
 
- 		case AV_SAMPLE_FMT_FLTP: return { 4, 3, true};
 
- 		case AV_SAMPLE_FMT_DBL:  return { 8, 3, false};
 
- 		case AV_SAMPLE_FMT_DBLP: return { 8, 3, true};
 
- 	}
 
- 	throw std::runtime_error("Invalid audio format");
 
- }
 
/// Selects the most suitable audio stream from the opened container.
/// @return stream index, or -1 if the container has no audio streams
/// When several audio tracks exist, tries to match the player's preferred
/// language - first via stream metadata, then via a fixed track order used by
/// Heroes Chronicles videos (2 tracks: EN/DE; 5 tracks: EN/FR/DE/IT/ES).
int FFMpegStream::findAudioStream() const
{
	std::vector<int> audioStreamIndices;

	// collect indices of all audio streams in the container
	for(int i = 0; i < formatContext->nb_streams; i++)
		if(formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
			audioStreamIndices.push_back(i);

	if (audioStreamIndices.empty())
		return -1;

	if (audioStreamIndices.size() == 1)
		return audioStreamIndices.front();

	// multiple audio streams - try to pick best one based on language settings
	std::map<int, std::string> streamToLanguage;

	// Approach 1 - check if stream has language set in metadata
	for (auto const & index : audioStreamIndices)
	{
		const AVDictionaryEntry *e = av_dict_get(formatContext->streams[index]->metadata, "language", nullptr, 0);
		if (e)
			streamToLanguage[index]	= e->value;
	}

	// Approach 2 - no metadata found. This may be video from Chronicles which have predefined (presumably hardcoded) list of languages
	if (streamToLanguage.empty())
	{
		if (audioStreamIndices.size() == 2)
		{
			streamToLanguage[audioStreamIndices[0]] = Languages::getLanguageOptions(Languages::ELanguages::ENGLISH).tagISO2;
			streamToLanguage[audioStreamIndices[1]] = Languages::getLanguageOptions(Languages::ELanguages::GERMAN).tagISO2;
		}

		if (audioStreamIndices.size() == 5)
		{
			streamToLanguage[audioStreamIndices[0]] = Languages::getLanguageOptions(Languages::ELanguages::ENGLISH).tagISO2;
			streamToLanguage[audioStreamIndices[1]] = Languages::getLanguageOptions(Languages::ELanguages::FRENCH).tagISO2;
			streamToLanguage[audioStreamIndices[2]] = Languages::getLanguageOptions(Languages::ELanguages::GERMAN).tagISO2;
			streamToLanguage[audioStreamIndices[3]] = Languages::getLanguageOptions(Languages::ELanguages::ITALIAN).tagISO2;
			streamToLanguage[audioStreamIndices[4]] = Languages::getLanguageOptions(Languages::ELanguages::SPANISH).tagISO2;
		}
	}

	// pick the stream whose language tag matches player's preferred language
	std::string preferredLanguageName = CGI->generaltexth->getPreferredLanguage();
	std::string preferredTag = Languages::getLanguageOptions(preferredLanguageName).tagISO2;

	for (auto const & entry : streamToLanguage)
		if (entry.second == preferredTag)
			return entry.first;

	// no language match - fall back to the first audio stream
	return audioStreamIndices.front();
}
 
- int FFMpegStream::findVideoStream() const
 
- {
 
- 	for(int i = 0; i < formatContext->nb_streams; i++)
 
- 		if(formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
 
- 			return i;
 
- 	return -1;
 
- }
 
- std::pair<std::unique_ptr<ui8 []>, si64> CAudioInstance::extractAudio(const VideoPath & videoToOpen)
 
- {
 
- 	if (!openInput(videoToOpen))
 
- 		return { nullptr, 0};
 
- 	openContext();
 
- 	int audioStreamIndex = findAudioStream();
 
- 	if (audioStreamIndex == -1)
 
- 		return { nullptr, 0};
 
- 	openCodec(audioStreamIndex);
 
- 	const auto * codecpar = getCodecParameters();
 
- 	std::vector<ui8> samples;
 
- 	auto formatProperties = getAudioFormatProperties(codecpar->format);
 
- #if(LIBAVUTIL_VERSION_MAJOR < 58)
 
- 	int numChannels = codecpar->channels;
 
- #else
 
- 	int numChannels = codecpar->ch_layout.nb_channels;
 
- #endif
 
- 	samples.reserve(44100 * 5); // arbitrary 5-second buffer
 
- 	for (;;)
 
- 	{
 
- 		decodeNextFrame();
 
- 		const AVFrame * frame = getCurrentFrame();
 
- 		if (!frame)
 
- 			break;
 
- 		int samplesToRead = frame->nb_samples * numChannels;
 
- 		int bytesToRead = samplesToRead * formatProperties.sampleSizeBytes;
 
- 		if (formatProperties.isPlanar && numChannels > 1)
 
- 		{
 
- 			// Workaround for lack of resampler
 
- 			// Currently, ffmpeg on conan systems is built without sws resampler
 
- 			// Because of that, and because wav format does not supports 'planar' formats from ffmpeg
 
- 			// we need to de-planarize it and convert to "normal" (non-planar / interleaved) stream
 
- 			samples.reserve(samples.size() + bytesToRead);
 
- 			for (int sm = 0; sm < frame->nb_samples; ++sm)
 
- 				for (int ch = 0; ch < numChannels; ++ch)
 
- 					samples.insert(samples.end(), frame->data[ch] + sm * formatProperties.sampleSizeBytes, frame->data[ch] + (sm+1) * formatProperties.sampleSizeBytes );
 
- 		}
 
- 		else
 
- 		{
 
- 			samples.insert(samples.end(), frame->data[0], frame->data[0] + bytesToRead);
 
- 		}
 
- 	}
 
- 	struct WavHeader {
 
- 		ui8 RIFF[4] = {'R', 'I', 'F', 'F'};
 
- 		ui32 ChunkSize;
 
- 		ui8 WAVE[4] = {'W', 'A', 'V', 'E'};
 
- 		ui8 fmt[4] = {'f', 'm', 't', ' '};
 
- 		ui32 Subchunk1Size = 16;
 
- 		ui16 AudioFormat = 1;
 
- 		ui16 NumOfChan = 2;
 
- 		ui32 SamplesPerSec = 22050;
 
- 		ui32 bytesPerSec = 22050 * 2;
 
- 		ui16 blockAlign = 1;
 
- 		ui16 bitsPerSample = 32;
 
- 		ui8 Subchunk2ID[4] = {'d', 'a', 't', 'a'};
 
- 		ui32 Subchunk2Size;
 
- 	};
 
- 	WavHeader wav;
 
- 	wav.ChunkSize = samples.size() + sizeof(WavHeader) - 8;
 
- 	wav.AudioFormat = formatProperties.wavFormatID; // 1 = PCM, 3 = IEEE float
 
- 	wav.NumOfChan = numChannels;
 
- 	wav.SamplesPerSec = codecpar->sample_rate;
 
- 	wav.bytesPerSec = codecpar->sample_rate * formatProperties.sampleSizeBytes;
 
- 	wav.bitsPerSample = formatProperties.sampleSizeBytes * 8;
 
- 	wav.Subchunk2Size = samples.size() + sizeof(WavHeader) - 44;
 
- 	auto * wavPtr = reinterpret_cast<ui8*>(&wav);
 
- 	auto dat = std::make_pair(std::make_unique<ui8[]>(samples.size() + sizeof(WavHeader)), samples.size() + sizeof(WavHeader));
 
- 	std::copy(wavPtr, wavPtr + sizeof(WavHeader), dat.first.get());
 
- 	std::copy(samples.begin(), samples.end(), dat.first.get() + sizeof(WavHeader));
 
- 	return dat;
 
- }
 
/// Plays a video file synchronously, blocking the calling thread until it ends.
/// Audio (if any) is extracted up front and played via the sound handler.
/// @param name       video resource to play
/// @param position   top-left corner of the video, in output pixels
/// @param useOverlay NOTE(review): not referenced anywhere in this body - verify against header/callers
/// @param scale      if true, stretch video to the full screen size
/// @param stopOnKey  if true, any user input aborts playback
/// @return false if playback was aborted by user input, true otherwise
///         (including when the video file is missing)
bool CVideoPlayer::openAndPlayVideoImpl(const VideoPath & name, const Point & position, bool useOverlay, bool scale, bool stopOnKey)
{
	CVideoInstance instance;
	CAudioInstance audio;

	auto extractedAudio = audio.extractAudio(name);
	int audioHandle = CCS->soundh->playSound(extractedAudio);

	if (!instance.openInput(name))
		return true;

	instance.openVideo();
	instance.prepareOutput(scale, true);

	auto lastTimePoint = boost::chrono::steady_clock::now();

	while(instance.loadNextFrame())
	{
		if(stopOnKey)
		{
			GH.input().fetchEvents();
			if(GH.input().ignoreEventsUntilInput())
			{
				// user pressed a key - stop audio and report interruption
				CCS->soundh->stopSound(audioHandle);
				return false;
			}
		}

		SDL_Rect rect;
		rect.x = position.x;
		rect.y = position.y;
		rect.w = instance.dimensions.x;
		rect.h = instance.dimensions.y;

		// clear the target area, then present whichever texture kind was created
		SDL_RenderFillRect(mainRenderer, &rect);

		if(instance.textureYUV)
			SDL_RenderCopy(mainRenderer, instance.textureYUV, nullptr, &rect);
		else
			SDL_RenderCopy(mainRenderer, instance.textureRGB, nullptr, &rect);

		SDL_RenderPresent(mainRenderer);

		// Framerate delay
		double targetFrameTimeSeconds = instance.getCurrentFrameDuration();
		auto targetFrameTime = boost::chrono::milliseconds(static_cast<int>(1000 * targetFrameTimeSeconds));

		auto timePointAfterPresent = boost::chrono::steady_clock::now();
		auto timeSpentBusy = boost::chrono::duration_cast<boost::chrono::milliseconds>(timePointAfterPresent - lastTimePoint);

		// sleep only for the remainder of the frame interval
		if(targetFrameTime > timeSpentBusy)
			boost::this_thread::sleep_for(targetFrameTime - timeSpentBusy);

		lastTimePoint = boost::chrono::steady_clock::now();
	}

	return true;
}
 
/// Plays an intro video fullscreen; playback can be skipped by user input.
/// @return false if the user skipped the video, true otherwise
bool CVideoPlayer::playIntroVideo(const VideoPath & name)
{
	return openAndPlayVideoImpl(name, Point(0, 0), true, true, true);
}
 
/// Plays a spellbook animation at the given position, unscaled and not skippable.
void CVideoPlayer::playSpellbookAnimation(const VideoPath & name, const Point & position)
{
	openAndPlayVideoImpl(name, position * GH.screenHandler().getScalingFactor(), false, false, false);
}
 
/// Opens a video for caller-driven playback (surface output, see tick()/show()).
/// The first frame is pre-loaded so the instance is immediately displayable.
/// @return the playable instance, or nullptr if the video file was not found
std::unique_ptr<IVideoInstance> CVideoPlayer::open(const VideoPath & name, bool scaleToScreen)
{
	auto result = std::make_unique<CVideoInstance>();

	if (!result->openInput(name))
		return nullptr;

	result->openVideo();
	result->prepareOutput(scaleToScreen, false);
	result->loadNextFrame(); // prepare 1st frame

	return result;
}
 
/// Extracts the audio track of a video as in-memory WAV data.
/// @return {buffer, size}, or {nullptr, 0} if video or audio track is absent
std::pair<std::unique_ptr<ui8[]>, si64> CVideoPlayer::getAudio(const VideoPath & videoToOpen)
{
	CAudioInstance audio;
	return audio.extractAudio(videoToOpen);
}
 
- #endif
 
 
  |