/*
 * CVideoHandler.cpp, part of VCMI engine
 *
 * Authors: listed in file AUTHORS in main folder
 *
 * License: GNU General Public License v2.0 or later
 * Full text of license available in license.txt file, in main folder
 *
 */
#include "StdInc.h"
#include "CVideoHandler.h"

#ifndef DISABLE_VIDEO

#include "ISoundPlayer.h"
#include "../CGameInfo.h"
#include "../CMT.h"
#include "../eventsSDL/InputHandler.h"
#include "../gui/CGuiHandler.h"
#include "../render/Canvas.h"
#include "../render/IScreenHandler.h"
#include "../renderSDL/SDL_Extensions.h"
#include "../../lib/filesystem/CInputStream.h"
#include "../../lib/filesystem/Filesystem.h"
#include "../../lib/texts/CGeneralTextHandler.h"
#include "../../lib/texts/Languages.h"

#include <SDL_render.h>

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
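
// FFmpeg pulls all input through the AVIOContext created in openContext(), via the
// callbacks below: the read callback must return the number of bytes read (or
// AVERROR_EOF at end of stream), and the seek callback must answer an AVSEEK_SIZE
// query with the total stream size instead of moving the read position.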
// Define read/seek callbacks over VCMI's CInputStream
static int lodRead(void * opaque, uint8_t * buf, int size)
{
	auto * data = static_cast<CInputStream *>(opaque);
	auto bytesRead = data->read(buf, size);

	if(bytesRead == 0)
		return AVERROR_EOF;

	return bytesRead;
}

static si64 lodSeek(void * opaque, si64 pos, int whence)
{
	auto * data = static_cast<CInputStream *>(opaque);

	if(whence & AVSEEK_SIZE)
		return data->getSize();

	return data->seek(pos);
}

[[noreturn]] static void throwFFmpegError(int errorCode)
{
	std::array<char, AV_ERROR_MAX_STRING_SIZE> errorMessage{};
	av_strerror(errorCode, errorMessage.data(), errorMessage.size());

	throw std::runtime_error(errorMessage.data());
}
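
// A video may live at the given path directly, under the VIDEO/ directory, or as a
// low-quality variant of either; probe the candidates in order of preference and
// return the first one that exists (or nullptr when none does).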
static std::unique_ptr<CInputStream> findVideoData(const VideoPath & videoToOpen)
{
	if(CResourceHandler::get()->existsResource(videoToOpen))
		return CResourceHandler::get()->load(videoToOpen);

	auto highQualityVideoToOpenWithDir = videoToOpen.addPrefix("VIDEO/");
	auto lowQualityVideo = videoToOpen.toType<EResType::VIDEO_LOW_QUALITY>();
	auto lowQualityVideoWithDir = highQualityVideoToOpenWithDir.toType<EResType::VIDEO_LOW_QUALITY>();

	if(CResourceHandler::get()->existsResource(highQualityVideoToOpenWithDir))
		return CResourceHandler::get()->load(highQualityVideoToOpenWithDir);

	if(CResourceHandler::get()->existsResource(lowQualityVideo))
		return CResourceHandler::get()->load(lowQualityVideo);

	if(CResourceHandler::get()->existsResource(lowQualityVideoWithDir))
		return CResourceHandler::get()->load(lowQualityVideoWithDir);

	return nullptr;
}

bool FFMpegStream::openInput(const VideoPath & videoToOpen)
{
	input = findVideoData(videoToOpen);

	return input != nullptr;
}
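
// Wire the callbacks into an AVIOContext so libavformat can demux directly from the
// virtual filesystem. The I/O buffer passed to avio_alloc_context becomes owned (and
// eventually freed) by FFmpeg, and the filename given to avformat_open_input is only
// a placeholder since all reads go through the callbacks.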
void FFMpegStream::openContext()
{
	static const int BUFFER_SIZE = 4096;
	input->seek(0);

	auto * buffer = static_cast<unsigned char *>(av_malloc(BUFFER_SIZE)); // will be freed by ffmpeg
	context = avio_alloc_context(buffer, BUFFER_SIZE, 0, input.get(), lodRead, nullptr, lodSeek);

	formatContext = avformat_alloc_context();
	formatContext->pb = context;
	// filename is not needed - file was already opened and is accessed via this->input
	int avfopen = avformat_open_input(&formatContext, "dummyFilename", nullptr, nullptr);

	if(avfopen != 0)
		throwFFmpegError(avfopen);

	// Retrieve stream information
	int findStreamInfo = avformat_find_stream_info(formatContext, nullptr);

	if(findStreamInfo < 0)
		throwFFmpegError(findStreamInfo);
}

void FFMpegStream::openCodec(int desiredStreamIndex)
{
	streamIndex = desiredStreamIndex;

	// Find the decoder for the stream
	codec = avcodec_find_decoder(formatContext->streams[streamIndex]->codecpar->codec_id);

	if(codec == nullptr)
		throw std::runtime_error("Unsupported codec");

	codecContext = avcodec_alloc_context3(codec);
	if(codecContext == nullptr)
		throw std::runtime_error("Failed to create codec context");

	// Copy the stream parameters into the codec context
	int ret = avcodec_parameters_to_context(codecContext, formatContext->streams[streamIndex]->codecpar);
	if(ret < 0)
	{
		// We cannot initialize the codec context from the parameters
		avcodec_free_context(&codecContext);
		throwFFmpegError(ret);
	}

	// Open codec
	ret = avcodec_open2(codecContext, codec, nullptr);
	if(ret < 0)
	{
		// Could not open codec
		codec = nullptr;
		throwFFmpegError(ret);
	}

	// Allocate video frame
	frame = av_frame_alloc();
}

const AVCodecParameters * FFMpegStream::getCodecParameters() const
{
	return formatContext->streams[streamIndex]->codecpar;
}

const AVCodecContext * FFMpegStream::getCodecContext() const
{
	return codecContext;
}

const AVFrame * FFMpegStream::getCurrentFrame() const
{
	return frame;
}

void CVideoInstance::openVideo()
{
	openContext();
	openCodec(findVideoStream());
}
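
// Output goes either to an SDL streaming texture (used for blocking, full-frame playback)
// or to a plain SDL surface that show() later blits into a Canvas; in both cases a swscale
// context converts decoded frames from the codec's pixel format into the chosen target format.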
void CVideoInstance::prepareOutput(bool scaleToScreenSize, bool useTextureOutput)
{
	// setup scaling
	if(scaleToScreenSize)
	{
		dimensions.x = screen->w;
		dimensions.y = screen->h;
	}
	else
	{
		dimensions = Point(getCodecContext()->width, getCodecContext()->height) * GH.screenHandler().getScalingFactor();
	}

	// Allocate a texture or surface to hold the converted frames
	if (useTextureOutput)
	{
		std::array potentialFormats = {
			AV_PIX_FMT_YUV420P, // -> SDL_PIXELFORMAT_IYUV - most of H3 videos use YUV format, so it is preferred to save some space & conversion time
			AV_PIX_FMT_RGB32,   // -> SDL_PIXELFORMAT_ARGB8888 - some .smk videos actually use palette, so RGB > YUV. This is also our screen texture format
			AV_PIX_FMT_NONE
		};

		auto preferredFormat = avcodec_find_best_pix_fmt_of_list(potentialFormats.data(), getCodecContext()->pix_fmt, false, nullptr);

		if (preferredFormat == AV_PIX_FMT_YUV420P)
			textureYUV = SDL_CreateTexture(mainRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, dimensions.x, dimensions.y);
		else
			textureRGB = SDL_CreateTexture(mainRenderer, SDL_PIXELFORMAT_ARGB8888, SDL_TEXTUREACCESS_STREAMING, dimensions.x, dimensions.y);

		sws = sws_getContext(getCodecContext()->width, getCodecContext()->height, getCodecContext()->pix_fmt,
							 dimensions.x, dimensions.y, preferredFormat,
							 SWS_BICUBIC, nullptr, nullptr, nullptr);
	}
	else
	{
		surface = CSDL_Ext::newSurface(dimensions);
		sws = sws_getContext(getCodecContext()->width, getCodecContext()->height, getCodecContext()->pix_fmt,
							 dimensions.x, dimensions.y, AV_PIX_FMT_RGB32,
							 SWS_BICUBIC, nullptr, nullptr, nullptr);
	}

	if (sws == nullptr)
		throw std::runtime_error("Failed to create sws context");
}
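
// Decoding follows FFmpeg's send/receive model: try avcodec_receive_frame() first, and
// only when the decoder reports AVERROR(EAGAIN) (it needs more input) read further
// packets from the container and feed them in with avcodec_send_packet(). On EOF the
// current frame is freed and reset to nullptr, which videoEnded() treats as end-of-stream.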
void FFMpegStream::decodeNextFrame()
{
	int rc = avcodec_receive_frame(codecContext, frame);

	// frame extracted - data that was sent to codecContext before was sufficient
	if (rc == 0)
		return;

	// AVERROR(EAGAIN) is legal - it indicates that the codec requires more data from the input stream to decode the next frame
	if(rc != AVERROR(EAGAIN))
		throwFFmpegError(rc);

	for(;;)
	{
		AVPacket packet;

		// codecContext does not have enough input data - read next packet from input stream
		int ret = av_read_frame(formatContext, &packet);
		if(ret < 0)
		{
			if(ret == AVERROR_EOF)
			{
				av_packet_unref(&packet);
				av_frame_free(&frame);
				frame = nullptr;
				return;
			}
			throwFFmpegError(ret);
		}

		// Is this a packet from the stream that needs decoding?
		if(packet.stream_index == streamIndex)
		{
			// Decode the packet we just read
			// Note: avcodec_send_packet may return AVERROR(EAGAIN). However this should never happen here,
			// since avcodec_receive_frame is guaranteed to be called next, and the ffmpeg API promises that these two methods never *both* return AVERROR(EAGAIN).
			int sendResult = avcodec_send_packet(codecContext, &packet);
			if(sendResult < 0)
				throwFFmpegError(sendResult);

			rc = avcodec_receive_frame(codecContext, frame);
			if(rc == AVERROR(EAGAIN))
			{
				// still need more data - read next packet
				av_packet_unref(&packet);
				continue;
			}
			else if(rc < 0)
			{
				throwFFmpegError(rc);
			}
			else
			{
				// read successful. Exit the loop
				av_packet_unref(&packet);
				return;
			}
		}

		av_packet_unref(&packet);
	}
}
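
// Convert the freshly decoded frame into whichever output prepareOutput() created:
// texture output is scaled into a temporary image and uploaded with SDL_UpdateYUVTexture /
// SDL_UpdateTexture, surface output is scaled into an over-allocated buffer and memcpy'd.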
bool CVideoInstance::loadNextFrame()
{
	decodeNextFrame();
	const AVFrame * frame = getCurrentFrame();

	if(!frame)
		return false;

	uint8_t * data[4] = {};
	int linesize[4] = {};

	if(textureYUV)
	{
		av_image_alloc(data, linesize, dimensions.x, dimensions.y, AV_PIX_FMT_YUV420P, 1);
		sws_scale(sws, frame->data, frame->linesize, 0, getCodecContext()->height, data, linesize);
		SDL_UpdateYUVTexture(textureYUV, nullptr, data[0], linesize[0], data[1], linesize[1], data[2], linesize[2]);
		av_freep(&data[0]);
	}

	if(textureRGB)
	{
		av_image_alloc(data, linesize, dimensions.x, dimensions.y, AV_PIX_FMT_RGB32, 1);
		sws_scale(sws, frame->data, frame->linesize, 0, getCodecContext()->height, data, linesize);
		SDL_UpdateTexture(textureRGB, nullptr, data[0], linesize[0]);
		av_freep(&data[0]);
	}

	if(surface)
	{
		// Avoid buffer overflow caused by sws_scale():
		// http://trac.ffmpeg.org/ticket/9254
		size_t pic_bytes = surface->pitch * surface->h;
		size_t ffmpeg_pad = 1024; /* a few bytes of overflow will go here */
		void * for_sws = av_malloc(pic_bytes + ffmpeg_pad);

		data[0] = (ui8 *)for_sws;
		linesize[0] = surface->pitch;

		sws_scale(sws, frame->data, frame->linesize, 0, getCodecContext()->height, data, linesize);
		memcpy(surface->pixels, for_sws, pic_bytes);
		av_free(for_sws);
	}
	return true;
}

bool CVideoInstance::videoEnded()
{
	return getCurrentFrame() == nullptr;
}

CVideoInstance::~CVideoInstance()
{
	sws_freeContext(sws);
	SDL_DestroyTexture(textureYUV);
	SDL_DestroyTexture(textureRGB);
	SDL_FreeSurface(surface);
}

FFMpegStream::~FFMpegStream()
{
	av_frame_free(&frame);

#if (LIBAVCODEC_VERSION_MAJOR < 61)
	// deprecated, apparently no longer necessary - avcodec_free_context should suffice
	avcodec_close(codecContext);
#endif

	avcodec_free_context(&codecContext);
	avformat_close_input(&formatContext);
	av_free(context);
}

Point CVideoInstance::size()
{
	if(!getCurrentFrame())
		throw std::runtime_error("Invalid video frame!");

	return Point(getCurrentFrame()->width, getCurrentFrame()->height);
}

void CVideoInstance::show(const Point & position, Canvas & canvas)
{
	if(sws == nullptr)
		throw std::runtime_error("No video to show!");

	CSDL_Ext::blitSurface(surface, canvas.getInternalSurface(), position * GH.screenHandler().getScalingFactor());
}
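
// FFmpeg timestamps are expressed in units of the stream's time_base; multiplying by
// av_q2d(time_base) converts them to seconds. For example, with a time_base of 1/25,
// a frame with pts = 50 and duration = 1 ends at (50 + 1) * 0.04 = 2.04 seconds.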
double FFMpegStream::getCurrentFrameEndTime() const
{
#if(LIBAVUTIL_VERSION_MAJOR < 58)
	auto packet_duration = frame->pkt_duration;
#else
	auto packet_duration = frame->duration;
#endif
	return (frame->pts + packet_duration) * av_q2d(formatContext->streams[streamIndex]->time_base);
}

double FFMpegStream::getCurrentFrameDuration() const
{
#if(LIBAVUTIL_VERSION_MAJOR < 58)
	auto packet_duration = frame->pkt_duration;
#else
	auto packet_duration = frame->duration;
#endif
	return packet_duration * av_q2d(formatContext->streams[streamIndex]->time_base);
}

void CVideoInstance::tick(uint32_t msPassed)
{
	if(sws == nullptr)
		throw std::runtime_error("No video to show!");

	if(videoEnded())
		throw std::runtime_error("Video already ended!");

	frameTime += msPassed / 1000.0;

	if(frameTime >= getCurrentFrameEndTime())
		loadNextFrame();
}

struct FFMpegFormatDescription
{
	uint8_t sampleSizeBytes;
	uint8_t wavFormatID;
	bool isPlanar;
};
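
// Map an FFmpeg sample format onto the properties needed to emit a WAV header:
// bytes per sample, the WAV format tag (1 = integer PCM, 3 = IEEE float), and whether
// samples are planar (one buffer per channel) instead of interleaved.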
static FFMpegFormatDescription getAudioFormatProperties(int audioFormat)
{
	switch (audioFormat)
	{
		case AV_SAMPLE_FMT_U8:   return { 1, 1, false};
		case AV_SAMPLE_FMT_U8P:  return { 1, 1, true};
		case AV_SAMPLE_FMT_S16:  return { 2, 1, false};
		case AV_SAMPLE_FMT_S16P: return { 2, 1, true};
		case AV_SAMPLE_FMT_S32:  return { 4, 1, false};
		case AV_SAMPLE_FMT_S32P: return { 4, 1, true};
		case AV_SAMPLE_FMT_S64:  return { 8, 1, false};
		case AV_SAMPLE_FMT_S64P: return { 8, 1, true};
		case AV_SAMPLE_FMT_FLT:  return { 4, 3, false};
		case AV_SAMPLE_FMT_FLTP: return { 4, 3, true};
		case AV_SAMPLE_FMT_DBL:  return { 8, 3, false};
		case AV_SAMPLE_FMT_DBLP: return { 8, 3, true};
	}
	throw std::runtime_error("Invalid audio format");
}

int FFMpegStream::findAudioStream() const
{
	std::vector<int> audioStreamIndices;

	for(int i = 0; i < formatContext->nb_streams; i++)
		if(formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
			audioStreamIndices.push_back(i);

	if (audioStreamIndices.empty())
		return -1;

	if (audioStreamIndices.size() == 1)
		return audioStreamIndices.front();

	// multiple audio streams - try to pick the best one based on language settings
	std::map<int, std::string> streamToLanguage;

	// Approach 1 - check whether the stream has a language set in its metadata
	for (auto const & index : audioStreamIndices)
	{
		const AVDictionaryEntry * e = av_dict_get(formatContext->streams[index]->metadata, "language", nullptr, 0);
		if (e)
			streamToLanguage[index] = e->value;
	}

	// Approach 2 - no metadata found. This may be a video from Chronicles, which has a predefined (presumably hardcoded) list of languages
	if (streamToLanguage.empty())
	{
		if (audioStreamIndices.size() == 2)
		{
			streamToLanguage[audioStreamIndices[0]] = Languages::getLanguageOptions(Languages::ELanguages::ENGLISH).tagISO2;
			streamToLanguage[audioStreamIndices[1]] = Languages::getLanguageOptions(Languages::ELanguages::GERMAN).tagISO2;
		}

		if (audioStreamIndices.size() == 5)
		{
			streamToLanguage[audioStreamIndices[0]] = Languages::getLanguageOptions(Languages::ELanguages::ENGLISH).tagISO2;
			streamToLanguage[audioStreamIndices[1]] = Languages::getLanguageOptions(Languages::ELanguages::FRENCH).tagISO2;
			streamToLanguage[audioStreamIndices[2]] = Languages::getLanguageOptions(Languages::ELanguages::GERMAN).tagISO2;
			streamToLanguage[audioStreamIndices[3]] = Languages::getLanguageOptions(Languages::ELanguages::ITALIAN).tagISO2;
			streamToLanguage[audioStreamIndices[4]] = Languages::getLanguageOptions(Languages::ELanguages::SPANISH).tagISO2;
		}
	}

	std::string preferredLanguageName = CGI->generaltexth->getPreferredLanguage();
	std::string preferredTag = Languages::getLanguageOptions(preferredLanguageName).tagISO2;

	for (auto const & entry : streamToLanguage)
		if (entry.second == preferredTag)
			return entry.first;

	return audioStreamIndices.front();
}

int FFMpegStream::findVideoStream() const
{
	for(int i = 0; i < formatContext->nb_streams; i++)
		if(formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
			return i;
	return -1;
}

std::pair<std::unique_ptr<ui8 []>, si64> CAudioInstance::extractAudio(const VideoPath & videoToOpen)
{
	if (!openInput(videoToOpen))
		return { nullptr, 0 };

	openContext();

	int audioStreamIndex = findAudioStream();
	if (audioStreamIndex == -1)
		return { nullptr, 0 };

	openCodec(audioStreamIndex);

	const auto * codecpar = getCodecParameters();
	std::vector<ui8> samples;
	auto formatProperties = getAudioFormatProperties(codecpar->format);
#if(LIBAVUTIL_VERSION_MAJOR < 58)
	int numChannels = codecpar->channels;
#else
	int numChannels = codecpar->ch_layout.nb_channels;
#endif

	samples.reserve(44100 * 5); // arbitrary 5-second buffer

	for (;;)
	{
		decodeNextFrame();
		const AVFrame * frame = getCurrentFrame();

		if (!frame)
			break;

		int samplesToRead = frame->nb_samples * numChannels;
		int bytesToRead = samplesToRead * formatProperties.sampleSizeBytes;

		if (formatProperties.isPlanar && numChannels > 1)
		{
			// Workaround for the lack of a resampler
			// Currently, ffmpeg on conan systems is built without the swresample library
			// Because of that, and because the wav format does not support ffmpeg's 'planar' formats,
			// we need to de-planarize the stream, i.e. convert it to a "normal" (non-planar / interleaved) one
			samples.reserve(samples.size() + bytesToRead);
			for (int sm = 0; sm < frame->nb_samples; ++sm)
				for (int ch = 0; ch < numChannels; ++ch)
					samples.insert(samples.end(), frame->data[ch] + sm * formatProperties.sampleSizeBytes, frame->data[ch] + (sm + 1) * formatProperties.sampleSizeBytes);
		}
		else
		{
			samples.insert(samples.end(), frame->data[0], frame->data[0] + bytesToRead);
		}
	}
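
	// Wrap the collected PCM data in a canonical 44-byte RIFF/WAVE header so the sound
	// player can treat the result as a regular in-memory .wav file; field names follow
	// the chunk names from the WAV specification.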
	struct WavHeader {
		ui8 RIFF[4] = {'R', 'I', 'F', 'F'};
		ui32 ChunkSize;
		ui8 WAVE[4] = {'W', 'A', 'V', 'E'};
		ui8 fmt[4] = {'f', 'm', 't', ' '};
		ui32 Subchunk1Size = 16;
		ui16 AudioFormat = 1;
		ui16 NumOfChan = 2;
		ui32 SamplesPerSec = 22050;
		ui32 bytesPerSec = 22050 * 2;
		ui16 blockAlign = 1;
		ui16 bitsPerSample = 32;
		ui8 Subchunk2ID[4] = {'d', 'a', 't', 'a'};
		ui32 Subchunk2Size;
	};
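
	// The ChunkSize / Subchunk2Size arithmetic below assumes the canonical 44-byte layout;
	// every member is naturally aligned, so no padding is expected, but assert it to be safe.
	static_assert(sizeof(WavHeader) == 44, "WavHeader must match the canonical 44-byte WAV header");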

	WavHeader wav;
	wav.ChunkSize = samples.size() + sizeof(WavHeader) - 8;
	wav.AudioFormat = formatProperties.wavFormatID; // 1 = PCM, 3 = IEEE float
	wav.NumOfChan = numChannels;
	wav.SamplesPerSec = codecpar->sample_rate;
	wav.bytesPerSec = codecpar->sample_rate * numChannels * formatProperties.sampleSizeBytes; // ByteRate = SampleRate * NumChannels * BytesPerSample
	wav.blockAlign = numChannels * formatProperties.sampleSizeBytes; // bytes per sample frame across all channels
	wav.bitsPerSample = formatProperties.sampleSizeBytes * 8;
	wav.Subchunk2Size = samples.size() + sizeof(WavHeader) - 44;

	auto * wavPtr = reinterpret_cast<ui8 *>(&wav);

	auto dat = std::make_pair(std::make_unique<ui8[]>(samples.size() + sizeof(WavHeader)), samples.size() + sizeof(WavHeader));
	std::copy(wavPtr, wavPtr + sizeof(WavHeader), dat.first.get());
	std::copy(samples.begin(), samples.end(), dat.first.get() + sizeof(WavHeader));

	return dat;
}
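
// Blocking playback used for intro and spellbook videos: decode, render, and pace frames
// on the calling thread, sleeping away whatever remains of each frame's duration after present.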
bool CVideoPlayer::openAndPlayVideoImpl(const VideoPath & name, const Point & position, bool useOverlay, bool scale, bool stopOnKey)
{
	CVideoInstance instance;
	CAudioInstance audio;

	auto extractedAudio = audio.extractAudio(name);
	int audioHandle = CCS->soundh->playSound(extractedAudio);

	if (!instance.openInput(name))
		return true;

	instance.openVideo();
	instance.prepareOutput(scale, true);

	auto lastTimePoint = boost::chrono::steady_clock::now();

	while(instance.loadNextFrame())
	{
		if(stopOnKey)
		{
			GH.input().fetchEvents();
			if(GH.input().ignoreEventsUntilInput())
			{
				CCS->soundh->stopSound(audioHandle);
				return false;
			}
		}

		SDL_Rect rect;
		rect.x = position.x;
		rect.y = position.y;
		rect.w = instance.dimensions.x;
		rect.h = instance.dimensions.y;

		SDL_RenderFillRect(mainRenderer, &rect);

		if(instance.textureYUV)
			SDL_RenderCopy(mainRenderer, instance.textureYUV, nullptr, &rect);
		else
			SDL_RenderCopy(mainRenderer, instance.textureRGB, nullptr, &rect);

		SDL_RenderPresent(mainRenderer);

		// Framerate delay
		double targetFrameTimeSeconds = instance.getCurrentFrameDuration();
		auto targetFrameTime = boost::chrono::milliseconds(static_cast<int>(1000 * targetFrameTimeSeconds));

		auto timePointAfterPresent = boost::chrono::steady_clock::now();
		auto timeSpentBusy = boost::chrono::duration_cast<boost::chrono::milliseconds>(timePointAfterPresent - lastTimePoint);

		if(targetFrameTime > timeSpentBusy)
			boost::this_thread::sleep_for(targetFrameTime - timeSpentBusy);

		lastTimePoint = boost::chrono::steady_clock::now();
	}

	return true;
}

bool CVideoPlayer::playIntroVideo(const VideoPath & name)
{
	return openAndPlayVideoImpl(name, Point(0, 0), true, true, true);
}

void CVideoPlayer::playSpellbookAnimation(const VideoPath & name, const Point & position)
{
	openAndPlayVideoImpl(name, position * GH.screenHandler().getScalingFactor(), false, false, false);
}

std::unique_ptr<IVideoInstance> CVideoPlayer::open(const VideoPath & name, bool scaleToScreen)
{
	auto result = std::make_unique<CVideoInstance>();

	if (!result->openInput(name))
		return nullptr;

	result->openVideo();
	result->prepareOutput(scaleToScreen, false);
	result->loadNextFrame(); // prepare 1st frame

	return result;
}
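
// Minimal sketch of a caller loop for the non-blocking interface returned by open().
// Illustrative only; msSinceLastCall, position and canvas are placeholders supplied by the caller:
//
//	auto video = CCS->videoh->open(VideoPath::builtin("SOME_VIDEO"), false);
//	while (video && !video->videoEnded())
//	{
//		video->tick(msSinceLastCall);  // advance playback; loads the next frame when due
//		video->show(position, canvas); // blit the current frame onto the target canvas
//	}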

std::pair<std::unique_ptr<ui8[]>, si64> CVideoPlayer::getAudio(const VideoPath & videoToOpen)
{
	CAudioInstance audio;
	return audio.extractAudio(videoToOpen);
}

#endif