CVideoHandler.cpp 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605
  1. /*
  2. * CVideoHandler.cpp, part of VCMI engine
  3. *
  4. * Authors: listed in file AUTHORS in main folder
  5. *
  6. * License: GNU General Public License v2.0 or later
  7. * Full text of license available in license.txt file, in main folder
  8. *
  9. */
  10. #include "StdInc.h"
  11. #include "CVideoHandler.h"
  12. #ifndef DISABLE_VIDEO
  13. #include "ISoundPlayer.h"
  14. #include "../CGameInfo.h"
  15. #include "../CMT.h"
  16. #include "../CPlayerInterface.h"
  17. #include "../eventsSDL/InputHandler.h"
  18. #include "../gui/CGuiHandler.h"
  19. #include "../gui/FramerateManager.h"
  20. #include "../render/Canvas.h"
  21. #include "../renderSDL/SDL_Extensions.h"
  22. #include "../../lib/filesystem/CInputStream.h"
  23. #include "../../lib/filesystem/Filesystem.h"
  24. #include <SDL_render.h>
  25. extern "C" {
  26. #include <libavformat/avformat.h>
  27. #include <libavcodec/avcodec.h>
  28. #include <libavutil/imgutils.h>
  29. #include <libswscale/swscale.h>
  30. }
  31. // Define a set of functions to read data
  32. static int lodRead(void * opaque, uint8_t * buf, int size)
  33. {
  34. auto * data = static_cast<CInputStream *>(opaque);
  35. int bytes = static_cast<int>(data->read(buf, size));
  36. if(bytes == 0)
  37. return AVERROR_EOF;
  38. return bytes;
  39. }
  40. static si64 lodSeek(void * opaque, si64 pos, int whence)
  41. {
  42. auto * data = static_cast<CInputStream *>(opaque);
  43. if(whence & AVSEEK_SIZE)
  44. return data->getSize();
  45. return data->seek(pos);
  46. }
  47. [[noreturn]] static void throwFFmpegError(int errorCode)
  48. {
  49. std::array<char, AV_ERROR_MAX_STRING_SIZE> errorMessage{};
  50. av_strerror(errorCode, errorMessage.data(), errorMessage.size());
  51. throw std::runtime_error(errorMessage.data());
  52. }
  53. static std::unique_ptr<CInputStream> findVideoData(const VideoPath & videoToOpen)
  54. {
  55. if(CResourceHandler::get()->existsResource(videoToOpen))
  56. return CResourceHandler::get()->load(videoToOpen);
  57. auto highQualityVideoToOpenWithDir = videoToOpen.addPrefix("VIDEO/");
  58. auto lowQualityVideo = videoToOpen.toType<EResType::VIDEO_LOW_QUALITY>();
  59. auto lowQualityVideoWithDir = highQualityVideoToOpenWithDir.toType<EResType::VIDEO_LOW_QUALITY>();
  60. if(CResourceHandler::get()->existsResource(highQualityVideoToOpenWithDir))
  61. return CResourceHandler::get()->load(highQualityVideoToOpenWithDir);
  62. if(CResourceHandler::get()->existsResource(lowQualityVideo))
  63. return CResourceHandler::get()->load(lowQualityVideo);
  64. return CResourceHandler::get()->load(lowQualityVideoWithDir);
  65. }
/// Locates the video resource (trying high/low quality variants) and stores the
/// opened stream; all subsequent ffmpeg reads go through lodRead/lodSeek on it.
void FFMpegStream::openInput(const VideoPath & videoToOpen)
{
	input = findVideoData(videoToOpen);
}
  70. void FFMpegStream::openContext()
  71. {
  72. static const int BUFFER_SIZE = 4096;
  73. input->seek(0);
  74. auto * buffer = static_cast<unsigned char *>(av_malloc(BUFFER_SIZE)); // will be freed by ffmpeg
  75. context = avio_alloc_context(buffer, BUFFER_SIZE, 0, input.get(), lodRead, nullptr, lodSeek);
  76. formatContext = avformat_alloc_context();
  77. formatContext->pb = context;
  78. // filename is not needed - file was already open and stored in this->data;
  79. int avfopen = avformat_open_input(&formatContext, "dummyFilename", nullptr, nullptr);
  80. if(avfopen != 0)
  81. throwFFmpegError(avfopen);
  82. // Retrieve stream information
  83. int findStreamInfo = avformat_find_stream_info(formatContext, nullptr);
  84. if(avfopen < 0)
  85. throwFFmpegError(findStreamInfo);
  86. }
  87. void FFMpegStream::openCodec(int desiredStreamIndex)
  88. {
  89. streamIndex = desiredStreamIndex;
  90. // Find the decoder for the stream
  91. codec = avcodec_find_decoder(formatContext->streams[streamIndex]->codecpar->codec_id);
  92. if(codec == nullptr)
  93. throw std::runtime_error("Unsupported codec");
  94. codecContext = avcodec_alloc_context3(codec);
  95. if(codecContext == nullptr)
  96. throw std::runtime_error("Failed to create codec context");
  97. // Get a pointer to the codec context for the video stream
  98. int ret = avcodec_parameters_to_context(codecContext, formatContext->streams[streamIndex]->codecpar);
  99. if(ret < 0)
  100. {
  101. //We cannot get codec from parameters
  102. avcodec_free_context(&codecContext);
  103. throwFFmpegError(ret);
  104. }
  105. // Open codec
  106. ret = avcodec_open2(codecContext, codec, nullptr);
  107. if(ret < 0)
  108. {
  109. // Could not open codec
  110. codec = nullptr;
  111. throwFFmpegError(ret);
  112. }
  113. // Allocate video frame
  114. frame = av_frame_alloc();
  115. }
/// Parameters of the currently selected stream (valid after openCodec()).
const AVCodecParameters * FFMpegStream::getCodecParameters()
{
	return formatContext->streams[streamIndex]->codecpar;
}

/// Decoder context for the currently selected stream (valid after openCodec()).
const AVCodecContext * FFMpegStream::getCodecContext()
{
	return codecContext;
}

/// Most recently decoded frame; nullptr once the stream has ended.
const AVFrame * FFMpegStream::getCurrentFrame()
{
	return frame;
}
/// Opens the demuxer context and the decoder for the first video stream.
/// openInput() must have been called beforehand.
void CVideoInstance::openVideo()
{
	openContext();
	openCodec(findVideoStream());
}
  133. void CVideoInstance::prepareOutput(bool scaleToScreenSize, bool useTextureOutput)
  134. {
  135. //setup scaling
  136. if(scaleToScreenSize)
  137. {
  138. dimensions.x = screen->w;
  139. dimensions.y = screen->h;
  140. }
  141. else
  142. {
  143. dimensions.x = getCodecContext()->width;
  144. dimensions.y = getCodecContext()->height;
  145. }
  146. // Allocate a place to put our YUV image on that screen
  147. if (useTextureOutput)
  148. {
  149. std::array potentialFormats = {
  150. AV_PIX_FMT_YUV420P, // -> SDL_PIXELFORMAT_IYUV - most of H3 videos use YUV format, so it is preferred to save some space & conversion time
  151. AV_PIX_FMT_RGB32, // -> SDL_PIXELFORMAT_ARGB8888 - some .smk videos actually use palette, so RGB > YUV. This is also our screen texture format
  152. AV_PIX_FMT_NONE
  153. };
  154. auto preferredFormat = avcodec_find_best_pix_fmt_of_list(potentialFormats.data(), getCodecContext()->pix_fmt, false, nullptr);
  155. if (preferredFormat == AV_PIX_FMT_YUV420P)
  156. textureYUV = SDL_CreateTexture( mainRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, dimensions.x, dimensions.y);
  157. else
  158. textureRGB = SDL_CreateTexture( mainRenderer, SDL_PIXELFORMAT_ARGB8888, SDL_TEXTUREACCESS_STREAMING, dimensions.x, dimensions.y);
  159. sws = sws_getContext(getCodecContext()->width, getCodecContext()->height, getCodecContext()->pix_fmt,
  160. dimensions.x, dimensions.y, preferredFormat,
  161. SWS_BICUBIC, nullptr, nullptr, nullptr);
  162. }
  163. else
  164. {
  165. surface = CSDL_Ext::newSurface(dimensions.x, dimensions.y);
  166. sws = sws_getContext(getCodecContext()->width, getCodecContext()->height, getCodecContext()->pix_fmt,
  167. dimensions.x, dimensions.y, AV_PIX_FMT_RGB32,
  168. SWS_BICUBIC, nullptr, nullptr, nullptr);
  169. }
  170. if (sws == nullptr)
  171. throw std::runtime_error("Failed to create sws");
  172. }
/// Advances the stream by one frame: first drains any frame already buffered in
/// the decoder, otherwise reads & feeds packets until a new frame is produced.
/// At end of stream, `frame` is freed and set to nullptr (see videoEnded()).
void FFMpegStream::decodeNextFrame()
{
	AVPacket packet;

	// Phase 1: the decoder may already hold a decoded frame from a previous
	// send — try to receive it without reading more input.
	for(;;)
	{
		int rc = avcodec_receive_frame(codecContext, frame);
		if(rc == AVERROR(EAGAIN))
			break; // decoder needs more input — fall through to packet reading
		if(rc < 0)
			throwFFmpegError(rc);
		return; // got a frame
	}

	// Phase 2: read packets from the container and feed our stream's packets
	// to the decoder until it emits a frame (or the container ends).
	for(;;)
	{
		int ret = av_read_frame(formatContext, &packet);
		if(ret < 0)
		{
			if(ret == AVERROR_EOF)
			{
				// End of stream: drop the frame so getCurrentFrame() returns
				// nullptr, which callers interpret as "video ended".
				// NOTE(review): the decoder is not flushed (no null-packet send),
				// so frames still buffered in the codec at EOF are discarded —
				// confirm this is acceptable for the videos being played.
				av_packet_unref(&packet);
				av_frame_free(&frame);
				frame = nullptr;
				return;
			}
			throwFFmpegError(ret);
		}

		// Is this a packet from the video stream?
		if(packet.stream_index == streamIndex)
		{
			// Decode video frame
			int rc = avcodec_send_packet(codecContext, &packet);
			if(rc < 0 && rc != AVERROR(EAGAIN))
				throwFFmpegError(rc);

			rc = avcodec_receive_frame(codecContext, frame);
			if(rc == AVERROR(EAGAIN))
			{
				// Decoder needs more data before it can emit — keep reading
				av_packet_unref(&packet);
				continue;
			}
			if(rc < 0)
				throwFFmpegError(rc);

			av_packet_unref(&packet);
			return;
		}

		// Packet belongs to another stream (e.g. audio) — skip it
		av_packet_unref(&packet);
	}
}
/// Decodes the next frame and converts it into whichever output target was set
/// up by prepareOutput(): YUV texture, RGB texture, or SDL surface.
/// @return false once the video has ended, true if a frame was loaded
bool CVideoInstance::loadNextFrame()
{
	decodeNextFrame();
	const AVFrame * frame = getCurrentFrame();

	if(!frame)
		return false; // end of stream

	uint8_t * data[4] = {};
	int linesize[4] = {};

	if(textureYUV)
	{
		// Scale/convert into a temporary YUV420 image, then upload to the texture
		av_image_alloc(data, linesize, dimensions.x, dimensions.y, AV_PIX_FMT_YUV420P, 1);
		sws_scale(sws, frame->data, frame->linesize, 0, getCodecContext()->height, data, linesize);
		SDL_UpdateYUVTexture(textureYUV, nullptr, data[0], linesize[0], data[1], linesize[1], data[2], linesize[2]);
		av_freep(&data[0]);
	}

	if(textureRGB)
	{
		// Same as above, but into a packed RGB image
		av_image_alloc(data, linesize, dimensions.x, dimensions.y, AV_PIX_FMT_RGB32, 1);
		sws_scale(sws, frame->data, frame->linesize, 0, getCodecContext()->height, data, linesize);
		SDL_UpdateTexture(textureRGB, nullptr, data[0], linesize[0]);
		av_freep(&data[0]);
	}

	if(surface)
	{
		// Avoid buffer overflow caused by sws_scale():
		// http://trac.ffmpeg.org/ticket/9254
		// Scale into a padded scratch buffer, then copy into the surface.
		size_t pic_bytes = surface->pitch * surface->h;
		size_t ffmped_pad = 1024; /* a few bytes of overflow will go here */
		void * for_sws = av_malloc(pic_bytes + ffmped_pad);
		data[0] = (ui8 *)for_sws;
		linesize[0] = surface->pitch;
		sws_scale(sws, frame->data, frame->linesize, 0, getCodecContext()->height, data, linesize);
		memcpy(surface->pixels, for_sws, pic_bytes);
		av_free(for_sws);
	}

	return true;
}
/// True once decodeNextFrame() has hit end of stream and released the frame.
bool CVideoInstance::videoEnded()
{
	return getCurrentFrame() == nullptr;
}
  261. CVideoInstance::~CVideoInstance()
  262. {
  263. sws_freeContext(sws);
  264. SDL_DestroyTexture(textureYUV);
  265. SDL_DestroyTexture(textureRGB);
  266. SDL_FreeSurface(surface);
  267. }
  268. FFMpegStream::~FFMpegStream()
  269. {
  270. // state.videoStream.codec???
  271. // state.audioStream.codec???
  272. av_frame_free(&frame);
  273. avcodec_close(codecContext);
  274. avcodec_free_context(&codecContext);
  275. avcodec_close(codecContext);
  276. avcodec_free_context(&codecContext);
  277. avformat_close_input(&formatContext);
  278. av_free(context);
  279. }
  280. Point CVideoInstance::size()
  281. {
  282. if(!getCurrentFrame())
  283. throw std::runtime_error("Invalid video frame!");
  284. return Point(getCurrentFrame()->width, getCurrentFrame()->height);
  285. }
/// Blits the current frame's surface onto the given canvas.
/// Only valid for surface output (prepareOutput with useTextureOutput == false).
void CVideoInstance::show(const Point & position, Canvas & canvas)
{
	if(sws == nullptr)
		throw std::runtime_error("No video to show!");

	CSDL_Ext::blitSurface(surface, canvas.getInternalSurface(), position);
}
/// Presentation end time of the current frame, in seconds from stream start.
/// The frame duration field was renamed in libavutil 58 (pkt_duration -> duration).
double FFMpegStream::getCurrentFrameEndTime()
{
#if(LIBAVUTIL_VERSION_MAJOR < 58)
	auto packet_duration = frame->pkt_duration;
#else
	auto packet_duration = frame->duration;
#endif
	// pts and duration are in stream time_base units; convert to seconds
	return (frame->pts + packet_duration) * av_q2d(formatContext->streams[streamIndex]->time_base);
}

/// Duration of the current frame in seconds.
double FFMpegStream::getCurrentFrameDuration()
{
#if(LIBAVUTIL_VERSION_MAJOR < 58)
	auto packet_duration = frame->pkt_duration;
#else
	auto packet_duration = frame->duration;
#endif
	return (packet_duration) * av_q2d(formatContext->streams[streamIndex]->time_base);
}
/// Advances the playback clock by msPassed milliseconds and loads the next
/// frame once the current frame's presentation interval has elapsed.
/// @throws std::runtime_error if the video was never opened or already ended
void CVideoInstance::tick(uint32_t msPassed)
{
	if(sws == nullptr)
		throw std::runtime_error("No video to show!");

	if(videoEnded())
		throw std::runtime_error("Video already ended!");

	// Playback clock is kept in seconds to match ffmpeg's time_base conversions
	frameTime += msPassed / 1000.0;

	if(frameTime >= getCurrentFrameEndTime())
		loadNextFrame();
}
/// Describes how an ffmpeg sample format maps onto a WAV output stream.
struct FFMpegFormatDescription
{
	uint8_t sampleSizeBytes; // size of one sample of one channel, in bytes
	uint8_t wavFormatID;     // WAV AudioFormat tag: 1 = integer PCM, 3 = IEEE float
	bool isPlanar;           // true if ffmpeg stores each channel in a separate plane
};
  326. static FFMpegFormatDescription getAudioFormatProperties(int audioFormat)
  327. {
  328. switch (audioFormat)
  329. {
  330. case AV_SAMPLE_FMT_U8: return { 1, 1, false};
  331. case AV_SAMPLE_FMT_U8P: return { 1, 1, true};
  332. case AV_SAMPLE_FMT_S16: return { 2, 1, false};
  333. case AV_SAMPLE_FMT_S16P: return { 2, 1, true};
  334. case AV_SAMPLE_FMT_S32: return { 4, 1, false};
  335. case AV_SAMPLE_FMT_S32P: return { 4, 1, true};
  336. case AV_SAMPLE_FMT_S64: return { 8, 1, false};
  337. case AV_SAMPLE_FMT_S64P: return { 8, 1, true};
  338. case AV_SAMPLE_FMT_FLT: return { 4, 3, false};
  339. case AV_SAMPLE_FMT_FLTP: return { 4, 3, true};
  340. case AV_SAMPLE_FMT_DBL: return { 8, 3, false};
  341. case AV_SAMPLE_FMT_DBLP: return { 8, 3, true};
  342. }
  343. throw std::runtime_error("Invalid audio format");
  344. }
  345. int FFMpegStream::findAudioStream()
  346. {
  347. for(int i = 0; i < formatContext->nb_streams; i++)
  348. if(formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
  349. return i;
  350. return -1;
  351. }
  352. int FFMpegStream::findVideoStream()
  353. {
  354. for(int i = 0; i < formatContext->nb_streams; i++)
  355. if(formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
  356. return i;
  357. return -1;
  358. }
  359. std::pair<std::unique_ptr<ui8 []>, si64> CAudioInstance::extractAudio(const VideoPath & videoToOpen)
  360. {
  361. openInput(videoToOpen);
  362. openContext();
  363. openCodec(findAudioStream());
  364. const auto * codecpar = getCodecParameters();
  365. std::vector<ui8> samples;
  366. auto formatProperties = getAudioFormatProperties(codecpar->format);
  367. #if(LIBAVUTIL_VERSION_MAJOR < 58)
  368. int numChannels = codecpar->channels;
  369. #else
  370. int numChannels = codecpar->ch_layout.nb_channels;
  371. #endif
  372. samples.reserve(44100 * 5); // arbitrary 5-second buffer
  373. for (;;)
  374. {
  375. decodeNextFrame();
  376. const AVFrame * frame = getCurrentFrame();
  377. if (!frame)
  378. break;
  379. int samplesToRead = frame->nb_samples * numChannels;
  380. int bytesToRead = samplesToRead * formatProperties.sampleSizeBytes;
  381. if (formatProperties.isPlanar && numChannels > 1)
  382. {
  383. // Workaround for lack of resampler
  384. // Currently, ffmpeg on conan systems is built without sws resampler
  385. // Because of that, and because wav format does not supports 'planar' formats from ffmpeg
  386. // we need to de-planarize it and convert to "normal" (non-planar / interleaved) steram
  387. samples.reserve(samples.size() + bytesToRead);
  388. for (int sm = 0; sm < frame->nb_samples; ++sm)
  389. for (int ch = 0; ch < numChannels; ++ch)
  390. samples.insert(samples.end(), frame->data[ch] + sm * formatProperties.sampleSizeBytes, frame->data[ch] + (sm+1) * formatProperties.sampleSizeBytes );
  391. }
  392. else
  393. {
  394. samples.insert(samples.end(), frame->data[0], frame->data[0] + bytesToRead);
  395. }
  396. }
  397. typedef struct WAV_HEADER {
  398. ui8 RIFF[4] = {'R', 'I', 'F', 'F'};
  399. ui32 ChunkSize;
  400. ui8 WAVE[4] = {'W', 'A', 'V', 'E'};
  401. ui8 fmt[4] = {'f', 'm', 't', ' '};
  402. ui32 Subchunk1Size = 16;
  403. ui16 AudioFormat = 1;
  404. ui16 NumOfChan = 2;
  405. ui32 SamplesPerSec = 22050;
  406. ui32 bytesPerSec = 22050 * 2;
  407. ui16 blockAlign = 2;
  408. ui16 bitsPerSample = 32;
  409. ui8 Subchunk2ID[4] = {'d', 'a', 't', 'a'};
  410. ui32 Subchunk2Size;
  411. } wav_hdr;
  412. wav_hdr wav;
  413. wav.ChunkSize = samples.size() + sizeof(wav_hdr) - 8;
  414. wav.AudioFormat = formatProperties.wavFormatID; // 1 = PCM, 3 = IEEE float
  415. wav.NumOfChan = numChannels;
  416. wav.SamplesPerSec = codecpar->sample_rate;
  417. wav.bytesPerSec = codecpar->sample_rate * formatProperties.sampleSizeBytes;
  418. wav.bitsPerSample = formatProperties.sampleSizeBytes * 8;
  419. wav.Subchunk2Size = samples.size() + sizeof(wav_hdr) - 44;
  420. auto wavPtr = reinterpret_cast<ui8*>(&wav);
  421. auto dat = std::make_pair(std::make_unique<ui8[]>(samples.size() + sizeof(wav_hdr)), samples.size() + sizeof(wav_hdr));
  422. std::copy(wavPtr, wavPtr + sizeof(wav_hdr), dat.first.get());
  423. std::copy(samples.begin(), samples.end(), dat.first.get() + sizeof(wav_hdr));
  424. return dat;
  425. //CCS->soundh->playSound(dat);
  426. }
  427. bool CVideoPlayer::openAndPlayVideoImpl(const VideoPath & name, const Point & position, bool useOverlay, bool scale, bool stopOnKey)
  428. {
  429. CVideoInstance instance;
  430. CAudioInstance audio;
  431. auto extractedAudio = audio.extractAudio(name);
  432. int audioHandle = CCS->soundh->playSound(extractedAudio);
  433. instance.openInput(name);
  434. instance.openVideo();
  435. instance.prepareOutput(scale, useOverlay);
  436. auto lastTimePoint = boost::chrono::steady_clock::now();
  437. while(instance.loadNextFrame())
  438. {
  439. if(stopOnKey)
  440. {
  441. GH.input().fetchEvents();
  442. if(GH.input().ignoreEventsUntilInput())
  443. {
  444. CCS->soundh->stopSound(audioHandle);
  445. return false;
  446. }
  447. }
  448. SDL_Rect rect;
  449. rect.x = position.x;
  450. rect.y = position.y;
  451. rect.w = instance.dimensions.x;
  452. rect.h = instance.dimensions.y;
  453. if(useOverlay)
  454. SDL_RenderFillRect(mainRenderer, &rect);
  455. else
  456. SDL_RenderClear(mainRenderer);
  457. if(instance.textureYUV)
  458. SDL_RenderCopy(mainRenderer, instance.textureYUV, nullptr, &rect);
  459. else
  460. SDL_RenderCopy(mainRenderer, instance.textureRGB, nullptr, &rect);
  461. SDL_RenderPresent(mainRenderer);
  462. // Framerate delay
  463. double targetFrameTimeSeconds = instance.getCurrentFrameDuration();
  464. auto targetFrameTime = boost::chrono::milliseconds(static_cast<int>(1000 * (targetFrameTimeSeconds)));
  465. auto timePointAfterPresent = boost::chrono::steady_clock::now();
  466. auto timeSpentBusy = boost::chrono::duration_cast<boost::chrono::milliseconds>(timePointAfterPresent - lastTimePoint);
  467. logGlobal->info("Sleeping for %d", (targetFrameTime - timeSpentBusy).count());
  468. if(targetFrameTime > timeSpentBusy)
  469. boost::this_thread::sleep_for(targetFrameTime - timeSpentBusy);
  470. lastTimePoint = boost::chrono::steady_clock::now();
  471. }
  472. return true;
  473. }
/// Plays an intro-style video: fullscreen overlay, scaled, abortable by any key.
/// @return false if the user skipped the video
bool CVideoPlayer::playIntroVideo(const VideoPath & name)
{
	return openAndPlayVideoImpl(name, Point(0, 0), true, true, true);
}

/// Plays the spellbook-opening animation at given position: native size,
/// screen cleared each frame, not interruptible by input.
void CVideoPlayer::playSpellbookAnimation(const VideoPath & name, const Point & position)
{
	openAndPlayVideoImpl(name, position, false, false, false);
}
/// Opens a video for caller-driven playback (surface output; caller calls
/// tick()/show() itself). Returns a ready-to-use instance.
std::unique_ptr<IVideoInstance> CVideoPlayer::open(const VideoPath & name, bool scaleToScreen)
{
	auto result = std::make_unique<CVideoInstance>();

	result->openInput(name);
	result->openVideo();
	result->prepareOutput(scaleToScreen, false);

	return result;
}

/// Extracts the audio track of a video as an in-memory WAV blob {buffer, size}.
std::pair<std::unique_ptr<ui8[]>, si64> CVideoPlayer::getAudio(const VideoPath & videoToOpen)
{
	CAudioInstance audio;
	return audio.extractAudio(videoToOpen);
}
  495. #endif