CVideoHandler.cpp 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466
  1. /*
  2. * CVideoHandler.cpp, part of VCMI engine
  3. *
  4. * Authors: listed in file AUTHORS in main folder
  5. *
  6. * License: GNU General Public License v2.0 or later
  7. * Full text of license available in license.txt file, in main folder
  8. *
  9. */
  10. #include "StdInc.h"
  11. #include "CVideoHandler.h"
  12. #ifndef DISABLE_VIDEO
  13. #include "../CMT.h"
  14. #include "../CPlayerInterface.h"
  15. #include "../eventsSDL/InputHandler.h"
  16. #include "../gui/CGuiHandler.h"
  17. #include "../gui/FramerateManager.h"
  18. #include "../render/Canvas.h"
  19. #include "../renderSDL/SDL_Extensions.h"
  20. #include "../../lib/filesystem/CInputStream.h"
  21. #include "../../lib/filesystem/Filesystem.h"
  22. #include <SDL_render.h>
  23. extern "C" {
  24. #include <libavformat/avformat.h>
  25. #include <libavcodec/avcodec.h>
  26. #include <libavutil/imgutils.h>
  27. #include <libswscale/swscale.h>
  28. }
  29. // Define a set of functions to read data
  30. static int lodRead(void * opaque, uint8_t * buf, int size)
  31. {
  32. auto * data = static_cast<CInputStream *>(opaque);
  33. int bytes = static_cast<int>(data->read(buf, size));
  34. if(bytes == 0)
  35. return AVERROR_EOF;
  36. return bytes;
  37. }
  38. static si64 lodSeek(void * opaque, si64 pos, int whence)
  39. {
  40. auto * data = static_cast<CInputStream *>(opaque);
  41. if(whence & AVSEEK_SIZE)
  42. return data->getSize();
  43. return data->seek(pos);
  44. }
  45. [[noreturn]] static void throwFFmpegError(int errorCode)
  46. {
  47. std::array<char, AV_ERROR_MAX_STRING_SIZE> errorMessage{};
  48. av_strerror(errorCode, errorMessage.data(), errorMessage.size());
  49. throw std::runtime_error(errorMessage.data());
  50. }
  51. void CVideoInstance::open(const VideoPath & videoToOpen)
  52. {
  53. if(CResourceHandler::get()->existsResource(videoToOpen))
  54. state.actualPath = videoToOpen;
  55. else
  56. state.actualPath = videoToOpen.addPrefix("VIDEO/");
  57. state.videoData = CResourceHandler::get()->load(state.actualPath);
  58. static const int BUFFER_SIZE = 4096;
  59. auto * buffer = static_cast<unsigned char *>(av_malloc(BUFFER_SIZE)); // will be freed by ffmpeg
  60. state.context = avio_alloc_context(buffer, BUFFER_SIZE, 0, state.videoData.get(), lodRead, nullptr, lodSeek);
  61. state.formatContext = avformat_alloc_context();
  62. state.formatContext->pb = state.context;
  63. // filename is not needed - file was already open and stored in this->data;
  64. int avfopen = avformat_open_input(&state.formatContext, "dummyFilename", nullptr, nullptr);
  65. if(avfopen != 0)
  66. throwFFmpegError(avfopen);
  67. // Retrieve stream information
  68. int findStreamInfo = avformat_find_stream_info(state.formatContext, nullptr);
  69. if(avfopen < 0)
  70. throwFFmpegError(findStreamInfo);
  71. for(int i = 0; i < state.formatContext->nb_streams; i++)
  72. {
  73. if(state.formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video.streamIndex == -1)
  74. {
  75. openStream(video, i);
  76. }
  77. if(state.formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio.streamIndex == -1)
  78. openStream(audio, i);
  79. }
  80. }
  81. void CVideoInstance::openStream(FFMpegStreamState & streamState, int streamIndex)
  82. {
  83. streamState.streamIndex = streamIndex;
  84. // Find the decoder for the stream
  85. streamState.codec = avcodec_find_decoder(state.formatContext->streams[streamIndex]->codecpar->codec_id);
  86. if(streamState.codec == nullptr)
  87. throw std::runtime_error("Unsupported codec");
  88. streamState.codecContext = avcodec_alloc_context3(streamState.codec);
  89. if(streamState.codecContext == nullptr)
  90. throw std::runtime_error("Failed to create codec context");
  91. // Get a pointer to the codec context for the video stream
  92. int ret = avcodec_parameters_to_context(streamState.codecContext, state.formatContext->streams[streamIndex]->codecpar);
  93. if(ret < 0)
  94. {
  95. //We cannot get codec from parameters
  96. avcodec_free_context(&streamState.codecContext);
  97. throwFFmpegError(ret);
  98. }
  99. // Open codec
  100. ret = avcodec_open2(streamState.codecContext, streamState.codec, nullptr);
  101. if(ret < 0)
  102. {
  103. // Could not open codec
  104. streamState.codec = nullptr;
  105. throwFFmpegError(ret);
  106. }
  107. }
  108. void CVideoInstance::prepareOutput(bool scaleToScreenSize, bool useTextureOutput)
  109. {
  110. if (video.streamIndex == -1)
  111. throw std::runtime_error("Invalid file state! No video stream!");
  112. // Allocate video frame
  113. output.frame = av_frame_alloc();
  114. //setup scaling
  115. if(scaleToScreenSize)
  116. {
  117. output.dimensions.x = screen->w;
  118. output.dimensions.y = screen->h;
  119. }
  120. else
  121. {
  122. output.dimensions.x = video.codecContext->width;
  123. output.dimensions.y = video.codecContext->height;
  124. }
  125. // Allocate a place to put our YUV image on that screen
  126. if (useTextureOutput)
  127. {
  128. output.texture = SDL_CreateTexture( mainRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, output.dimensions.x, output.dimensions.y);
  129. output.sws = sws_getContext(video.codecContext->width, video.codecContext->height, video.codecContext->pix_fmt,
  130. output.dimensions.x, output.dimensions.y, AV_PIX_FMT_YUV420P,
  131. SWS_BICUBIC, nullptr, nullptr, nullptr);
  132. }
  133. else
  134. {
  135. output.surface = CSDL_Ext::newSurface(output.dimensions.x, output.dimensions.y);
  136. output.sws = sws_getContext(video.codecContext->width, video.codecContext->height, video.codecContext->pix_fmt,
  137. output.dimensions.x, output.dimensions.y, AV_PIX_FMT_RGB32,
  138. SWS_BICUBIC, nullptr, nullptr, nullptr);
  139. }
  140. if (output.sws == nullptr)
  141. throw std::runtime_error("Failed to create sws");
  142. }
  143. bool CVideoInstance::nextFrame()
  144. {
  145. AVPacket packet;
  146. for(;;)
  147. {
  148. int ret = av_read_frame(state.formatContext, &packet);
  149. if(ret < 0)
  150. {
  151. if(ret == AVERROR_EOF)
  152. return false;
  153. throwFFmpegError(ret);
  154. }
  155. // Is this a packet from the video stream?
  156. if(packet.stream_index == video.streamIndex)
  157. {
  158. // Decode video frame
  159. int rc = avcodec_send_packet(video.codecContext, &packet);
  160. if(rc < 0)
  161. throwFFmpegError(ret);
  162. rc = avcodec_receive_frame(video.codecContext, output.frame);
  163. if(rc < 0)
  164. throwFFmpegError(ret);
  165. uint8_t * data[4];
  166. int linesize[4];
  167. if(output.texture)
  168. {
  169. av_image_alloc(data, linesize, output.dimensions.x, output.dimensions.y, AV_PIX_FMT_YUV420P, 1);
  170. sws_scale(output.sws, output.frame->data, output.frame->linesize, 0, video.codecContext->height, data, linesize);
  171. SDL_UpdateYUVTexture(output.texture, nullptr, data[0], linesize[0], data[1], linesize[1], data[2], linesize[2]);
  172. av_freep(&data[0]);
  173. }
  174. else
  175. {
  176. // Avoid buffer overflow caused by sws_scale():
  177. // http://trac.ffmpeg.org/ticket/9254
  178. size_t pic_bytes = output.surface->pitch * output.surface->h;
  179. size_t ffmped_pad = 1024; /* a few bytes of overflow will go here */
  180. void * for_sws = av_malloc(pic_bytes + ffmped_pad);
  181. data[0] = (ui8 *)for_sws;
  182. linesize[0] = output.surface->pitch;
  183. sws_scale(output.sws, output.frame->data, output.frame->linesize, 0, video.codecContext->height, data, linesize);
  184. memcpy(output.surface->pixels, for_sws, pic_bytes);
  185. av_free(for_sws);
  186. }
  187. av_packet_unref(&packet);
  188. return true;
  189. }
  190. }
  191. }
// Returns true once tick() has consumed the final frame of the stream.
bool CVideoInstance::videoEnded()
{
	return output.videoEnded;
}
  196. void CVideoInstance::close()
  197. {
  198. sws_freeContext(output.sws);
  199. av_frame_free(&output.frame);
  200. SDL_DestroyTexture(output.texture);
  201. SDL_FreeSurface(output.surface);
  202. // state.videoStream.codec???
  203. // state.audioStream.codec???
  204. avcodec_close(video.codecContext);
  205. avcodec_free_context(&video.codecContext);
  206. avcodec_close(audio.codecContext);
  207. avcodec_free_context(&audio.codecContext);
  208. avformat_close_input(&state.formatContext);
  209. av_free(state.context);
  210. output = FFMpegVideoOutput();
  211. video = FFMpegStreamState();
  212. audio = FFMpegStreamState();
  213. state = FFMpegFileState();
  214. }
// Ensure all ffmpeg/SDL resources are released when the instance is destroyed.
CVideoInstance::~CVideoInstance()
{
	close();
}
  219. Point CVideoInstance::size()
  220. {
  221. if(!output.frame)
  222. throw std::runtime_error("Invalid video frame!");
  223. return Point(output.frame->width, output.frame->height);
  224. }
  225. void CVideoInstance::show(const Point & position, Canvas & canvas)
  226. {
  227. if(output.sws == nullptr)
  228. throw std::runtime_error("No video to show!");
  229. CSDL_Ext::blitSurface(output.surface, canvas.getInternalSurface(), position);
  230. }
  231. void CVideoInstance::tick(uint32_t msPassed)
  232. {
  233. if(output.sws == nullptr)
  234. throw std::runtime_error("No video to show!");
  235. if(output.videoEnded)
  236. throw std::runtime_error("Video already ended!");
  237. # if(LIBAVUTIL_VERSION_MAJOR < 58)
  238. auto packet_duration = output.frame->pkt_duration;
  239. # else
  240. auto packet_duration = frame->duration;
  241. # endif
  242. double frameEndTime = (output.frame->pts + packet_duration) * av_q2d(state.formatContext->streams[video.streamIndex]->time_base);
  243. output.frameTime += msPassed / 1000.0;
  244. if(output.frameTime >= frameEndTime)
  245. {
  246. if(!nextFrame())
  247. output.videoEnded = true;
  248. }
  249. }
#if 0
// Legacy audio-extraction implementation, compiled out. Decodes the audio
// stream into raw samples and prepends a hand-built 44-byte WAV header.
// NOTE(review): references helpers (openVideoFile/closeVideoFile) and an
// FFMpegFileState::audioStream member that do not match the current class
// layout - this would need porting before it could be re-enabled. The live
// getAudio() stub at the end of this file returns an empty buffer instead.
std::pair<std::unique_ptr<ui8 []>, si64> CVideoPlayer::getAudio(const VideoPath & videoToOpen)
{
	std::pair<std::unique_ptr<ui8 []>, si64> dat(std::make_pair(nullptr, 0));
	FFMpegFileState audio;
	openVideoFile(audio, videoToOpen);
	if (audio.audioStream.streamIndex < 0)
	{
		// No audio stream in this container
		closeVideoFile(audio);
		return { nullptr, 0};
	}
	// Open codec
	AVFrame *frameAudio = av_frame_alloc();
	AVPacket packet;
	std::vector<ui8> samples;
	// Drain the container, decoding every audio packet into raw sample bytes
	while (av_read_frame(audio.formatContext, &packet) >= 0)
	{
		if(packet.stream_index == audio.audioStream.streamIndex)
		{
			int rc = avcodec_send_packet(audio.audioStream.codecContext, &packet);
			if (rc >= 0)
				packet.size = 0;
			rc = avcodec_receive_frame(audio.audioStream.codecContext, frameAudio);
			int bytesToRead = (frameAudio->nb_samples * 2 * (audio.formatContext->streams[audio.audioStream.streamIndex]->codecpar->bits_per_coded_sample / 8));
			if (rc >= 0)
				// Copy decoded bytes one at a time into the sample buffer
				for (int s = 0; s < bytesToRead; s += sizeof(ui8))
				{
					ui8 value;
					memcpy(&value, &frameAudio->data[0][s], sizeof(ui8));
					samples.push_back(value);
				}
		}
		av_packet_unref(&packet);
	}
	// Canonical minimal WAV header; defaults assume 16-bit stereo at 22050 Hz,
	// size/rate fields are patched from the actual stream below
	typedef struct WAV_HEADER {
		ui8 RIFF[4] = {'R', 'I', 'F', 'F'};
		ui32 ChunkSize;
		ui8 WAVE[4] = {'W', 'A', 'V', 'E'};
		ui8 fmt[4] = {'f', 'm', 't', ' '};
		ui32 Subchunk1Size = 16;
		ui16 AudioFormat = 1;
		ui16 NumOfChan = 2;
		ui32 SamplesPerSec = 22050;
		ui32 bytesPerSec = 22050 * 2;
		ui16 blockAlign = 2;
		ui16 bitsPerSample = 16;
		ui8 Subchunk2ID[4] = {'d', 'a', 't', 'a'};
		ui32 Subchunk2Size;
	} wav_hdr;
	wav_hdr wav;
	wav.ChunkSize = samples.size() + sizeof(wav_hdr) - 8;
	wav.Subchunk2Size = samples.size() + sizeof(wav_hdr) - 44;
	wav.SamplesPerSec = audio.formatContext->streams[audio.audioStream.streamIndex]->codecpar->sample_rate;
	wav.bitsPerSample = audio.formatContext->streams[audio.audioStream.streamIndex]->codecpar->bits_per_coded_sample;
	auto wavPtr = reinterpret_cast<ui8*>(&wav);
	// Concatenate header + samples into the returned buffer
	dat = std::make_pair(std::make_unique<ui8[]>(samples.size() + sizeof(wav_hdr)), samples.size() + sizeof(wav_hdr));
	std::copy(wavPtr, wavPtr + sizeof(wav_hdr), dat.first.get());
	std::copy(samples.begin(), samples.end(), dat.first.get() + sizeof(wav_hdr));
	if (frameAudio)
		av_frame_free(&frameAudio);
	closeVideoFile(audio);
	return dat;
}
#endif
  314. bool CVideoPlayer::openAndPlayVideoImpl(const VideoPath & name, const Point & position, bool useOverlay, bool scale, bool stopOnKey)
  315. {
  316. CVideoInstance instance;
  317. instance.open(name);
  318. instance.prepareOutput(scale, useOverlay);
  319. auto lastTimePoint = boost::chrono::steady_clock::now();
  320. while(instance.nextFrame())
  321. {
  322. if(stopOnKey)
  323. {
  324. GH.input().fetchEvents();
  325. if(GH.input().ignoreEventsUntilInput())
  326. return false;
  327. }
  328. SDL_Rect rect;
  329. rect.x = position.x;
  330. rect.y = position.y;
  331. rect.w = instance.output.dimensions.x;
  332. rect.h = instance.output.dimensions.y;
  333. if(useOverlay)
  334. SDL_RenderFillRect(mainRenderer, &rect);
  335. else
  336. SDL_RenderClear(mainRenderer);
  337. SDL_RenderCopy(mainRenderer, instance.output.texture, nullptr, &rect);
  338. SDL_RenderPresent(mainRenderer);
  339. #if (LIBAVUTIL_VERSION_MAJOR < 58)
  340. auto packet_duration = instance.output.frame->pkt_duration;
  341. #else
  342. auto packet_duration = output.frame->duration;
  343. #endif
  344. // Framerate delay
  345. double targetFrameTimeSeconds = packet_duration * av_q2d(instance.state.formatContext->streams[instance.video.streamIndex]->time_base);
  346. auto targetFrameTime = boost::chrono::milliseconds(static_cast<int>(1000 * (targetFrameTimeSeconds)));
  347. auto timePointAfterPresent = boost::chrono::steady_clock::now();
  348. auto timeSpentBusy = boost::chrono::duration_cast<boost::chrono::milliseconds>(timePointAfterPresent - lastTimePoint);
  349. if (targetFrameTime > timeSpentBusy)
  350. boost::this_thread::sleep_for(targetFrameTime - timeSpentBusy);
  351. lastTimePoint = boost::chrono::steady_clock::now();
  352. }
  353. return true;
  354. }
  355. bool CVideoPlayer::playIntroVideo(const VideoPath & name)
  356. {
  357. return openAndPlayVideoImpl(name, Point(0,0), true, true, true);
  358. }
  359. void CVideoPlayer::playSpellbookAnimation(const VideoPath & name, const Point & position)
  360. {
  361. openAndPlayVideoImpl(name, position, false, false, false);
  362. }
  363. std::unique_ptr<IVideoInstance> CVideoPlayer::open(const VideoPath & name, bool scaleToScreen)
  364. {
  365. auto result = std::make_unique<CVideoInstance>();
  366. result->open(name);
  367. result->prepareOutput(scaleToScreen, false);
  368. return result;
  369. }
  370. std::pair<std::unique_ptr<ui8 []>, si64> CVideoPlayer::getAudio(const VideoPath & videoToOpen)
  371. {
  372. return {nullptr, 0};
  373. }
  374. #endif