So I've been trying to go through the following tutorial on ffmpeg: http://dranger.com/ffmpeg/tutorial02.html
However, when I try to compile using gcc, I get the following output:
root:/Users/mbrodeur/Downloads/HACKATHON CONTENT/Tutorials-> gcc -o tutorial02 tutorial02.c -lavutil -lavformat -lavcodec -lz -lavutil -lm -lswscale -D_THREAD_SAFE -lSDL2
tutorial02.c: In function ‘main’:
tutorial02.c:41: error: ‘SDL_Overlay’ undeclared (first use in this function)
tutorial02.c:41: error: (Each undeclared identifier is reported only once
tutorial02.c:41: error: for each function it appears in.)
tutorial02.c:41: error: ‘bmp’ undeclared (first use in this function)
tutorial02.c:98: warning: assignment makes pointer from integer without a cast
tutorial02.c:110: error: ‘SDL_YV12_OVERLAY’ undeclared (first use in this function)
Now, I read that SDL_Overlay was removed in SDL2, so therein lies the problem. I've been poking around, but I can't seem to find anything helpful. Is there a replacement for SDL_Overlay? Is one even necessary?
SDL_Overlay is used in the following context:
SDL_Overlay *bmp;
bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,
SDL_YV12_OVERLAY, screen);
I have updated the tutorial's code to work with SDL 2.0.1. It replaces the SDL_Overlay with an SDL_Texture in YV12 format, which is updated each frame with SDL_UpdateYUVTexture and drawn with SDL_RenderCopy/SDL_RenderPresent.
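The old and new calls line up roughly like this (the names match the full listing below; w and h stand for pCodecCtx->width and pCodecCtx->height):

// SDL1: bmp = SDL_CreateYUVOverlay(w, h, SDL_YV12_OVERLAY, screen);
texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12,
                            SDL_TEXTUREACCESS_STREAMING, w, h);

// SDL1: SDL_DisplayYUVOverlay(bmp, &rect);
SDL_UpdateYUVTexture(texture, NULL, yPlane, w, uPlane, w / 2, vPlane, w / 2);
SDL_RenderClear(renderer);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);

Here is the complete program: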
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <SDL2/SDL.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[]) {
AVFormatContext *pFormatCtx = NULL;
int videoStream;
unsigned i;
AVCodecContext *pCodecCtxOrig = NULL;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
AVFrame *pFrame = NULL;
AVPacket packet;
int frameFinished;
struct SwsContext *sws_ctx = NULL;
SDL_Event event;
SDL_Window *screen;
SDL_Renderer *renderer;
SDL_Texture *texture;
Uint8 *yPlane, *uPlane, *vPlane;
size_t yPlaneSz, uvPlaneSz;
int uvPitch;
if (argc < 2) {
fprintf(stderr, "Usage: test <file>\n");
exit(1);
}
// Register all formats and codecs
av_register_all();
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
exit(1);
}
// Open video file
if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
return -1; // Couldn't open file
// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
return -1; // Couldn't find stream information
// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, argv[1], 0);
// Find the first video stream
videoStream = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStream = i;
break;
}
if (videoStream == -1)
return -1; // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id);
if (pCodec == NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}
// Copy context
pCodecCtx = avcodec_alloc_context3(pCodec);
if (avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
fprintf(stderr, "Couldn't copy codec context");
return -1; // Error copying codec context
}
// Open codec
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
return -1; // Could not open codec
// Allocate video frame
pFrame = av_frame_alloc();
// Make a screen to put our video
screen = SDL_CreateWindow(
"FFmpeg Tutorial",
SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED,
pCodecCtx->width,
pCodecCtx->height,
0
);
if (!screen) {
fprintf(stderr, "SDL: could not create window - exiting\n");
exit(1);
}
renderer = SDL_CreateRenderer(screen, -1, 0);
if (!renderer) {
fprintf(stderr, "SDL: could not create renderer - exiting\n");
exit(1);
}
// Allocate a place to put our YUV image on that screen
texture = SDL_CreateTexture(
renderer,
SDL_PIXELFORMAT_YV12,
SDL_TEXTUREACCESS_STREAMING,
pCodecCtx->width,
pCodecCtx->height
);
if (!texture) {
fprintf(stderr, "SDL: could not create texture - exiting\n");
exit(1);
}
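// Convert decoded frames to planar YUV 4:2:0 (AV_PIX_FMT_YUV420P), which matches the
// YV12 texture's layout; SDL_UpdateYUVTexture takes the Y, U and V planes separately,
// so the I420 vs. YV12 plane-order difference doesn't matter here.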
// initialize SWS context for software scaling
sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
AV_PIX_FMT_YUV420P,
SWS_BILINEAR,
NULL,
NULL,
NULL);
// set up YV12 pixel array (12 bits per pixel)
yPlaneSz = pCodecCtx->width * pCodecCtx->height;
uvPlaneSz = pCodecCtx->width * pCodecCtx->height / 4;
yPlane = (Uint8*)malloc(yPlaneSz);
uPlane = (Uint8*)malloc(uvPlaneSz);
vPlane = (Uint8*)malloc(uvPlaneSz);
if (!yPlane || !uPlane || !vPlane) {
fprintf(stderr, "Could not allocate pixel buffers - exiting\n");
exit(1);
}
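// Chroma is subsampled 2x horizontally and vertically, so each U/V plane is a
// quarter of the Y plane and each of its rows is width/2 bytes wide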
uvPitch = pCodecCtx->width / 2;
while (av_read_frame(pFormatCtx, &packet) >= 0) {
// Is this a packet from the video stream?
if (packet.stream_index == videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
// Did we get a video frame?
if (frameFinished) {
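// Use an AVPicture purely as a container for the destination plane
// pointers and line sizes that sws_scale writes into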
AVPicture pict;
pict.data[0] = yPlane;
pict.data[1] = uPlane;
pict.data[2] = vPlane;
pict.linesize[0] = pCodecCtx->width;
pict.linesize[1] = uvPitch;
pict.linesize[2] = uvPitch;
// Convert the image into YUV format that SDL uses
sws_scale(sws_ctx, (uint8_t const * const *) pFrame->data,
pFrame->linesize, 0, pCodecCtx->height, pict.data,
pict.linesize);
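// Upload the three converted planes into the streaming texture,
// then draw the texture and present the frame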
SDL_UpdateYUVTexture(
texture,
NULL,
yPlane,
pCodecCtx->width,
uPlane,
uvPitch,
vPlane,
uvPitch
);
SDL_RenderClear(renderer);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
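// Poll window events so a quit request (closing the window) is noticed while decoding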
SDL_PollEvent(&event);
switch (event.type) {
case SDL_QUIT:
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
exit(0);
break;
default:
break;
}
}
// Free the YUV frame
av_frame_free(&pFrame);
free(yPlane);
free(uPlane);
free(vPlane);
// Close the codec
avcodec_close(pCodecCtx);
avcodec_close(pCodecCtxOrig);
// Close the video file
avformat_close_input(&pFormatCtx);
return 0;
}
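It should build with essentially the same command as in the question, for example (assuming sdl2-config is available):

gcc -o tutorial02 tutorial02.c -lavformat -lavcodec -lswscale -lavutil -lz -lm $(sdl2-config --cflags --libs)

Note that the FFmpeg calls are the same generation the tutorial uses (avcodec_copy_context, avcodec_decode_video2, av_free_packet), so this targets an FFmpeg release from that era; newer releases deprecate those functions.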