
#1 On 30/12/2012 at 12:18

yannoo75020

Problem with avcodec_decode_audio2() on Ubuntu 12.10

Hello,


I have a problem on Ubuntu 12.10 with audio decoding through libavcodec :(
(no problem at all with the video)

The avcodec_decode_audio2() function is now deprecated and it apparently has to be replaced by avcodec_decode_audio3(), but I cannot find any tutorial that clearly explains how to use this nth version of avcodec_decode_audio()...
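For what it is worth, my current understanding from the headers is that avcodec_decode_audio3() mainly differs in taking an AVPacket instead of a raw data pointer. Something like the untested sketch below, but I am not sure it is right (out_buf, buf_size and pkt are placeholder names, not taken from the code further down):

// Rough sketch only: decode one queued packet with avcodec_decode_audio3()
// out_buf must be big enough (e.g. AVCODEC_MAX_AUDIO_FRAME_SIZE bytes)
int out_size = buf_size;                       // in: capacity of out_buf in bytes
int used = avcodec_decode_audio3(aCodecCtx,
                                 (int16_t *)out_buf, &out_size,
                                 &pkt);        // pkt: the AVPacket read from the queue
if (used < 0)
{
    /* decoding error: skip this packet */
}
// on success: 'used' = bytes consumed from pkt, 'out_size' = bytes written to out_buf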

Here is the code I am using at the moment (it works for video, but the audio handling is completely broken, and for good reason: I had to comment out the avcodec_decode_audio*() calls just to get it to compile).

// gdl.c
//
// A basic video player that will stream through every video frame as fast as it can.
//
// Code based on tutorial02 and tutorial03 from http://dranger.com/ffmpeg/tutorial0x.html 
//
// Tested on Ubuntu 12.10
//
// gcc -o gdl gdl.c -lavformat -lavcodec -lswscale -lz -lm `sdl-config --cflags --libs`
//
// Assuming that libavformat, libavcodec, libswscale are correctly installed 
// And that you have sdl-config (please refer to SDL docs for your installation)
//
// Run using
// gdl myvideofile.mpg
//
// to play the video stream on your screen.

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

#include <stdlib.h>   // malloc(), free(), atof(), exit()
#include <string.h>   // memset(), memcpy()
#include <unistd.h>   // usleep()

// TODO : replace SDL by OpenGL + YUV420P shader support 
#include <SDL.h>
#include <SDL_thread.h>
#ifdef __MINGW32__
#undef main /* Prevents SDL from overriding main() */
#endif


typedef struct
{
	int width;
        int height;
	int format;
	int interpolation;

	float scale;
	float ratio;

  	SDL_Overlay     *bmp;
  	SDL_Surface     *screen;
  	SDL_Rect         rect;
  	SDL_Event        event;

} window_t;

// TODO : handle correctly the audio frames/packets decoding 
// => how to use avcodec_decode_audio[2,3,4] on Ubuntu 12.10 ???

typedef struct PacketQueue 
{
  AVPacketList *first_pkt, *last_pkt;
  int nb_packets;
  int size;
  SDL_mutex *mutex;
  SDL_cond *cond;

} PacketQueue;


int quit = 0; 

window_t window = { 0, 0, PIX_FMT_YUV420P, SWS_BILINEAR };

PacketQueue audioq;

#include <stdio.h>

void packet_queue_init(PacketQueue *q) 
{
  memset(q, 0, sizeof(PacketQueue));
  q->mutex = SDL_CreateMutex();
  q->cond = SDL_CreateCond();
}

#define av_malloc(x) malloc(x)
#define av_free(x) free(x)

int packet_queue_put(PacketQueue *q, AVPacket *pkt) 
{

  AVPacketList *pkt1;
  if(av_dup_packet(pkt) < 0) 
  {
    return -1;
  }
  pkt1 = av_malloc(sizeof(AVPacketList));
  if (!pkt1)
    return -1;
  pkt1->pkt = *pkt;
  pkt1->next = NULL;
  
  SDL_LockMutex(q->mutex);
  
  if (!q->last_pkt)
    q->first_pkt = pkt1;
  else
    q->last_pkt->next = pkt1;
  q->last_pkt = pkt1;
  q->nb_packets++;
  q->size += pkt1->pkt.size;
  SDL_CondSignal(q->cond);
  
  SDL_UnlockMutex(q->mutex);
  return 0;
}

static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) 
{
  AVPacketList *pkt1;
  int ret;
  
  SDL_LockMutex(q->mutex);
  
  for(;;) 
  {
    
    if(quit) 
    {
      ret = -1;
      break;
    }

    pkt1 = q->first_pkt;
    if (pkt1) {
      q->first_pkt = pkt1->next;
      if (!q->first_pkt)
	q->last_pkt = NULL;
      q->nb_packets--;
      q->size -= pkt1->pkt.size;
      *pkt = pkt1->pkt;
      av_free(pkt1);
      ret = 1;
      break;
    } else if (!block) {
      ret = 0;
      break;
    } else {
      SDL_CondWait(q->cond, q->mutex);
    }
  }
  SDL_UnlockMutex(q->mutex);
  return ret;
}

int decode_interrupt_cb(void) 
{
  return quit;
}


// Forward declaration: audio_callback() needs it because audio_decode_frame() is defined further down
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size);

void audio_callback(void *userdata, Uint8 *stream, int len)
{

  AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
  int len1, audio_size;

  static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
  static unsigned int audio_buf_size = 0;
  static unsigned int audio_buf_index = 0;

  while(len > 0) 
  {
    if(audio_buf_index >= audio_buf_size) 
    {
      /* We have already sent all our data; get more */
      audio_size = audio_decode_frame(aCodecCtx, audio_buf,
                                      sizeof(audio_buf));
      if(audio_size < 0) 
      {
	/* If error, output silence */
	audio_buf_size = 1024;
	memset(audio_buf, 0, audio_buf_size);
      } else {
	audio_buf_size = audio_size;
      }
      audio_buf_index = 0;
    }
    len1 = audio_buf_size - audio_buf_index;
    if(len1 > len)
      len1 = len;
    memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
    len -= len1;
    stream += len1;
    audio_buf_index += len1;
  }
}

int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) 
{

  static AVPacket pkt;
  static uint8_t *audio_pkt_data = NULL;
  static int audio_pkt_size = 0;

  int len1 = 0, data_size;

  for(;;) 
   {
    while(audio_pkt_size > 0) 
    {
      data_size = buf_size;
      // len1 = avcodec_decode_audio2(aCodecCtx, (int16_t *)audio_buf, &data_size, audio_pkt_data, audio_pkt_size);
      if(len1 < 0) 
      {
	/* if error, skip frame */
	audio_pkt_size = 0;
	break;
      }
      audio_pkt_data += len1;
      audio_pkt_size -= len1;
      if(data_size <= 0) 
      {
	/* No data yet, get more frames */
	continue;
      }
      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt.data)
      av_free_packet(&pkt);

    if(quit) 
    {
      return -1;
    }

    if(packet_queue_get(&audioq, &pkt, 1) < 0) 
    {
      return -1;
    }
    audio_pkt_data = pkt.data;
    audio_pkt_size = pkt.size;
  }
}

int main(int argc, char *argv[]) 
{

  int i;

  AVFormatContext *pFormatCtx;
  AVCodecContext  *vCodecCtx;
  AVCodecContext  *aCodecCtx;
  AVCodec         *vCodec;
  AVCodec         *aCodec;
  AVFrame         *pFrame;
  AVPacket         packet;
  AVPicture 	   pict;
 
  struct SwsContext *img_convert_ctx;

  int              videoStream = -1;
  int 		   audioStream = -1;
  int              frameFinished = 0;
  int 		   nFrames = 0;
  int 		   nPackets = 0;
  int 		   nVideoPackets = 0;
  int 		   nAudioPackets = 0;

 

  if( argc < 2 ) 
  {
    fprintf(stderr, "Usage: gdl <file> [zoom]\n");
    exit(1);
  }

  if( argc == 3 )
  {
	
	window.scale = atof( argv[2] );
  }
  else
  {
	window.scale = 1.0f;
  }

  // Register all formats and codecs
  avcodec_register_all();
  av_register_all();

  // Init the SDL part 
  if( SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER) ) 
  {
    fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    exit(1);
  }

  // Alloc the format context
  pFormatCtx = avformat_alloc_context();

  // Open video file
  if( avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) !=0 )
  {
    printf("Cannot open the %s file :( \n", argv[1]);
    return -1;
  }

  // Retrieve stream information
  if( avformat_find_stream_info(pFormatCtx, NULL) < 0 )
  {
    return -1; // Couldn't find stream information
  }

  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  for( i = 0 ; i < pFormatCtx->nb_streams ; i++ )
  {
    if( (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) && (videoStream == -1) ) 
    {
      videoStream=i;
    }
    else
    if( (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) && (audioStream == -1) ) 
    {
      audioStream=i;
    }

  }
  if( audioStream == -1 )
  {
    printf("Didn't find an audio stream :( \n");
    return -1; 
  }
  if( videoStream == -1 )
  {
    printf("Didn't find an video stream :( \n");
    return -1; 
  }


  // Get a pointer to the codec context for the video stream
  vCodecCtx = pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  vCodec = avcodec_find_decoder(vCodecCtx->codec_id);
  if( vCodec == NULL ) 
  {
    fprintf(stderr, "Unsupported video codec!\n");
    return -1;
  }
  
  // Open video codec
  if( avcodec_open2(vCodecCtx, vCodec, NULL) < 0 )
  {
    fprintf(stderr, "Cannot open the video codec!\n");
    return -1;
  }


  // Get a pointer to the codec context for the audio stream
  aCodecCtx = pFormatCtx->streams[audioStream]->codec;

  // Find the decoder for the audio stream
  aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
  if( aCodec == NULL ) 
  {
    fprintf(stderr, "Unsupported audio codec!\n");
    return -1;
  }
  
  // Open codec
  if( avcodec_open2(aCodecCtx, aCodec, NULL) < 0 )
  {
    fprintf(stderr, "Cannot open the audio codec!\n");
    return -1;
  }

   
  // Init Audio queue buffering
  packet_queue_init(&audioq);
  SDL_PauseAudio(0);


  // Allocate video frame
  pFrame=avcodec_alloc_frame();

  // Make a window to put our scaled video 
  window.width = window.scale * vCodecCtx->width ;
  window.height = window.scale * vCodecCtx->height; 
  window.rect.x = 0;
  window.rect.y = 0;
  window.rect.w = window.width;
  window.rect.h = window.height ;
  window.ratio  = (float)(window.width) / (float)(window.height);
  printf("Create a %d x %d window (src=%dx%d scale=%1.1f ratio=%1.2f) \n", 
	window.width, window.height, vCodecCtx->width, vCodecCtx->height, window.scale, window.ratio);



#ifndef __DARWIN__
  window.screen = SDL_SetVideoMode(window.width, window.height, 0, 0);
#else
  window.screen = SDL_SetVideoMode(window.width, window.height, 24, 0);
#endif
  if(!window.screen) 
  {
    fprintf(stderr, "SDL: could not set video mode - exiting\n");
    exit(1);
  }

  // Init the frame->screen format conversion
  img_convert_ctx = sws_getContext( 
    vCodecCtx->width, vCodecCtx->height, vCodecCtx->pix_fmt, 
    window.width, window.height, window.format, 
    window.interpolation, NULL, NULL, NULL 
  );
  
  // Allocate a place to put our YUV image on that screen
  window.bmp = SDL_CreateYUVOverlay(window.width, window.height, SDL_YV12_OVERLAY, window.screen);

  // Read frames
  while( (av_read_frame(pFormatCtx, &packet) >= 0) && (quit == 0) ) 
  {
    nPackets++;

    // Is this a packet from the video stream?
    if( packet.stream_index == videoStream ) 
    {
      nVideoPackets++;

      // Decode video frame
      avcodec_decode_video2(vCodecCtx, pFrame, &frameFinished, &packet);
      
      // Did we get a video frame?
      if(frameFinished) 
      {

	nFrames++;

	SDL_LockYUVOverlay(window.bmp);

	pict.data[0] = window.bmp->pixels[0];
	pict.data[1] = window.bmp->pixels[2];
	pict.data[2] = window.bmp->pixels[1];

	pict.linesize[0] = window.bmp->pitches[0];
	pict.linesize[1] = window.bmp->pitches[2];
	pict.linesize[2] = window.bmp->pitches[1];

	// Convert the image into YUV format that SDL uses
   	sws_scale(
          img_convert_ctx, 
          (const uint8_t* const*)pFrame->data, 
          pFrame->linesize, 
          0, 
          vCodecCtx->height, 
          pict.data,
          pict.linesize
	);
	
	SDL_UnlockYUVOverlay(window.bmp);
	SDL_DisplayYUVOverlay(window.bmp , &window.rect);

	usleep( 50000 );
      
      }
    }
    else
    if( packet.stream_index == audioStream )
    {
	nAudioPackets++;
        // packet_queue_put(&audioq, &packet); // pb with av_codec_decode_audio2()  :(  
    } 
  
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);

    // Handle SDL events

    SDL_PollEvent(&window.event);
    switch(window.event.type) 
    {
    	case SDL_QUIT:
      		SDL_Quit();
      		exit(0);
		// break;

	case SDL_KEYDOWN :
		quit = 1;
		break;

    	default:
      		break;
    }

  }
  
  // Free the YUV frame
  // av_free(pFrame); /* commented because the av_free() function seem to miss on my Ubuntu 12.10 box :( */
 
  // Close the Audio/Video codecs
  avcodec_close(aCodecCtx);
  avcodec_close(vCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);

  // Print infos
  printf("%d packets read, %d video packets decoded, %d audio packets read, %d frames displayed \n", 
	nPackets, nVideoPackets, nAudioPackets, nFrames);
 
  return 0;
}

=> Where can one find a **very recent** tutorial that explains/details precisely how to decode an audio + video stream with the version of libavcodec available on Ubuntu 12.10???

PS: contrary to what the comment in the source says, it is far from running as fast as possible, because I added a usleep() that lets me slow down playback (and, above all, greatly reduce CPU usage...)
=> Once the audio problem is solved, I will be able to modify this source to speed everything up by using OpenGL with a YUV420P shader instead of calling SDL and sws_scale() for every frame.


Cheers,
Yannoo

Last edited by yannoo75020 (30/12/2012, 12:37)


#2 On 30/12/2012 at 14:31

yannoo75020

Re: Problem with avcodec_decode_audio2() on Ubuntu 12.10

I found a recent tutorial at https://github.com/mpenkov/ffmpeg-tutor … torial03.c

I can now compile it after a few minor modifications, such as adding the libavutil/samplefmt.h include and the [re?]definition of av_free() and av_malloc().
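(Side note: av_malloc() and av_free() seem to be declared in libavutil/mem.h, so the macro redefinition is probably just a workaround for a missing include; untested, but something like this should avoid it:)

// Instead of redefining av_malloc()/av_free(), the real functions should be usable with:
#include <libavutil/mem.h>   // declares av_malloc() and av_free()
// ... provided the program is linked with -lavutil (the gcc command further down already does)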

// tutorial03.c
// A pedagogical video player that will stream through every video frame as fast as it can
// and play audio (out of sync).
//
// This tutorial was written by Stephen Dranger (dranger@gmail.com) and updated
// for ffmpeg version N-42806-gf4451d2 by Michael Penkov
// (misha.penkov@gmail.com).
//
// Code based on FFplay, Copyright (c) 2003 Fabrice Bellard,
// and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
// Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
//
// Use the Makefile to build all examples.
//
// Run using
// tutorial03 myvideofile.mpg
//
// to play the stream on your screen.


#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/samplefmt.h>

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* Prevents SDL from overriding main() */
#endif

#include <stdio.h>
#include <stdlib.h>   // malloc(), free(), exit()
#include <string.h>   // memset(), memcpy()

#define SDL_AUDIO_BUFFER_SIZE 1024

#define av_malloc(x) malloc(x)
#define av_free(x) free(x)

typedef struct PacketQueue {
  AVPacketList *first_pkt, *last_pkt;
  int nb_packets;
  int size;
  SDL_mutex *mutex;
  SDL_cond *cond;
} PacketQueue;

PacketQueue audioq;

int quit = 0;

void packet_queue_init(PacketQueue *q) {
  memset(q, 0, sizeof(PacketQueue));
  q->mutex = SDL_CreateMutex();
  q->cond = SDL_CreateCond();
}
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {

  AVPacketList *pkt1;
  if(av_dup_packet(pkt) < 0) {
    return -1;
  }
  pkt1 = av_malloc(sizeof(AVPacketList));
  if (!pkt1)
    return -1;
  pkt1->pkt = *pkt;
  pkt1->next = NULL;
  
  
  SDL_LockMutex(q->mutex);
  
  if (!q->last_pkt)
    q->first_pkt = pkt1;
  else
    q->last_pkt->next = pkt1;
  q->last_pkt = pkt1;
  q->nb_packets++;
  q->size += pkt1->pkt.size;
  SDL_CondSignal(q->cond);
  
  SDL_UnlockMutex(q->mutex);
  return 0;
}
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
  AVPacketList *pkt1;
  int ret;
  
  SDL_LockMutex(q->mutex);
  
  for(;;) {
    
    if(quit) {
      ret = -1;
      break;
    }

    pkt1 = q->first_pkt;
    if (pkt1) {
      q->first_pkt = pkt1->next;
      if (!q->first_pkt)
        q->last_pkt = NULL;
      q->nb_packets--;
      q->size -= pkt1->pkt.size;
      *pkt = pkt1->pkt;
      av_free(pkt1);
      ret = 1;
      break;
    } else if (!block) {
      ret = 0;
      break;
    } else {
      SDL_CondWait(q->cond, q->mutex);
    }
  }
  SDL_UnlockMutex(q->mutex);
  return ret;
}

int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {

  static AVPacket pkt;
  static uint8_t *audio_pkt_data = NULL;
  static int audio_pkt_size = 0;
  static AVFrame frame;

  int len1, data_size = 0;

  for(;;) {
    while(audio_pkt_size > 0) {
      int got_frame = 0;
      len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
      if(len1 < 0) {
        /* if error, skip frame */
        audio_pkt_size = 0;
        break;
      }
      audio_pkt_data += len1;
      audio_pkt_size -= len1;
      if (got_frame)
      {
          data_size =
            av_samples_get_buffer_size
            (
                NULL,
                aCodecCtx->channels,
                frame.nb_samples,
                aCodecCtx->sample_fmt,
                1
            );
          memcpy(audio_buf, frame.data[0], data_size);
      }
      if(data_size <= 0) {
        /* No data yet, get more frames */
        continue;
      }
      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt.data)
      av_free_packet(&pkt);

    if(quit) {
      return -1;
    }

    if(packet_queue_get(&audioq, &pkt, 1) < 0) {
      return -1;
    }
    audio_pkt_data = pkt.data;
    audio_pkt_size = pkt.size;
  }
}

void audio_callback(void *userdata, Uint8 *stream, int len) {

  AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
  int len1, audio_size;

  static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
  static unsigned int audio_buf_size = 0;
  static unsigned int audio_buf_index = 0;

  while(len > 0) {
    if(audio_buf_index >= audio_buf_size) {
      /* We have already sent all our data; get more */
      audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf)); // pass the buffer capacity, not audio_buf_size (which is 0 on the first call)
      if(audio_size < 0) {
        /* If error, output silence */
        audio_buf_size = 1024; // arbitrary?
        memset(audio_buf, 0, audio_buf_size);
      } else {
        audio_buf_size = audio_size;
      }
      audio_buf_index = 0;
    }
    len1 = audio_buf_size - audio_buf_index;
    if(len1 > len)
      len1 = len;
    memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
    len -= len1;
    stream += len1;
    audio_buf_index += len1;
  }
}

int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int i, videoStream, audioStream;
  AVCodecContext *pCodecCtx = NULL;
  AVCodec *pCodec = NULL;
  AVFrame *pFrame = NULL;
  AVPacket packet;
  int frameFinished;
  //float aspect_ratio;
  
  AVCodecContext *aCodecCtx = NULL;
  AVCodec *aCodec = NULL;

  SDL_Overlay *bmp = NULL;
  SDL_Surface *screen = NULL;
  SDL_Rect rect;
  SDL_Event event;
  SDL_AudioSpec wanted_spec, spec;

  struct SwsContext *sws_ctx = NULL;
  AVDictionary *videoOptionsDict = NULL;
  AVDictionary *audioOptionsDict = NULL;

  if(argc < 2) {
    fprintf(stderr, "Usage: test <file>\n");
    exit(1);
  }
  // Register all formats and codecs
  av_register_all();
  
  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    exit(1);
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
    return -1; // Couldn't open file
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream=-1;
  audioStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       videoStream < 0) {
      videoStream=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audioStream < 0) {
      audioStream=i;
    }
  }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
  if(audioStream==-1)
    return -1;
   
  aCodecCtx=pFormatCtx->streams[audioStream]->codec;
  // Set audio settings from codec info
  wanted_spec.freq = aCodecCtx->sample_rate;
  wanted_spec.format = AUDIO_S16SYS;
  wanted_spec.channels = aCodecCtx->channels;
  wanted_spec.silence = 0;
  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
  wanted_spec.callback = audio_callback;
  wanted_spec.userdata = aCodecCtx;
  
  if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
    fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
    return -1;
  }
  aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
  if(!aCodec) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1;
  }
  avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict);

  // audio_st = pFormatCtx->streams[index]
  packet_queue_init(&audioq);
  SDL_PauseAudio(0);

  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
  if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=avcodec_alloc_frame();

  // Make a screen to put our video

#ifndef __DARWIN__
        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
  if(!screen) {
    fprintf(stderr, "SDL: could not set video mode - exiting\n");
    exit(1);
  }
  
  // Allocate a place to put our YUV image on that screen
  bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
                             pCodecCtx->height,
                             SDL_YV12_OVERLAY,
                             screen);
  sws_ctx =
    sws_getContext
    (
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        PIX_FMT_YUV420P,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
    );


  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                            &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
        SDL_LockYUVOverlay(bmp);

        AVPicture pict;
        pict.data[0] = bmp->pixels[0];
        pict.data[1] = bmp->pixels[2];
        pict.data[2] = bmp->pixels[1];

        pict.linesize[0] = bmp->pitches[0];
        pict.linesize[1] = bmp->pitches[2];
        pict.linesize[2] = bmp->pitches[1];

        // Convert the image into YUV format that SDL uses
        sws_scale
        (
            sws_ctx,
            (uint8_t const * const *)pFrame->data,
            pFrame->linesize,
            0,
            pCodecCtx->height,
            pict.data,
            pict.linesize
        );

        SDL_UnlockYUVOverlay(bmp);

        rect.x = 0;
        rect.y = 0;
        rect.w = pCodecCtx->width;
        rect.h = pCodecCtx->height;
        SDL_DisplayYUVOverlay(bmp, &rect);
        av_free_packet(&packet);
      }
    } else if(packet.stream_index==audioStream) {
      packet_queue_put(&audioq, &packet);
    } else {
      av_free_packet(&packet);
    }
    // Handle SDL events
    SDL_PollEvent(&event);
    switch(event.type) {
    case SDL_QUIT:
      quit = 1;
      SDL_Quit();
      exit(0);
      break;
    default:
      break;
    }

  }

  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}

It compiles with this command:

gcc -o tutorial03 tutorial03.c -lavformat -lavcodec -lswscale -lavutil -lz -lm `sdl-config --cflags --libs`

And both the video and the audio are handled correctly :)

Note that it uses avcodec_decode_audio4() and not avcodec_decode_audio2()
[and the avcodec_decode_audio*() functions do not seem to be backward compatible with one another; what a mess...]
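For reference, here are the three signatures as I understand them (written from memory, so double-check against the avcodec.h shipped with 12.10), which shows why they are not drop-in replacements for one another:

/* avcodec_decode_audio2: raw input buffer, sample buffer size passed in/out */
int avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples,
                          int *frame_size_ptr,
                          const uint8_t *buf, int buf_size);

/* avcodec_decode_audio3: same output convention, but the input is an AVPacket */
int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
                          int *frame_size_ptr, AVPacket *avpkt);

/* avcodec_decode_audio4: output goes into an AVFrame, with a got_frame flag */
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
                          int *got_frame_ptr, AVPacket *avpkt);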

=> Now that I have a working base again on my Ubuntu 12.10, I can finally get back to porting the SDL / sws_scale() parts to OpenGL using YCbCr420P shaders
(I would really like to divide the 90% CPU usage I currently get with this source code by at least 3 or 4 :) )

edit: with a simple usleep(20000) after SDL_DisplayYUVOverlay(bmp, &rect), I already go from 10% to 75% free CPU time :)
=> I think I have already reached my 3-4x reduction in CPU usage :) :)

edit #2: adding window-resize handling takes fairly little code and does not seem very CPU-hungry
(around 5% lost, or something like that: the free CPU time only dropped from 75% to 70%). The two excerpts below show the setup part and the new SDL_VIDEORESIZE case in the event loop.

 	// Make a screen to put our video
#ifndef __DARWIN__
	window.bpp = 0;
#else
	window.bpp = 24;
#endif
	window.screen = SDL_SetVideoMode(window.width, window.height, window.bpp, SDL_RESIZABLE);
  	if(!window.screen) 
  	{
    		fprintf(stderr, "SDL: could not set video mode - exiting\n");
    		exit(1);
  	}
  
  	// Allocate a place to put our YUV image on that screen
  	window.bmp = SDL_CreateYUVOverlay( window.width, window.height, SDL_YV12_OVERLAY, window.screen);
 
 	sws_ctx = sws_getContext(
        	vCodecCtx->width, vCodecCtx->height, vCodecCtx->pix_fmt,
        	window.width, window.height, window.format,
        	SWS_BILINEAR, NULL, NULL, NULL );

	// ... and, in the SDL event loop, the new SDL_VIDEORESIZE case:

			case SDL_VIDEORESIZE :

				window.width = event.resize.w;
				window.height = event.resize.h;
				window.rect.w = window.width;
  				window.rect.h = window.height;
				window.scale  = window.width * window.height;
				window.scale /= (vCodecCtx->width * vCodecCtx->height); 
  				window.ratio  = (float)(window.width) / (float)(window.height);
  				printf("Resize to %d x %d (src=%dx%d scale=%1.1f ratio=%1.2f) \n", 
					window.width, window.height, 
					vCodecCtx->width, vCodecCtx->height, 
					window.scale, window.ratio
				);

				window.screen = SDL_SetVideoMode(
					window.width, window.height, 
					window.bpp, SDL_RESIZABLE
				);

				window.bmp = SDL_CreateYUVOverlay( 
					window.width, 
					window.height, 
					SDL_YV12_OVERLAY, 
					window.screen
				);

				sws_ctx = sws_getContext(
        				vCodecCtx->width, vCodecCtx->height, vCodecCtx->pix_fmt,
        				window.width, window.height, window.format,
        				SWS_BILINEAR, NULL, NULL, NULL 
				);
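One thing I am not sure about: recreating the overlay and the scaler on every resize probably leaks the previous ones, so it might be safer to release them first, something like this untested sketch just before the SDL_CreateYUVOverlay() / sws_getContext() calls above:

				// Release the previous scaler and overlay before recreating them
				if (sws_ctx)
					sws_freeContext(sws_ctx);          // frees the old libswscale context
				if (window.bmp)
					SDL_FreeYUVOverlay(window.bmp);    // frees the old SDL overlay
				// (the SDL_Surface returned by SDL_SetVideoMode is managed by SDL itself)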

But it still occupies about half a CPU, which is at least 2 to 5x too much for my taste... :(
(I would like it to take only about 10% of CPU time once SDL + sws_scale() are replaced by OpenGL + a YUV420P shader, and then 5% or less if I manage to plug hardware acceleration such as VA-API into the whole thing...)
 
=> Without the calls to sws_scale() and SDL_DisplayYUVOverlay(), I have well over 90% of CPU time free :)

==> These two functions therefore consume roughly 15% to 20% of CPU time
(more than all the rest of the code, in other words...)

===> I think replacing them with OpenGL + a YUV420P shader should save between 5% and 10%
(i.e. doing the same scaling + on-screen display, but about twice as fast)
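For the record, the kind of fragment shader I have in mind for the YUV420P -> RGB conversion is roughly this (only a sketch, with BT.601-style coefficients, assuming the Y, U and V planes are uploaded as three single-channel textures; all the names are mine):

/* Sketch of a YUV420P -> RGB fragment shader (GLSL), stored as a C string */
static const char *yuv420p_fragment_shader =
    "uniform sampler2D tex_y;\n"
    "uniform sampler2D tex_u;\n"
    "uniform sampler2D tex_v;\n"
    "void main(void)\n"
    "{\n"
    "    float y = texture2D(tex_y, gl_TexCoord[0].st).r;\n"
    "    float u = texture2D(tex_u, gl_TexCoord[0].st).r - 0.5;\n"
    "    float v = texture2D(tex_v, gl_TexCoord[0].st).r - 0.5;\n"
    "    float r = y + 1.402 * v;\n"
    "    float g = y - 0.344 * u - 0.714 * v;\n"
    "    float b = y + 1.772 * u;\n"
    "    gl_FragColor = vec4(r, g, b, 1.0);\n"
    "}\n";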

Note that it climbs to more than 96% free CPU time when I comment out the avcodec_decode_video2() call
=> so there is a good chance that going through VA-API or something similar could bring the player's CPU usage down to around 5%, as I had initially planned :) :)


Cheers,
Yannoo

Last edited by yannoo75020 (30/12/2012, 21:22)
