上一篇中介绍了XData,XDataThread,XPlay三个类的实现,这一篇中将对XRtmpPlay和QT界面的设计和代码实现进行介绍。

四:XRtmpPlay 的实现:

/*Xplay.cpp*/

classXRtmpPlay :publicXPlay

int in_audioSmapleRate = 44100;

int in_audioChannels = 2;

int in_audioSampleBit = 16;

AVFormatContext         *pFmtContext = NULL;

AVCodecContext          *pVideoCodec = NULL;

AVCodecContext          *pAudioCodec = NULL;

SwsContext               *pswsContext = NULL;

AVFrame                   *pFrame = NULL;

AVFrame                   *pRGBFrame =NULL;

SwrContext                *au_convert_ctx = NULL;

QMutex                      mutex;

uint8_t                      *out_buffer = NULL;

/**url : 直播视频源

width:图像宽度

height:图像高度**/

bool Init(char *url, intwidth, intheight)

int rt = -1;

outUrl= url;

vWidth= width;

vHeight= height;

pFmtContext= avformat_alloc_context();

AVSampleFormat fmt;

AVDictionary *opts = NULL;

rt= avformat_open_input(&pFmtContext, outUrl, NULL, &opts);

if (rt < 0)

char buf[AV_ERROR_MAX_STRING_SIZE] = { 0 };

av_make_error_string(buf,sizeof(buf), rt);

qDebug() << buf;

returnfalse;

rt= avformat_find_stream_info(pFmtContext, NULL);

if (rt < 0)

avformat_free_context(pFmtContext);

char buf[AV_ERROR_MAX_STRING_SIZE] = { 0 };

av_make_error_string(buf,sizeof(buf), rt);

qDebug() << buf;

returnfalse;

///取得视频和音频流索引

for (int i = 0; i <pFmtContext->nb_streams; i++)

if(pFmtContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)

videoIndex= i;

rWidth= pFmtContext->streams[i]->codec->width;

rHeight= pFmtContext->streams[i]->codec->height;

elseif(pFmtContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)

audioIndex= i;

in_audioSmapleRate= pFmtContext->streams[i]->codec->sample_rate;

in_audioChannels= pFmtContext->streams[i]->codec->channels;

fmt=pFmtContext->streams[i]->codec->sample_fmt;

switch (fmt)

caseAV_SAMPLE_FMT_U8:

in_audioSampleBit= 8;

caseAV_SAMPLE_FMT_S16:

in_audioSampleBit= 16;

case  AV_SAMPLE_FMT_S32:

in_audioSampleBit= 32;

//取得视频CodecContext

AVCodec *vCodec =avcodec_find_decoder(pFmtContext->streams[videoIndex]->codecpar->codec_id);

pVideoCodec= avcodec_alloc_context3(vCodec);

rt= avcodec_parameters_to_context(pVideoCodec, pFmtContext->streams[videoIndex]->codecpar);

if (rt < 0)

char buf[AV_ERROR_MAX_STRING_SIZE] = { 0 };

av_make_error_string(buf,sizeof(buf), rt);

qDebug() << buf;

returnfalse;

//取得音频CodecContext

AVCodec *aCodec =avcodec_find_decoder(pFmtContext->streams[audioIndex]->codecpar->codec_id);

pAudioCodec= avcodec_alloc_context3(aCodec);

rt= avcodec_parameters_to_context(pAudioCodec,pFmtContext->streams[audioIndex]->codecpar);

if (rt < 0)

char buf[AV_ERROR_MAX_STRING_SIZE] = { 0 };

av_make_error_string(buf,sizeof(buf), rt);

qDebug() << buf;

returnfalse;

rt= avcodec_open2(pVideoCodec, vCodec, NULL);

if (rt < 0)

char buf[AV_ERROR_MAX_STRING_SIZE] = { 0 };

av_make_error_string(buf,sizeof(buf), rt);

qDebug() << buf;

returnfalse;

rt= avcodec_open2(pAudioCodec, aCodec, NULL);

if (rt < 0)

char buf[AV_ERROR_MAX_STRING_SIZE] = { 0 };

av_make_error_string(buf,sizeof(buf), rt);

qDebug() << buf;

returnfalse;

//这里只分配了结构体的大小,需要用appicture_fill 来讲图像数据跟它联系起来

pRGBFrame= av_frame_alloc();

int bytes_num = avpicture_get_size(AV_PIX_FMT_RGB24, rWidth, rHeight);

//申请空间来存放图片数据。包含源数据和目标数据

uint8_t* buff = (uint8_t*)av_malloc(bytes_num);

avpicture_fill((AVPicture*)pRGBFrame, buff, AV_PIX_FMT_RGB24, rWidth, rHeight);

pswsContext= sws_getContext(rWidth, rHeight, pVideoCodec->pix_fmt, rWidth, rHeight, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

if (pswsContext == NULL)

char buf[AV_ERROR_MAX_STRING_SIZE] = { 0 };

av_make_error_string(buf, sizeof(buf), rt);

qDebug() << buf;

returnfalse;

//音频重采样

au_convert_ctx= swr_alloc();

au_convert_ctx= swr_alloc_set_opts(au_convert_ctx, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,

pAudioCodec->channel_layout,pAudioCodec->sample_fmt, pAudioCodec->sample_rate, 0, NULL);

rt= swr_init(au_convert_ctx);

if (rt != 0)

char buf[AV_ERROR_MAX_STRING_SIZE] = { 0 };

av_make_error_string(buf,sizeof(buf), rt);

qDebug() << buf;

returnfalse;

returntrue;

void run()

qDebug() <<"Start Play RTMP StreamThread";

qDebug() <<"Stop Play RTMP StreamThread";

void Play()

pFrame = av_frame_alloc();

int rt = -1;

longlong starttime =GetCurTime();

uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;

AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;

int bit_rate = pAudioCodec->bit_rate;

int out_linesize;

int out_buffer_size =av_samples_get_buffer_size(&out_linesize, pAudioCodec->channels,pAudioCodec->frame_size, pAudioCodec->sample_fmt, 1);

out_buffer= (uint8_t *)av_malloc(out_buffer_size);

uint8_t *outArr[2] = { 0 };

outArr[0]= out_buffer;

while (!isExit)

int i = 0;

AVPacket * packet = av_packet_alloc();

int got_pic = 0;

int got_wave = 0;

rt=  av_read_frame(pFmtContext, packet);

if (rt >= 0)

if (packet->stream_index == videoIndex)

rt= avcodec_send_packet(pVideoCodec, packet);

if (rt < 0) continue;

rt= avcodec_receive_frame(pVideoCodec, pFrame);

if (rt>=0)

sws_scale(pswsContext,(constuint8_t* const *)pFrame->data,pFrame->linesize, 0, rHeight, pRGBFrame->data, pRGBFrame->linesize);

longlong pts =GetCurTime()-starttime;

long tsize =avpicture_get_size(AV_PIX_FMT_RGB24, rWidth, rHeight);

Push(XData((char*)(pRGBFrame->data[0]), tsize,pts));

else  continue;

//开始传递图像数据

XData picData =Pop();

if (picData.size <= 0)

QThread::msleep(1);

//发送渲染图片信号

emit randerPic(picData.data, rWidth, rHeight);

//QThread::msleep(40);

elseif(packet->stream_index == audioIndex)

rt = avcodec_send_packet(pAudioCodec,packet);

if (rt < 0)

for (;;) {

rt = avcodec_receive_frame(pAudioCodec,pFrame);

rt = swr_convert(au_convert_ctx, outArr,out_linesize, (constuint8_t **)pFrame->data, pFrame->nb_samples);

if (rt < 0) continue;

int   outsize = 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)*pFrame->nb_samples;

Push(XData((char *)out_buffer,outsize, pFrame->pts));

XData waveData =  Pop();

if (waveData.size <= 0)

QThread::msleep(1);

//发送播放音频信号

书安置软件作品伴音等级(了.potplayer mac)

emit playsound(waveData.data, out_linesize);

QThread::msleep(22.3);

av_packet_free(&packet);

XPlay * XPlay::Get(intindex)

staticbool isFirst = true;

if (isFirst)

av_register_all();

int rt = avformat_network_init();

isFirst= false;

static  XRtmpPlay play[255];

return &play[index];

// Out-of-line definitions of the XPlay base constructor/destructor.
// NOTE(review): their bodies were lost in the paste; presumably empty — confirm.
XPlay::~XPlay()

XPlay::XPlay()

五:QT界面部分:

相应的界面样式:

/* Dialog background: vertical light-grey gradient.
   NOTE(review): the closing braces and the second gradient stop color were
   lost in the paste ("stop:1 #"); #FFFFFF restores a plausible light fade —
   confirm against the original article. */
QDialog {
    background: qlineargradient(spread:pad, x1:0, y1:0, x2:0, y2:1,
                                stop:0 #DBDBDB, stop:1 #FFFFFF);
}

/* Flat, rounded push buttons. */
QPushButton {
    border-radius: 4px;
    border: none;
    width: 75px;
    height: 25px;
}

QPushButton:enabled {
    background: rgb(68, 69, 73);
    color: white;
}

QPushButton:!enabled {
    background: rgb(100, 100, 100);
    color: rgb(200, 200, 200);
}

QPushButton:enabled:hover {
    background: rgb(85, 85, 85);
}

QPushButton:enabled:pressed {
    background: rgb(80, 80, 80);
}

QT界面的设计不是本教程的重点,所以请读者自己参考相关资料

六:主窗体部分代码:

#include<qaudiooutput.h>

#include<QtWidgets/QDialog>

#include<QtGui>

#include<qaudioformat.h>

#include"ui_SimplePlayer.h"

#include"XPlay.h"

classSimplePlayer : publicQDialog

SimplePlayer(QWidget *parent = Q_NULLPTR);

XPlay *xp  ;

QAudioFormat audioFmt;

QAudioOutput *audioOutPut = NULL;

QIODevice *audioIoDev = NULL;

Ui::SimplePlayerClass ui;

privateslots:

void OnPlay();

void OnStop();

void DrawPic(char *  xdata, intw, inth);

void PlayAudio(char *xdata, intlen);

#include"SimplePlayer.h"

/* Constructor: build the UI, grab the shared player instance, and wire the
 * player's signals to the rendering/audio slots. */
SimplePlayer::SimplePlayer(QWidget *parent)
    : QDialog(parent)
{
    ui.setupUi(this);
    xp = XPlay::Get(0);  // singleton pattern: obtain the shared player object
    // Connect the player's signals to DrawPic / PlayAudio.
    connect(xp, SIGNAL(randerPic(char*, int, int)), this, SLOT(DrawPic(char *, int, int)));
    connect(xp, SIGNAL(playsound(char *, int)), this, SLOT(PlayAudio(char *, int)));
}

voidSimplePlayer::OnPlay()

int width = ui.lbRanderVideo->width();

int height = ui.lbRanderVideo->height();

QString addr = "rtmp://live.hkstv.hk.lxdns.com/live/hks";

if (!xp->Init((char *)addr.toStdString().c_str(), width,height))

中国移动被熟悉potplayer设置的条件状态提示:

audioFmt.setSampleRate(44100);

audioFmt.setChannelCount(2);

audioFmt.setSampleSize(16);

audioFmt.setCodec("audio/pcm");

audioFmt.setByteOrder(QAudioFormat::LittleEndian);

audioFmt.setSampleType(QAudioFormat::SignedInt);

QAudioDeviceInfo info = QAudioDeviceInfo::defaultOutputDevice();

if (!info.isFormatSupported(audioFmt)) {

qDebug() <<"default format not supportedtry to use nearest";

audioFmt = info.nearestFormat(audioFmt);

audioOutPut = newQAudioOutput(audioFmt, this);

audioIoDev = audioOutPut->start();

xp->Start();

voidSimplePlayer::OnStop()

xp->Stop();

//在lbRanderVidow控件上绘制图像

voidSimplePlayer::DrawPic(char *xdata, intw, inth)

qDebug() <<"enter draw pictureslot:";

int width = ui.lbRanderVideo->width();

int height = ui.lbRanderVideo->height();

QImage image((uchar *)xdata, w, h, QImage::Format_RGB888);

QPixmap pixmap = QPixmap::fromImage(image.scaled(width, height));

ui.lbRanderVideo->setPixmap(pixmap);

catch (const std::exception&)

qDebug() <<"Draw Picture Error!";

free(xdata);  //记得释放

//播放音频

voidSimplePlayer::PlayAudio(char * xdata, intlen)

if (audioIoDev != NULL)

audioIoDev->write(xdata, len);

free(xdata); //记得释放

使用 FFmpeg 播放直播流的核心在于取得解码后的数据:解码后需要对图像做像素格式转换(YUV→RGB24)、对音频做重采样(→44.1kHz 立体声 S16);解码过程中将得到的音视频数据封装入队,再按队列先进先出的特点依次取出进行渲染与播放。