ffmpeg_qsv: Fix hwaccel transcoding

Set up the encoder with a hardware context which will match the one
the decoder will use when it starts later.

Includes 02c2761973, with additional
hackery to get around a3a0230a98 being
skipped.
Author: Mark Thompson
Date:   2016-11-13 15:37:52 +00:00
parent 411ecb0be6
commit 03cef34aa6
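
For context before reading the diff: the pattern being introduced looks, in isolation, roughly like the sketch below. This is a hedged illustration, not code from the commit; make_qsv_frames_ctx is a hypothetical helper name, while the libavutil calls (av_hwframe_ctx_alloc, av_hwframe_ctx_init) and the AVHWFramesContext/AVQSVFramesContext fields are the real API that the diff uses inline for both the decoder pool (64 frames) and the encoder's dummy pool (1 frame).

#include "libavutil/common.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_qsv.h"

/* Hypothetical helper (not part of the commit): bundle the frames-context
 * setup that the diff performs inline for decoder and encoder alike. */
static int make_qsv_frames_ctx(AVBufferRef *device_ref,
                               int width, int height,
                               enum AVPixelFormat sw_format,
                               int pool_size, AVBufferRef **out)
{
    AVBufferRef        *frames_ref = av_hwframe_ctx_alloc(device_ref);
    AVHWFramesContext  *frames;
    AVQSVFramesContext *frames_hwctx;
    int err;

    if (!frames_ref)
        return AVERROR(ENOMEM);

    frames       = (AVHWFramesContext*)frames_ref->data;
    frames_hwctx = frames->hwctx;

    /* Same parameters as the diff: QSV surfaces, dimensions aligned to 32. */
    frames->format            = AV_PIX_FMT_QSV;
    frames->sw_format         = sw_format;          /* e.g. AV_PIX_FMT_NV12 */
    frames->width             = FFALIGN(width,  32);
    frames->height            = FFALIGN(height, 32);
    frames->initial_pool_size = pool_size;
    frames_hwctx->frame_type  = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;

    err = av_hwframe_ctx_init(frames_ref);
    if (err < 0) {
        av_buffer_unref(&frames_ref);
        return err;
    }

    *out = frames_ref;
    return 0;
}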


ffmpeg_qsv.c
@@ -20,127 +20,74 @@
 #include <stdlib.h>
 
 #include "libavutil/dict.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_qsv.h"
 #include "libavutil/mem.h"
 #include "libavutil/opt.h"
 #include "libavcodec/qsv.h"
 
 #include "ffmpeg.h"
 
-typedef struct QSVContext {
-    OutputStream *ost;
-
-    mfxSession session;
-
-    mfxExtOpaqueSurfaceAlloc opaque_alloc;
-    AVBufferRef             *opaque_surfaces_buf;
-
-    uint8_t           *surface_used;
-    mfxFrameSurface1 **surface_ptrs;
-    int nb_surfaces;
-
-    mfxExtBuffer *ext_buffers[1];
-} QSVContext;
-
-static void buffer_release(void *opaque, uint8_t *data)
-{
-    *(uint8_t*)opaque = 0;
-}
-
 static int qsv_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
 {
     InputStream *ist = s->opaque;
-    QSVContext  *qsv = ist->hwaccel_ctx;
-    int i;
 
-    for (i = 0; i < qsv->nb_surfaces; i++) {
-        if (qsv->surface_used[i])
-            continue;
-
-        frame->buf[0] = av_buffer_create((uint8_t*)qsv->surface_ptrs[i], sizeof(*qsv->surface_ptrs[i]),
-                                         buffer_release, &qsv->surface_used[i], 0);
-        if (!frame->buf[0])
-            return AVERROR(ENOMEM);
-
-        frame->data[3]       = (uint8_t*)qsv->surface_ptrs[i];
-        qsv->surface_used[i] = 1;
-
-        return 0;
-    }
-
-    return AVERROR(ENOMEM);
-}
-
-static int init_opaque_surf(QSVContext *qsv)
-{
-    AVQSVContext *hwctx_enc = qsv->ost->enc_ctx->hwaccel_context;
-    mfxFrameSurface1 *surfaces;
-    int i;
-
-    qsv->nb_surfaces = hwctx_enc->nb_opaque_surfaces;
-
-    qsv->opaque_surfaces_buf = av_buffer_ref(hwctx_enc->opaque_surfaces);
-    qsv->surface_ptrs        = av_mallocz_array(qsv->nb_surfaces, sizeof(*qsv->surface_ptrs));
-    qsv->surface_used        = av_mallocz_array(qsv->nb_surfaces, sizeof(*qsv->surface_used));
-    if (!qsv->opaque_surfaces_buf || !qsv->surface_ptrs || !qsv->surface_used)
-        return AVERROR(ENOMEM);
-
-    surfaces = (mfxFrameSurface1*)qsv->opaque_surfaces_buf->data;
-    for (i = 0; i < qsv->nb_surfaces; i++)
-        qsv->surface_ptrs[i] = surfaces + i;
-
-    qsv->opaque_alloc.Out.Surfaces   = qsv->surface_ptrs;
-    qsv->opaque_alloc.Out.NumSurface = qsv->nb_surfaces;
-    qsv->opaque_alloc.Out.Type       = hwctx_enc->opaque_alloc_type;
-
-    qsv->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
-    qsv->opaque_alloc.Header.BufferSz = sizeof(qsv->opaque_alloc);
-
-    qsv->ext_buffers[0] = (mfxExtBuffer*)&qsv->opaque_alloc;
-
-    return 0;
+    return av_hwframe_get_buffer(ist->hw_frames_ctx, frame, 0);
 }
 
 static void qsv_uninit(AVCodecContext *s)
 {
     InputStream *ist = s->opaque;
-    QSVContext  *qsv = ist->hwaccel_ctx;
-
-    av_freep(&qsv->ost->enc_ctx->hwaccel_context);
-    av_freep(&s->hwaccel_context);
-
-    av_buffer_unref(&qsv->opaque_surfaces_buf);
-    av_freep(&qsv->surface_used);
-    av_freep(&qsv->surface_ptrs);
-
-    av_freep(&qsv);
+    av_buffer_unref(&ist->hw_frames_ctx);
+}
+
+static int qsv_device_init(InputStream *ist)
+{
+    int err;
+
+    err = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_QSV,
+                                 ist->hwaccel_device, NULL, 0);
+    if (err < 0) {
+        av_log(NULL, AV_LOG_ERROR, "Error creating a QSV device\n");
+        return err;
+    }
+
+    return 0;
 }
 
 int qsv_init(AVCodecContext *s)
 {
     InputStream *ist = s->opaque;
-    QSVContext  *qsv = ist->hwaccel_ctx;
-    AVQSVContext *hwctx_dec;
+    AVHWFramesContext *frames_ctx;
+    AVQSVFramesContext *frames_hwctx;
     int ret;
 
-    if (!qsv) {
-        av_log(NULL, AV_LOG_ERROR, "QSV transcoding is not initialized. "
-               "-hwaccel qsv should only be used for one-to-one QSV transcoding "
-               "with no filters.\n");
-        return AVERROR_BUG;
+    if (!hw_device_ctx) {
+        ret = qsv_device_init(ist);
+        if (ret < 0)
+            return ret;
     }
 
-    ret = init_opaque_surf(qsv);
-    if (ret < 0)
-        return ret;
-
-    hwctx_dec = av_qsv_alloc_context();
-    if (!hwctx_dec)
+    av_buffer_unref(&ist->hw_frames_ctx);
+    ist->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx);
+    if (!ist->hw_frames_ctx)
         return AVERROR(ENOMEM);
 
-    hwctx_dec->session        = qsv->session;
-    hwctx_dec->iopattern      = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
-    hwctx_dec->ext_buffers    = qsv->ext_buffers;
-    hwctx_dec->nb_ext_buffers = FF_ARRAY_ELEMS(qsv->ext_buffers);
+    frames_ctx   = (AVHWFramesContext*)ist->hw_frames_ctx->data;
+    frames_hwctx = frames_ctx->hwctx;
 
-    av_freep(&s->hwaccel_context);
-    s->hwaccel_context = hwctx_dec;
+    frames_ctx->width             = FFALIGN(s->coded_width,  32);
+    frames_ctx->height            = FFALIGN(s->coded_height, 32);
+    frames_ctx->format            = AV_PIX_FMT_QSV;
+    frames_ctx->sw_format         = s->sw_pix_fmt;
+    frames_ctx->initial_pool_size = 64;
+    frames_hwctx->frame_type      = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
+
+    ret = av_hwframe_ctx_init(ist->hw_frames_ctx);
+    if (ret < 0) {
+        av_log(NULL, AV_LOG_ERROR, "Error initializing a QSV frame pool\n");
+        return ret;
+    }
 
     ist->hwaccel_get_buffer = qsv_get_buffer;
     ist->hwaccel_uninit     = qsv_uninit;
@@ -148,53 +95,15 @@ int qsv_init(AVCodecContext *s)
     return 0;
 }
 
-static mfxIMPL choose_implementation(const InputStream *ist)
-{
-    static const struct {
-        const char *name;
-        mfxIMPL     impl;
-    } impl_map[] = {
-        { "auto",     MFX_IMPL_AUTO         },
-        { "sw",       MFX_IMPL_SOFTWARE     },
-        { "hw",       MFX_IMPL_HARDWARE     },
-        { "auto_any", MFX_IMPL_AUTO_ANY     },
-        { "hw_any",   MFX_IMPL_HARDWARE_ANY },
-        { "hw2",      MFX_IMPL_HARDWARE2    },
-        { "hw3",      MFX_IMPL_HARDWARE3    },
-        { "hw4",      MFX_IMPL_HARDWARE4    },
-    };
-
-    mfxIMPL impl = MFX_IMPL_AUTO_ANY;
-    int i;
-
-    if (ist->hwaccel_device) {
-        for (i = 0; i < FF_ARRAY_ELEMS(impl_map); i++)
-            if (!strcmp(ist->hwaccel_device, impl_map[i].name)) {
-                impl = impl_map[i].impl;
-                break;
-            }
-        if (i == FF_ARRAY_ELEMS(impl_map))
-            impl = strtol(ist->hwaccel_device, NULL, 0);
-    }
-
-    return impl;
-}
-
 int qsv_transcode_init(OutputStream *ost)
 {
     InputStream *ist;
     const enum AVPixelFormat *pix_fmt;
 
-    AVDictionaryEntry *e;
-    const AVOption *opt;
-    int flags = 0;
-
     int err, i;
-
-    QSVContext *qsv = NULL;
-    AVQSVContext *hwctx = NULL;
-    mfxIMPL impl;
-    mfxVersion ver = { { 3, 1 } };
+    AVBufferRef *encode_frames_ref = NULL;
+    AVHWFramesContext *encode_frames;
+    AVQSVFramesContext *qsv_frames;
 
     /* check if the encoder supports QSV */
     if (!ost->enc->pix_fmts)
@@ -225,43 +134,45 @@ int qsv_transcode_init(OutputStream *ost)
     av_log(NULL, AV_LOG_VERBOSE, "Setting up QSV transcoding\n");
 
-    qsv   = av_mallocz(sizeof(*qsv));
-    hwctx = av_qsv_alloc_context();
-    if (!qsv || !hwctx)
-        goto fail;
-
-    impl = choose_implementation(ist);
-
-    err = MFXInit(impl, &ver, &qsv->session);
-    if (err != MFX_ERR_NONE) {
-        av_log(NULL, AV_LOG_ERROR, "Error initializing an MFX session: %d\n", err);
-        goto fail;
+    if (!hw_device_ctx) {
+        err = qsv_device_init(ist);
+        if (err < 0)
+            goto fail;
     }
 
-    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
-    opt = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
-    if (e && opt)
-        av_opt_eval_flags(ost->enc_ctx, opt, e->value, &flags);
-
-    qsv->ost = ost;
+    // This creates a dummy hw_frames_ctx for the encoder to be
+    // suitably initialised.  It only contains one real frame, so
+    // hopefully doesn't waste too much memory.
+
+    encode_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx);
+    if (!encode_frames_ref) {
+        err = AVERROR(ENOMEM);
+        goto fail;
+    }
+    encode_frames = (AVHWFramesContext*)encode_frames_ref->data;
+    qsv_frames = encode_frames->hwctx;
 
-    hwctx->session            = qsv->session;
-    hwctx->iopattern          = MFX_IOPATTERN_IN_OPAQUE_MEMORY;
-    hwctx->opaque_alloc       = 1;
-    hwctx->nb_opaque_surfaces = 16;
+    encode_frames->width     = FFALIGN(ist->resample_width,  32);
+    encode_frames->height    = FFALIGN(ist->resample_height, 32);
+    encode_frames->format    = AV_PIX_FMT_QSV;
+    encode_frames->sw_format = AV_PIX_FMT_NV12;
+    encode_frames->initial_pool_size = 1;
 
-    ost->hwaccel_ctx              = qsv;
-    ost->enc_ctx->hwaccel_context = hwctx;
-    ost->enc_ctx->pix_fmt         = AV_PIX_FMT_QSV;
+    qsv_frames->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
 
-    ist->hwaccel_ctx        = qsv;
-    ist->dec_ctx->pix_fmt   = AV_PIX_FMT_QSV;
-    ist->resample_pix_fmt   = AV_PIX_FMT_QSV;
+    err = av_hwframe_ctx_init(encode_frames_ref);
+    if (err < 0)
+        goto fail;
+
+    ist->dec_ctx->pix_fmt       = AV_PIX_FMT_QSV;
+    ist->resample_pix_fmt       = AV_PIX_FMT_QSV;
+
+    ost->enc_ctx->pix_fmt       = AV_PIX_FMT_QSV;
+    ost->enc_ctx->hw_frames_ctx = encode_frames_ref;
 
     return 0;
 
 fail:
-    av_freep(&hwctx);
-    av_freep(&qsv);
-    return AVERROR_UNKNOWN;
+    av_buffer_unref(&encode_frames_ref);
+    return err;
 }
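
To see how the pieces connect end to end, here is a hedged usage sketch built on the hypothetical make_qsv_frames_ctx helper from the note above (attach_qsv_encode_ctx is likewise a made-up name): create the QSV device, build a one-frame dummy pool on it, and attach that pool to the encoder before it is opened, so it matches the frames context the decoder sets up later in qsv_init().

#include "libavcodec/avcodec.h"
#include "libavutil/hwcontext.h"

/* Hypothetical wrapper stringing together the steps the diff adds to
 * qsv_transcode_init(): shared device, one-frame dummy pool, and the
 * encoder handed both the pixel format and the frames context up front. */
static int attach_qsv_encode_ctx(AVCodecContext *enc_ctx,
                                 const char *device, int width, int height)
{
    AVBufferRef *device_ref = NULL;
    AVBufferRef *enc_frames = NULL;
    int err;

    err = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_QSV,
                                 device, NULL, 0);
    if (err < 0)
        return err;

    err = make_qsv_frames_ctx(device_ref, width, height,
                              AV_PIX_FMT_NV12, 1, &enc_frames);
    av_buffer_unref(&device_ref); /* the frames ctx keeps its own device ref */
    if (err < 0)
        return err;

    enc_ctx->pix_fmt       = AV_PIX_FMT_QSV;
    enc_ctx->hw_frames_ctx = enc_frames; /* enc_ctx owns the reference now,
                                            matching the diff's assignment */
    return 0;
}

The one-frame pool is the "hackery" the commit message mentions: the encoder only needs a valid hw_frames_ctx at open time to learn the device and surface parameters, while the frames it actually encodes come from the decoder's own 64-frame pool on the same device.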