[FFmpeg-cvslog] Merge commit 'a5a6ac1a123a927e5bed984ed757a29b7ff87dab'

James Almer git at videolan.org
Sat Nov 11 20:08:25 EET 2017


ffmpeg | branch: master | James Almer <jamrial at gmail.com> | Sat Nov 11 14:46:24 2017 -0300| [4391d6cb8180f1261e38a588b7c4ffc457531cb2] | committer: James Almer

Merge commit 'a5a6ac1a123a927e5bed984ed757a29b7ff87dab'

* commit 'a5a6ac1a123a927e5bed984ed757a29b7ff87dab':
  libavfilter/overlay_qsv: Add QSV overlay vpp filter
  libavfilter/vf_vpp: Add common filters of the qsv vpp

Merged-by: James Almer <jamrial at gmail.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=4391d6cb8180f1261e38a588b7c4ffc457531cb2
---

 Changelog                    |   1 +
 configure                    |   6 +
 libavfilter/Makefile         |   7 +
 libavfilter/allfilters.c     |   2 +
 libavfilter/qsvvpp.c         | 727 +++++++++++++++++++++++++++++++++++++++++++
 libavfilter/qsvvpp.h         |  66 ++++
 libavfilter/vf_overlay_qsv.c | 487 +++++++++++++++++++++++++++++
 libavfilter/vf_vpp_qsv.c     | 401 ++++++++++++++++++++++++
 8 files changed, 1697 insertions(+)

diff --git a/Changelog b/Changelog
index cd4a841619..869ecde162 100644
--- a/Changelog
+++ b/Changelog
@@ -14,6 +14,7 @@ version <next>:
 - native aptX encoder and decoder
 - Raw aptX muxer and demuxer
 - NVIDIA NVDEC-accelerated H.264 and HEVC hwaccel decoding
+- Intel QSV-accelerated overlay filter
 
 
 version 3.4:
diff --git a/configure b/configure
index f9a27b5019..d949fecbef 100755
--- a/configure
+++ b/configure
@@ -2196,6 +2196,7 @@ CONFIG_EXTRA="
     qsv
     qsvdec
     qsvenc
+    qsvvpp
     rangecoder
     riffdec
     riffenc
@@ -2783,6 +2784,7 @@ omx_rpi_select="omx"
 qsv_deps="libmfx"
 qsvdec_select="qsv"
 qsvenc_select="qsv"
+qsvvpp_select="qsv"
 vaapi_encode_deps="vaapi"
 v4l2_m2m_deps_any="linux_videodev2_h"
 
@@ -3228,6 +3230,8 @@ negate_filter_deps="lut_filter"
 nnedi_filter_deps="gpl"
 ocr_filter_deps="libtesseract"
 ocv_filter_deps="libopencv"
+overlay_qsv_filter_deps="libmfx"
+overlay_qsv_filter_select="qsvvpp"
 owdenoise_filter_deps="gpl"
 pan_filter_deps="swresample"
 perspective_filter_deps="gpl"
@@ -3279,6 +3283,8 @@ zmq_filter_deps="libzmq"
 zoompan_filter_deps="swscale"
 zscale_filter_deps="libzimg const_nan"
 scale_vaapi_filter_deps="vaapi VAProcPipelineParameterBuffer"
+vpp_qsv_filter_deps="libmfx"
+vpp_qsv_filter_select="qsvvpp"
 
 # examples
 avio_dir_cmd_deps="avformat avutil"
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 9873532d07..b7ddcd226d 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -25,6 +25,9 @@ OBJS = allfilters.o                                                     \
 
 OBJS-$(HAVE_THREADS)                         += pthread.o
 
+# subsystems
+OBJS-$(CONFIG_QSVVPP)                        += qsvvpp.o
+
 # audio filters
 OBJS-$(CONFIG_ABENCH_FILTER)                 += f_bench.o
 OBJS-$(CONFIG_ACOMPRESSOR_FILTER)            += af_sidechaincompress.o
@@ -248,6 +251,7 @@ OBJS-$(CONFIG_OCV_FILTER)                    += vf_libopencv.o
 OBJS-$(CONFIG_OPENCL)                        += deshake_opencl.o unsharp_opencl.o
 OBJS-$(CONFIG_OSCILLOSCOPE_FILTER)           += vf_datascope.o
 OBJS-$(CONFIG_OVERLAY_FILTER)                += vf_overlay.o framesync.o
+OBJS-$(CONFIG_OVERLAY_QSV_FILTER)            += vf_overlay_qsv.o
 OBJS-$(CONFIG_OWDENOISE_FILTER)              += vf_owdenoise.o
 OBJS-$(CONFIG_PAD_FILTER)                    += vf_pad.o
 OBJS-$(CONFIG_PALETTEGEN_FILTER)             += vf_palettegen.o
@@ -331,6 +335,7 @@ OBJS-$(CONFIG_VIDSTABDETECT_FILTER)          += vidstabutils.o vf_vidstabdetect.
 OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER)       += vidstabutils.o vf_vidstabtransform.o
 OBJS-$(CONFIG_VIGNETTE_FILTER)               += vf_vignette.o
 OBJS-$(CONFIG_VMAFMOTION_FILTER)             += vf_vmafmotion.o framesync.o
+OBJS-$(CONFIG_VPP_QSV_FILTER)                += vf_vpp_qsv.o
 OBJS-$(CONFIG_VSTACK_FILTER)                 += vf_stack.o framesync.o
 OBJS-$(CONFIG_W3FDIF_FILTER)                 += vf_w3fdif.o
 OBJS-$(CONFIG_WAVEFORM_FILTER)               += vf_waveform.o
@@ -389,6 +394,8 @@ SKIPHEADERS-$(CONFIG_OPENCL)                 += opencl_internal.h deshake_opencl
 
 OBJS-$(CONFIG_SHARED)                        += log2_tab.o
 
+SKIPHEADERS-$(CONFIG_QSVVPP)                 += qsvvpp.h
+
 TOOLS     = graph2dot
 TESTPROGS = drawutils filtfmts formats integral
 
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 9b672a7a7e..3647a111ec 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -260,6 +260,7 @@ static void register_all(void)
     REGISTER_FILTER(OCV,            ocv,            vf);
     REGISTER_FILTER(OSCILLOSCOPE,   oscilloscope,   vf);
     REGISTER_FILTER(OVERLAY,        overlay,        vf);
+    REGISTER_FILTER(OVERLAY_QSV,    overlay_qsv,    vf);
     REGISTER_FILTER(OWDENOISE,      owdenoise,      vf);
     REGISTER_FILTER(PAD,            pad,            vf);
     REGISTER_FILTER(PALETTEGEN,     palettegen,     vf);
@@ -343,6 +344,7 @@ static void register_all(void)
     REGISTER_FILTER(VIDSTABTRANSFORM, vidstabtransform, vf);
     REGISTER_FILTER(VIGNETTE,       vignette,       vf);
     REGISTER_FILTER(VMAFMOTION,     vmafmotion,     vf);
+    REGISTER_FILTER(VPP_QSV,        vpp_qsv,        vf);
     REGISTER_FILTER(VSTACK,         vstack,         vf);
     REGISTER_FILTER(W3FDIF,         w3fdif,         vf);
     REGISTER_FILTER(WAVEFORM,       waveform,       vf);
diff --git a/libavfilter/qsvvpp.c b/libavfilter/qsvvpp.c
new file mode 100644
index 0000000000..37805f2323
--- /dev/null
+++ b/libavfilter/qsvvpp.c
@@ -0,0 +1,727 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Intel Quick Sync Video VPP base function
+ */
+
+#include "libavutil/common.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_qsv.h"
+#include "libavutil/time.h"
+#include "libavutil/pixdesc.h"
+
+#include "internal.h"
+#include "qsvvpp.h"
+#include "video.h"
+
+#define IS_VIDEO_MEMORY(mode)  (mode & (MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET | \
+                                        MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET))
+#define IS_OPAQUE_MEMORY(mode) (mode & MFX_MEMTYPE_OPAQUE_FRAME)
+#define IS_SYSTEM_MEMORY(mode) (mode & MFX_MEMTYPE_SYSTEM_MEMORY)
+
+typedef struct QSVFrame {
+    AVFrame          *frame;
+    mfxFrameSurface1 *surface;
+    mfxFrameSurface1  surface_internal;  /* for system memory */
+    struct QSVFrame  *next;
+} QSVFrame;
+
+/* abstract struct for all QSV filters */
+struct QSVVPPContext {
+    mfxSession          session;
+    int (*filter_frame) (AVFilterLink *outlink, AVFrame *frame);/* callback */
+    enum AVPixelFormat  out_sw_format;   /* Real output format */
+    mfxVideoParam       vpp_param;
+    mfxFrameInfo       *frame_infos;     /* frame info for each input */
+
+    /* members related to the input/output surface */
+    int                 in_mem_mode;
+    int                 out_mem_mode;
+    QSVFrame           *in_frame_list;
+    QSVFrame           *out_frame_list;
+    int                 nb_surface_ptrs_in;
+    int                 nb_surface_ptrs_out;
+    mfxFrameSurface1  **surface_ptrs_in;
+    mfxFrameSurface1  **surface_ptrs_out;
+
+    /* MFXVPP extern parameters */
+    mfxExtOpaqueSurfaceAlloc opaque_alloc;
+    mfxExtBuffer      **ext_buffers;
+    int                 nb_ext_buffers;
+};
+
+static const mfxHandleType handle_types[] = {
+    MFX_HANDLE_VA_DISPLAY,
+    MFX_HANDLE_D3D9_DEVICE_MANAGER,
+    MFX_HANDLE_D3D11_DEVICE,
+};
+
+static const AVRational default_tb = { 1, 90000 };
+
+/* functions for frameAlloc */
+static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
+                             mfxFrameAllocResponse *resp)
+{
+    QSVVPPContext *s = pthis;
+    int i;
+
+    if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
+        !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
+        !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
+        return MFX_ERR_UNSUPPORTED;
+
+    if (req->Type & MFX_MEMTYPE_FROM_VPPIN) {
+        resp->mids = av_mallocz(s->nb_surface_ptrs_in * sizeof(*resp->mids));
+        if (!resp->mids)
+            return AVERROR(ENOMEM);
+
+        for (i = 0; i < s->nb_surface_ptrs_in; i++)
+            resp->mids[i] = s->surface_ptrs_in[i]->Data.MemId;
+
+        resp->NumFrameActual = s->nb_surface_ptrs_in;
+    } else {
+        resp->mids = av_mallocz(s->nb_surface_ptrs_out * sizeof(*resp->mids));
+        if (!resp->mids)
+            return AVERROR(ENOMEM);
+
+        for (i = 0; i < s->nb_surface_ptrs_out; i++)
+            resp->mids[i] = s->surface_ptrs_out[i]->Data.MemId;
+
+        resp->NumFrameActual = s->nb_surface_ptrs_out;
+    }
+
+    return MFX_ERR_NONE;
+}
+
+static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
+{
+    av_freep(&resp->mids);
+    return MFX_ERR_NONE;
+}
+
+static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
+{
+    return MFX_ERR_UNSUPPORTED;
+}
+
+static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
+{
+    return MFX_ERR_UNSUPPORTED;
+}
+
+static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
+{
+    *hdl = mid;
+    return MFX_ERR_NONE;
+}
+
+static int pix_fmt_to_mfx_fourcc(int format)
+{
+    switch (format) {
+    case AV_PIX_FMT_YUV420P:
+        return MFX_FOURCC_YV12;
+    case AV_PIX_FMT_NV12:
+        return MFX_FOURCC_NV12;
+    case AV_PIX_FMT_YUYV422:
+        return MFX_FOURCC_YUY2;
+    case AV_PIX_FMT_RGB32:
+        return MFX_FOURCC_RGB4;
+    }
+
+    return MFX_FOURCC_NV12;
+}
+
+static int map_frame_to_surface(AVFrame *frame, mfxFrameSurface1 *surface)
+{
+    switch (frame->format) {
+    case AV_PIX_FMT_NV12:
+        surface->Data.Y  = frame->data[0];
+        surface->Data.UV = frame->data[1];
+        break;
+    case AV_PIX_FMT_YUV420P:
+        surface->Data.Y = frame->data[0];
+        surface->Data.U = frame->data[1];
+        surface->Data.V = frame->data[2];
+        break;
+    case AV_PIX_FMT_YUYV422:
+        surface->Data.Y = frame->data[0];
+        surface->Data.U = frame->data[0] + 1;
+        surface->Data.V = frame->data[0] + 3;
+        break;
+    case AV_PIX_FMT_RGB32:
+        surface->Data.B = frame->data[0];
+        surface->Data.G = frame->data[0] + 1;
+        surface->Data.R = frame->data[0] + 2;
+        surface->Data.A = frame->data[0] + 3;
+        break;
+    default:
+        return MFX_ERR_UNSUPPORTED;
+    }
+    surface->Data.Pitch = frame->linesize[0];
+
+    return 0;
+}
+
+/* fill the surface info */
+static int fill_frameinfo_by_link(mfxFrameInfo *frameinfo, AVFilterLink *link)
+{
+    enum AVPixelFormat        pix_fmt;
+    AVHWFramesContext        *frames_ctx;
+    AVQSVFramesContext       *frames_hwctx;
+    const AVPixFmtDescriptor *desc;
+
+    if (link->format == AV_PIX_FMT_QSV) {
+        if (!link->hw_frames_ctx)
+            return AVERROR(EINVAL);
+
+        frames_ctx   = (AVHWFramesContext *)link->hw_frames_ctx->data;
+        frames_hwctx = frames_ctx->hwctx;
+        *frameinfo   = frames_hwctx->surfaces[0].Info;
+    } else {
+        pix_fmt = link->format;
+        desc = av_pix_fmt_desc_get(pix_fmt);
+        if (!desc)
+            return AVERROR_BUG;
+
+        frameinfo->CropX          = 0;
+        frameinfo->CropY          = 0;
+        frameinfo->Width          = FFALIGN(link->w, 32);
+        frameinfo->Height         = FFALIGN(link->h, 32);
+        frameinfo->PicStruct      = MFX_PICSTRUCT_PROGRESSIVE;
+        frameinfo->FourCC         = pix_fmt_to_mfx_fourcc(pix_fmt);
+        frameinfo->BitDepthLuma   = desc->comp[0].depth;
+        frameinfo->BitDepthChroma = desc->comp[0].depth;
+        frameinfo->Shift          = desc->comp[0].depth > 8;
+        if (desc->log2_chroma_w && desc->log2_chroma_h)
+            frameinfo->ChromaFormat = MFX_CHROMAFORMAT_YUV420;
+        else if (desc->log2_chroma_w)
+            frameinfo->ChromaFormat = MFX_CHROMAFORMAT_YUV422;
+        else
+            frameinfo->ChromaFormat = MFX_CHROMAFORMAT_YUV444;
+    }
+
+    frameinfo->CropW          = link->w;
+    frameinfo->CropH          = link->h;
+    frameinfo->FrameRateExtN  = link->frame_rate.num;
+    frameinfo->FrameRateExtD  = link->frame_rate.den;
+    frameinfo->AspectRatioW   = link->sample_aspect_ratio.num ? link->sample_aspect_ratio.num : 1;
+    frameinfo->AspectRatioH   = link->sample_aspect_ratio.den ? link->sample_aspect_ratio.den : 1;
+
+    return 0;
+}
+
+static void clear_unused_frames(QSVFrame *list)
+{
+    while (list) {
+        if (list->surface && !list->surface->Data.Locked) {
+            list->surface = NULL;
+            av_frame_free(&list->frame);
+        }
+        list = list->next;
+    }
+}
+
+static void clear_frame_list(QSVFrame **list)
+{
+    while (*list) {
+        QSVFrame *frame;
+
+        frame = *list;
+        *list = (*list)->next;
+        av_frame_free(&frame->frame);
+        av_freep(&frame);
+    }
+}
+
+static QSVFrame *get_free_frame(QSVFrame **list)
+{
+    QSVFrame *out = *list;
+
+    for (; out; out = out->next) {
+        if (!out->surface)
+            break;
+    }
+
+    if (!out) {
+        out = av_mallocz(sizeof(*out));
+        if (!out) {
+            av_log(NULL, AV_LOG_ERROR, "Can't alloc new output frame.\n");
+            return NULL;
+        }
+        out->next  = *list;
+        *list      = out;
+    }
+
+    return out;
+}
+
+/* get the input surface */
+static QSVFrame *submit_frame(QSVVPPContext *s, AVFilterLink *inlink, AVFrame *picref)
+{
+    QSVFrame        *qsv_frame;
+    AVFilterContext *ctx = inlink->dst;
+
+    clear_unused_frames(s->in_frame_list);
+
+    qsv_frame = get_free_frame(&s->in_frame_list);
+    if (!qsv_frame)
+        return NULL;
+
+    /* Turn AVFrame into mfxFrameSurface1.
+     * For video/opaque memory mode, pix_fmt is AV_PIX_FMT_QSV, and
+     * mfxFrameSurface1 is stored in AVFrame->data[3];
+     * for system memory mode, raw video data is stored in
+     * AVFrame, we should map it into mfxFrameSurface1.
+     */
+    if (!IS_SYSTEM_MEMORY(s->in_mem_mode)) {
+        if (picref->format != AV_PIX_FMT_QSV) {
+            av_log(ctx, AV_LOG_ERROR, "QSVVPP gets a wrong frame.\n");
+            return NULL;
+        }
+        qsv_frame->frame   = picref;
+        qsv_frame->surface = (mfxFrameSurface1 *)qsv_frame->frame->data[3];
+    } else {
+        /* make a copy if the input is not padded as libmfx requires */
+        if (picref->height & 31 || picref->linesize[0] & 31) {
+            qsv_frame->frame = ff_get_video_buffer(inlink,
+                                                   FFALIGN(inlink->w, 32),
+                                                   FFALIGN(inlink->h, 32));
+            if (!qsv_frame->frame)
+                return NULL;
+
+            qsv_frame->frame->width   = picref->width;
+            qsv_frame->frame->height  = picref->height;
+
+            if (av_frame_copy(qsv_frame->frame, picref) < 0) {
+                av_frame_free(&qsv_frame->frame);
+                return NULL;
+            }
+
+            av_frame_copy_props(qsv_frame->frame, picref);
+            av_frame_free(&picref);
+        } else
+            qsv_frame->frame = picref;
+
+        if (map_frame_to_surface(qsv_frame->frame,
+                                &qsv_frame->surface_internal) < 0) {
+            av_log(ctx, AV_LOG_ERROR, "Unsupported frame.\n");
+            return NULL;
+        }
+        qsv_frame->surface = &qsv_frame->surface_internal;
+    }
+
+    qsv_frame->surface->Info           = s->frame_infos[FF_INLINK_IDX(inlink)];
+    qsv_frame->surface->Data.TimeStamp = av_rescale_q(qsv_frame->frame->pts,
+                                                      inlink->time_base, default_tb);
+
+    qsv_frame->surface->Info.PicStruct =
+            !qsv_frame->frame->interlaced_frame ? MFX_PICSTRUCT_PROGRESSIVE :
+            (qsv_frame->frame->top_field_first ? MFX_PICSTRUCT_FIELD_TFF :
+                                                 MFX_PICSTRUCT_FIELD_BFF);
+    if (qsv_frame->frame->repeat_pict == 1)
+        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FIELD_REPEATED;
+    else if (qsv_frame->frame->repeat_pict == 2)
+        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING;
+    else if (qsv_frame->frame->repeat_pict == 4)
+        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FRAME_TRIPLING;
+
+    return qsv_frame;
+}
+
+/* get the output surface */
+static QSVFrame *query_frame(QSVVPPContext *s, AVFilterLink *outlink)
+{
+    AVFilterContext *ctx = outlink->src;
+    QSVFrame        *out_frame;
+    int              ret;
+
+    clear_unused_frames(s->out_frame_list);
+
+    out_frame = get_free_frame(&s->out_frame_list);
+    if (!out_frame)
+        return NULL;
+
+    /* For video memory, get a hw frame;
+     * For system memory, get a sw frame and map it into a mfx_surface. */
+    if (!IS_SYSTEM_MEMORY(s->out_mem_mode)) {
+        out_frame->frame = av_frame_alloc();
+        if (!out_frame->frame)
+            return NULL;
+
+        ret = av_hwframe_get_buffer(outlink->hw_frames_ctx, out_frame->frame, 0);
+        if (ret < 0) {
+            av_log(ctx, AV_LOG_ERROR, "Can't allocate a surface.\n");
+            return NULL;
+        }
+
+        out_frame->surface = (mfxFrameSurface1 *)out_frame->frame->data[3];
+    } else {
+        /* Get a frame with aligned dimensions.
+         * libmfx requires system memory to be 128x64 aligned */
+        out_frame->frame = ff_get_video_buffer(outlink,
+                                               FFALIGN(outlink->w, 128),
+                                               FFALIGN(outlink->h, 64));
+        if (!out_frame->frame)
+            return NULL;
+
+        out_frame->frame->width  = outlink->w;
+        out_frame->frame->height = outlink->h;
+
+        ret = map_frame_to_surface(out_frame->frame,
+                                  &out_frame->surface_internal);
+        if (ret < 0)
+            return NULL;
+
+        out_frame->surface = &out_frame->surface_internal;
+    }
+
+    out_frame->surface->Info = s->vpp_param.vpp.Out;
+
+    return out_frame;
+}
+
+/* create the QSV session */
+static int init_vpp_session(AVFilterContext *avctx, QSVVPPContext *s)
+{
+    AVFilterLink                 *inlink = avctx->inputs[0];
+    AVFilterLink                *outlink = avctx->outputs[0];
+    AVQSVFramesContext  *in_frames_hwctx = NULL;
+    AVQSVFramesContext *out_frames_hwctx = NULL;
+
+    AVBufferRef *device_ref;
+    AVHWDeviceContext *device_ctx;
+    AVQSVDeviceContext *device_hwctx;
+    mfxHDL handle;
+    mfxHandleType handle_type;
+    mfxVersion ver;
+    mfxIMPL impl;
+    int ret, i;
+
+    if (inlink->hw_frames_ctx) {
+        AVHWFramesContext *frames_ctx = (AVHWFramesContext *)inlink->hw_frames_ctx->data;
+
+        device_ref      = frames_ctx->device_ref;
+        in_frames_hwctx = frames_ctx->hwctx;
+
+        s->in_mem_mode = in_frames_hwctx->frame_type;
+
+        s->surface_ptrs_in = av_mallocz_array(in_frames_hwctx->nb_surfaces,
+                                              sizeof(*s->surface_ptrs_in));
+        if (!s->surface_ptrs_in)
+            return AVERROR(ENOMEM);
+
+        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
+            s->surface_ptrs_in[i] = in_frames_hwctx->surfaces + i;
+
+        s->nb_surface_ptrs_in = in_frames_hwctx->nb_surfaces;
+    } else if (avctx->hw_device_ctx) {
+        device_ref     = avctx->hw_device_ctx;
+        s->in_mem_mode = MFX_MEMTYPE_SYSTEM_MEMORY;
+    } else {
+        av_log(avctx, AV_LOG_ERROR, "No hw context provided.\n");
+        return AVERROR(EINVAL);
+    }
+
+    device_ctx   = (AVHWDeviceContext *)device_ref->data;
+    device_hwctx = device_ctx->hwctx;
+
+    if (outlink->format == AV_PIX_FMT_QSV) {
+        AVHWFramesContext *out_frames_ctx;
+        AVBufferRef *out_frames_ref = av_hwframe_ctx_alloc(device_ref);
+        if (!out_frames_ref)
+            return AVERROR(ENOMEM);
+
+        s->out_mem_mode = IS_OPAQUE_MEMORY(s->in_mem_mode) ?
+                          MFX_MEMTYPE_OPAQUE_FRAME :
+                          MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
+
+        out_frames_ctx   = (AVHWFramesContext *)out_frames_ref->data;
+        out_frames_hwctx = out_frames_ctx->hwctx;
+
+        out_frames_ctx->format            = AV_PIX_FMT_QSV;
+        out_frames_ctx->width             = FFALIGN(outlink->w, 32);
+        out_frames_ctx->height            = FFALIGN(outlink->h, 32);
+        out_frames_ctx->sw_format         = s->out_sw_format;
+        out_frames_ctx->initial_pool_size = 64;
+        out_frames_hwctx->frame_type      = s->out_mem_mode;
+
+        ret = av_hwframe_ctx_init(out_frames_ref);
+        if (ret < 0) {
+            av_buffer_unref(&out_frames_ref);
+            av_log(avctx, AV_LOG_ERROR, "Error creating frames_ctx for output pad.\n");
+            return ret;
+        }
+
+        s->surface_ptrs_out = av_mallocz_array(out_frames_hwctx->nb_surfaces,
+                                               sizeof(*s->surface_ptrs_out));
+        if (!s->surface_ptrs_out) {
+            av_buffer_unref(&out_frames_ref);
+            return AVERROR(ENOMEM);
+        }
+
+        for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
+            s->surface_ptrs_out[i] = out_frames_hwctx->surfaces + i;
+        s->nb_surface_ptrs_out = out_frames_hwctx->nb_surfaces;
+
+        av_buffer_unref(&outlink->hw_frames_ctx);
+        outlink->hw_frames_ctx = out_frames_ref;
+    } else
+        s->out_mem_mode = MFX_MEMTYPE_SYSTEM_MEMORY;
+
+    /* extract the properties of the "master" session given to us */
+    ret = MFXQueryIMPL(device_hwctx->session, &impl);
+    if (ret == MFX_ERR_NONE)
+        ret = MFXQueryVersion(device_hwctx->session, &ver);
+    if (ret != MFX_ERR_NONE) {
+        av_log(avctx, AV_LOG_ERROR, "Error querying the session attributes\n");
+        return AVERROR_UNKNOWN;
+    }
+
+    for (i = 0; i < FF_ARRAY_ELEMS(handle_types); i++) {
+        ret = MFXVideoCORE_GetHandle(device_hwctx->session, handle_types[i], &handle);
+        if (ret == MFX_ERR_NONE) {
+            handle_type = handle_types[i];
+            break;
+        }
+    }
+
+    /* create a "slave" session with those same properties, to be used for vpp */
+    ret = MFXInit(impl, &ver, &s->session);
+    if (ret != MFX_ERR_NONE) {
+        av_log(avctx, AV_LOG_ERROR, "Error initializing a session for scaling\n");
+        return AVERROR_UNKNOWN;
+    }
+
+    if (handle) {
+        ret = MFXVideoCORE_SetHandle(s->session, handle_type, handle);
+        if (ret != MFX_ERR_NONE)
+            return AVERROR_UNKNOWN;
+    }
+
+    if (IS_OPAQUE_MEMORY(s->in_mem_mode) || IS_OPAQUE_MEMORY(s->out_mem_mode)) {
+        s->opaque_alloc.In.Surfaces   = s->surface_ptrs_in;
+        s->opaque_alloc.In.NumSurface = s->nb_surface_ptrs_in;
+        s->opaque_alloc.In.Type       = s->in_mem_mode;
+
+        s->opaque_alloc.Out.Surfaces   = s->surface_ptrs_out;
+        s->opaque_alloc.Out.NumSurface = s->nb_surface_ptrs_out;
+        s->opaque_alloc.Out.Type       = s->out_mem_mode;
+
+        s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
+        s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc);
+    } else if (IS_VIDEO_MEMORY(s->in_mem_mode) || IS_VIDEO_MEMORY(s->out_mem_mode)) {
+        mfxFrameAllocator frame_allocator = {
+            .pthis  = s,
+            .Alloc  = frame_alloc,
+            .Lock   = frame_lock,
+            .Unlock = frame_unlock,
+            .GetHDL = frame_get_hdl,
+            .Free   = frame_free,
+        };
+
+        ret = MFXVideoCORE_SetFrameAllocator(s->session, &frame_allocator);
+        if (ret != MFX_ERR_NONE)
+            return AVERROR_UNKNOWN;
+    }
+
+    return 0;
+}
+
+int ff_qsvvpp_create(AVFilterContext *avctx, QSVVPPContext **vpp, QSVVPPParam *param)
+{
+    int i;
+    int ret;
+    QSVVPPContext *s;
+
+    s = av_mallocz(sizeof(*s));
+    if (!s)
+        return AVERROR(ENOMEM);
+
+    s->filter_frame  = param->filter_frame;
+    if (!s->filter_frame)
+        s->filter_frame = ff_filter_frame;
+    s->out_sw_format = param->out_sw_format;
+
+    /* create the vpp session */
+    ret = init_vpp_session(avctx, s);
+    if (ret < 0)
+        goto failed;
+
+    s->frame_infos = av_mallocz_array(avctx->nb_inputs, sizeof(*s->frame_infos));
+    if (!s->frame_infos) {
+        ret = AVERROR(ENOMEM);
+        goto failed;
+    }
+
+    /* Init each input's information */
+    for (i = 0; i < avctx->nb_inputs; i++) {
+        ret = fill_frameinfo_by_link(&s->frame_infos[i], avctx->inputs[i]);
+        if (ret < 0)
+            goto failed;
+    }
+
+    /* Update input's frame info according to crop */
+    for (i = 0; i < param->num_crop; i++) {
+        QSVVPPCrop *crop = param->crop + i;
+        if (crop->in_idx > avctx->nb_inputs) {
+            ret = AVERROR(EINVAL);
+            goto failed;
+        }
+        s->frame_infos[crop->in_idx].CropX = crop->x;
+        s->frame_infos[crop->in_idx].CropY = crop->y;
+        s->frame_infos[crop->in_idx].CropW = crop->w;
+        s->frame_infos[crop->in_idx].CropH = crop->h;
+    }
+
+    s->vpp_param.vpp.In = s->frame_infos[0];
+
+    ret = fill_frameinfo_by_link(&s->vpp_param.vpp.Out, avctx->outputs[0]);
+    if (ret < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Fail to get frame info from link.\n");
+        goto failed;
+    }
+
+    if (IS_OPAQUE_MEMORY(s->in_mem_mode) || IS_OPAQUE_MEMORY(s->out_mem_mode)) {
+        s->nb_ext_buffers = param->num_ext_buf + 1;
+        s->ext_buffers = av_mallocz_array(s->nb_ext_buffers, sizeof(*s->ext_buffers));
+        if (!s->ext_buffers) {
+            ret = AVERROR(ENOMEM);
+            goto failed;
+        }
+
+        s->ext_buffers[0] = (mfxExtBuffer *)&s->opaque_alloc;
+        for (i = 1; i < param->num_ext_buf; i++)
+            s->ext_buffers[i]    = param->ext_buf[i - 1];
+        s->vpp_param.ExtParam    = s->ext_buffers;
+        s->vpp_param.NumExtParam = s->nb_ext_buffers;
+    } else {
+        s->vpp_param.NumExtParam = param->num_ext_buf;
+        s->vpp_param.ExtParam    = param->ext_buf;
+    }
+
+    s->vpp_param.AsyncDepth = 1;
+
+    if (IS_SYSTEM_MEMORY(s->in_mem_mode))
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_SYSTEM_MEMORY;
+    else if (IS_VIDEO_MEMORY(s->in_mem_mode))
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_VIDEO_MEMORY;
+    else if (IS_OPAQUE_MEMORY(s->in_mem_mode))
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_OPAQUE_MEMORY;
+
+    if (IS_SYSTEM_MEMORY(s->out_mem_mode))
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
+    else if (IS_VIDEO_MEMORY(s->out_mem_mode))
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_OUT_VIDEO_MEMORY;
+    else if (IS_OPAQUE_MEMORY(s->out_mem_mode))
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
+
+    ret = MFXVideoVPP_Init(s->session, &s->vpp_param);
+    if (ret < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Failed to create a qsvvpp, ret = %d.\n", ret);
+        goto failed;
+    }
+
+    *vpp = s;
+    return 0;
+
+failed:
+    ff_qsvvpp_free(&s);
+
+    return ret;
+}
+
+int ff_qsvvpp_free(QSVVPPContext **vpp)
+{
+    QSVVPPContext *s = *vpp;
+
+    if (!s)
+        return 0;
+
+    if (s->session) {
+        MFXVideoVPP_Close(s->session);
+        MFXClose(s->session);
+    }
+
+    /* release all the resources */
+    clear_frame_list(&s->in_frame_list);
+    clear_frame_list(&s->out_frame_list);
+    av_freep(&s->surface_ptrs_in);
+    av_freep(&s->surface_ptrs_out);
+    av_freep(&s->ext_buffers);
+    av_freep(&s->frame_infos);
+    av_freep(vpp);
+
+    return 0;
+}
+
+int ff_qsvvpp_filter_frame(QSVVPPContext *s, AVFilterLink *inlink, AVFrame *picref)
+{
+    AVFilterContext  *ctx     = inlink->dst;
+    AVFilterLink     *outlink = ctx->outputs[0];
+    mfxSyncPoint      sync;
+    QSVFrame         *in_frame, *out_frame;
+    int               ret, filter_ret;
+
+    in_frame = submit_frame(s, inlink, picref);
+    if (!in_frame) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to submit frame on input[%d]\n",
+               FF_INLINK_IDX(inlink));
+        return AVERROR(ENOMEM);
+    }
+
+    do {
+        out_frame = query_frame(s, outlink);
+        if (!out_frame) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to query an output frame.\n");
+            return AVERROR(ENOMEM);
+        }
+
+        do {
+            ret = MFXVideoVPP_RunFrameVPPAsync(s->session, in_frame->surface,
+                                               out_frame->surface, NULL, &sync);
+            if (ret == MFX_WRN_DEVICE_BUSY)
+                av_usleep(500);
+        } while (ret == MFX_WRN_DEVICE_BUSY);
+
+        if (ret < 0 && ret != MFX_ERR_MORE_SURFACE) {
+            /* Ignore more_data error */
+            if (ret == MFX_ERR_MORE_DATA)
+                ret = AVERROR(EAGAIN);
+            break;
+        }
+
+        if (MFXVideoCORE_SyncOperation(s->session, sync, 1000) < 0)
+            av_log(ctx, AV_LOG_WARNING, "Sync failed.\n");
+
+        out_frame->frame->pts = av_rescale_q(out_frame->surface->Data.TimeStamp,
+                                             default_tb, outlink->time_base);
+
+        filter_ret = s->filter_frame(outlink, out_frame->frame);
+        if (filter_ret < 0) {
+            av_frame_free(&out_frame->frame);
+            ret = filter_ret;
+            break;
+        }
+        out_frame->frame = NULL;
+    } while(ret == MFX_ERR_MORE_SURFACE);
+
+    return ret;
+}
diff --git a/libavfilter/qsvvpp.h b/libavfilter/qsvvpp.h
new file mode 100644
index 0000000000..d720c9ba42
--- /dev/null
+++ b/libavfilter/qsvvpp.h
@@ -0,0 +1,66 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Intel Quick Sync Video VPP base function
+ */
+
+#ifndef AVFILTER_QSVVPP_H
+#define AVFILTER_QSVVPP_H
+
+#include <mfx/mfxvideo.h>
+
+#include "avfilter.h"
+
+#define FF_INLINK_IDX(link)  ((int)((link)->dstpad - (link)->dst->input_pads))
+#define FF_OUTLINK_IDX(link) ((int)((link)->srcpad - (link)->src->output_pads))
+
+typedef struct QSVVPPContext QSVVPPContext;
+
+typedef struct QSVVPPCrop {
+    int in_idx;        ///< Input index
+    int x, y, w, h;    ///< Crop rectangle
+} QSVVPPCrop;
+
+typedef struct QSVVPPParam {
+    /* default is ff_filter_frame */
+    int (*filter_frame)(AVFilterLink *outlink, AVFrame *frame);
+
+    /* To fill with MFX enhanced filter configurations */
+    int num_ext_buf;
+    mfxExtBuffer **ext_buf;
+
+    /* Real output format */
+    enum AVPixelFormat out_sw_format;
+
+    /* Crop information for each input, if needed */
+    int num_crop;
+    QSVVPPCrop *crop;
+} QSVVPPParam;
+
+/* create and initialize the QSV session */
+int ff_qsvvpp_create(AVFilterContext *avctx, QSVVPPContext **vpp, QSVVPPParam *param);
+
+/* release the resources (eg.surfaces) */
+int ff_qsvvpp_free(QSVVPPContext **vpp);
+
+/* vpp filter frame and call the cb if needed */
+int ff_qsvvpp_filter_frame(QSVVPPContext *vpp, AVFilterLink *inlink, AVFrame *frame);
+
+#endif /* AVFILTER_QSVVPP_H */
diff --git a/libavfilter/vf_overlay_qsv.c b/libavfilter/vf_overlay_qsv.c
new file mode 100644
index 0000000000..471576e35a
--- /dev/null
+++ b/libavfilter/vf_overlay_qsv.c
@@ -0,0 +1,487 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * A hardware accelerated overlay filter based on Intel Quick Sync Video VPP
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/common.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/eval.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/avstring.h"
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/mathematics.h"
+
+#include "internal.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "video.h"
+
+#include "qsvvpp.h"
+
+#define MAIN    0
+#define OVERLAY 1
+
+#define OFFSET(x) offsetof(QSVOverlayContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+
+enum var_name {
+    VAR_MAIN_iW,     VAR_MW,
+    VAR_MAIN_iH,     VAR_MH,
+    VAR_OVERLAY_iW,
+    VAR_OVERLAY_iH,
+    VAR_OVERLAY_X,  VAR_OX,
+    VAR_OVERLAY_Y,  VAR_OY,
+    VAR_OVERLAY_W,  VAR_OW,
+    VAR_OVERLAY_H,  VAR_OH,
+    VAR_VARS_NB
+};
+
+enum EOFAction {
+    EOF_ACTION_REPEAT,
+    EOF_ACTION_ENDALL
+};
+
+/* Private context of the overlay_qsv filter. */
+typedef struct QSVOverlayContext {
+    const AVClass      *class;
+
+    QSVVPPContext      *qsv;        /* QSV VPP session (see qsvvpp.c) */
+    QSVVPPParam        qsv_param;   /* parameters used to create the session */
+    mfxExtVPPComposite comp_conf;   /* MFX composition configuration */
+    double             var_values[VAR_VARS_NB]; /* expression variables */
+
+    /* option strings: overlay position/size expressions */
+    char     *overlay_ox, *overlay_oy, *overlay_ow, *overlay_oh;
+    uint16_t  overlay_alpha, overlay_pixel_alpha;
+
+    enum EOFAction eof_action;  /* action to take on EOF from source */
+
+    AVFrame *main;                  /* pending frame on the main input */
+    AVFrame *over_prev, *over_next; /* previous / next overlay frames */
+} QSVOverlayContext;
+
+static const char *const var_names[] = {
+    "main_w",     "W",   /* input width of the main layer */
+    "main_h",     "H",   /* input height of the main layer */
+    "overlay_iw",        /* input width of the overlay layer */
+    "overlay_ih",        /* input height of the overlay layer */
+    "overlay_x",  "x",   /* x position of the overlay layer inside of main */
+    "overlay_y",  "y",   /* y position of the overlay layer inside of main */
+    "overlay_w",  "w",   /* output width of overlay layer */
+    "overlay_h",  "h",   /* output height of overlay layer */
+    NULL
+};
+
+static const AVOption options[] = {
+    { "x", "Overlay x position", OFFSET(overlay_ox), AV_OPT_TYPE_STRING, { .str="0"}, 0, 255, .flags = FLAGS},
+    { "y", "Overlay y position", OFFSET(overlay_oy), AV_OPT_TYPE_STRING, { .str="0"}, 0, 255, .flags = FLAGS},
+    { "w", "Overlay width",      OFFSET(overlay_ow), AV_OPT_TYPE_STRING, { .str="overlay_iw"}, 0, 255, .flags = FLAGS},
+    { "h", "Overlay height",     OFFSET(overlay_oh), AV_OPT_TYPE_STRING, { .str="overlay_ih*w/overlay_iw"}, 0, 255, .flags = FLAGS},
+    { "alpha", "Overlay global alpha", OFFSET(overlay_alpha), AV_OPT_TYPE_INT, { .i64 = 255}, 0, 255, .flags = FLAGS},
+    { "eof_action", "Action to take when encountering EOF from secondary input ",
+        OFFSET(eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
+        EOF_ACTION_REPEAT, EOF_ACTION_ENDALL, .flags = FLAGS, "eof_action" },
+        { "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
+        { "endall", "End both streams.",          0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
+    { NULL }
+};
+
+/* Evaluate the x/y/w/h option expressions into vpp->var_values.
+ * The width/height and x/y expressions are evaluated repeatedly so
+ * that each of them may refer to the others (e.g. 'w' in terms of
+ * 'h', 'x' in terms of 'y') regardless of declaration order.
+ * Returns 0 on success or a negative AVERROR on parse failure. */
+static int eval_expr(AVFilterContext *ctx)
+{
+    QSVOverlayContext *vpp = ctx->priv;
+    double     *var_values = vpp->var_values;
+    int                ret = 0;
+    AVExpr *ox_expr = NULL, *oy_expr = NULL;
+    AVExpr *ow_expr = NULL, *oh_expr = NULL;
+
+#define PASS_EXPR(e, s) {\
+    ret = av_expr_parse(&e, s, var_names, NULL, NULL, NULL, NULL, 0, ctx); \
+    if (ret < 0) {\
+        av_log(ctx, AV_LOG_ERROR, "Error when parsing '%s'.\n", s);\
+        goto release;\
+    }\
+}
+    PASS_EXPR(ox_expr, vpp->overlay_ox);
+    PASS_EXPR(oy_expr, vpp->overlay_oy);
+    PASS_EXPR(ow_expr, vpp->overlay_ow);
+    PASS_EXPR(oh_expr, vpp->overlay_oh);
+#undef PASS_EXPR
+
+    var_values[VAR_OVERLAY_W] =
+    var_values[VAR_OW]        = av_expr_eval(ow_expr, var_values, NULL);
+    var_values[VAR_OVERLAY_H] =
+    var_values[VAR_OH]        = av_expr_eval(oh_expr, var_values, NULL);
+
+    /* calc again in case ow is relative to oh */
+    var_values[VAR_OVERLAY_W] =
+    var_values[VAR_OW]        = av_expr_eval(ow_expr, var_values, NULL);
+
+    var_values[VAR_OVERLAY_X] =
+    var_values[VAR_OX]        = av_expr_eval(ox_expr, var_values, NULL);
+    var_values[VAR_OVERLAY_Y] =
+    var_values[VAR_OY]        = av_expr_eval(oy_expr, var_values, NULL);
+
+    /* calc again in case ox is relative to oy */
+    var_values[VAR_OVERLAY_X] =
+    var_values[VAR_OX]        = av_expr_eval(ox_expr, var_values, NULL);
+
+    /* calc overlay_w and overlay_h again in case they are relative to ox,oy */
+    var_values[VAR_OVERLAY_W] =
+    var_values[VAR_OW]        = av_expr_eval(ow_expr, var_values, NULL);
+    var_values[VAR_OVERLAY_H] =
+    var_values[VAR_OH]        = av_expr_eval(oh_expr, var_values, NULL);
+    var_values[VAR_OVERLAY_W] =
+    var_values[VAR_OW]        = av_expr_eval(ow_expr, var_values, NULL);
+
+release:
+    av_expr_free(ox_expr);
+    av_expr_free(oy_expr);
+    av_expr_free(ow_expr);
+    av_expr_free(oh_expr);
+
+    return ret;
+}
+
+/* Return 1 if the link's (software) pixel format has an alpha plane,
+ * 0 otherwise.  For hardware (QSV) links, the underlying software
+ * format of the frames context is inspected instead of the opaque
+ * AV_PIX_FMT_QSV value. */
+static int have_alpha_planar(AVFilterLink *link)
+{
+    /* Fix: pix_fmt was previously left uninitialized (undefined
+     * behavior) whenever the link carried a software format. */
+    enum AVPixelFormat pix_fmt = link->format;
+    const AVPixFmtDescriptor *desc;
+    AVHWFramesContext *fctx;
+
+    if (link->format == AV_PIX_FMT_QSV) {
+        fctx    = (AVHWFramesContext *)link->hw_frames_ctx->data;
+        pix_fmt = fctx->sw_format;
+    }
+
+    desc = av_pix_fmt_desc_get(pix_fmt);
+    if (!desc)
+        return 0;
+
+    return !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
+}
+
+/* Configure the main (background) input: record its dimensions in the
+ * expression variables and set up composition stream 0 to cover the
+ * whole output frame with no alpha blending. */
+static int config_main_input(AVFilterLink *inlink)
+{
+    AVFilterContext      *ctx = inlink->dst;
+    QSVOverlayContext    *vpp = ctx->priv;
+    mfxVPPCompInputStream *st = &vpp->comp_conf.InputStream[0];
+
+    av_log(ctx, AV_LOG_DEBUG, "Input[%d] is of %s.\n", FF_INLINK_IDX(inlink),
+           av_get_pix_fmt_name(inlink->format));
+
+    vpp->var_values[VAR_MAIN_iW] =
+    vpp->var_values[VAR_MW]      = inlink->w;
+    vpp->var_values[VAR_MAIN_iH] =
+    vpp->var_values[VAR_MH]      = inlink->h;
+
+    /* the main layer fills the whole destination, fully opaque */
+    st->DstX              = 0;
+    st->DstY              = 0;
+    st->DstW              = inlink->w;
+    st->DstH              = inlink->h;
+    st->GlobalAlphaEnable = 0;
+    st->PixelAlphaEnable  = 0;
+
+    return 0;
+}
+
+/* Configure the overlay input: now that both input sizes are known,
+ * evaluate the position/size expressions and fill composition
+ * stream 1 (the overlay layer). */
+static int config_overlay_input(AVFilterLink *inlink)
+{
+    AVFilterContext       *ctx = inlink->dst;
+    QSVOverlayContext     *vpp = ctx->priv;
+    mfxVPPCompInputStream *st  = &vpp->comp_conf.InputStream[1];
+    int                    ret = 0;
+
+    av_log(ctx, AV_LOG_DEBUG, "Input[%d] is of %s.\n", FF_INLINK_IDX(inlink),
+           av_get_pix_fmt_name(inlink->format));
+
+    vpp->var_values[VAR_OVERLAY_iW] = inlink->w;
+    vpp->var_values[VAR_OVERLAY_iH] = inlink->h;
+
+    ret = eval_expr(ctx);
+    if (ret < 0)
+        return ret;
+
+    st->DstX              = vpp->var_values[VAR_OX];
+    st->DstY              = vpp->var_values[VAR_OY];
+    st->DstW              = vpp->var_values[VAR_OW];
+    st->DstH              = vpp->var_values[VAR_OH];
+    st->GlobalAlpha       = vpp->overlay_alpha;
+    /* global alpha only needs to be applied when not fully opaque */
+    st->GlobalAlphaEnable = (st->GlobalAlpha < 255);
+    /* per-pixel blending when the overlay format carries an alpha plane */
+    st->PixelAlphaEnable  = have_alpha_planar(inlink);
+
+    return 0;
+}
+
+/* Verify the two inputs are compatible (both software or both hardware
+ * on the same QSV device), size the output after the main input and
+ * create the VPP session. */
+static int config_output(AVFilterLink *outlink)
+{
+    AVFilterContext   *ctx = outlink->src;
+    QSVOverlayContext *vpp = ctx->priv;
+    AVFilterLink      *in0 = ctx->inputs[0];
+    AVFilterLink      *in1 = ctx->inputs[1];
+
+    av_log(ctx, AV_LOG_DEBUG, "Output is of %s.\n", av_get_pix_fmt_name(outlink->format));
+    if ((in0->format == AV_PIX_FMT_QSV && in1->format != AV_PIX_FMT_QSV) ||
+        (in0->format != AV_PIX_FMT_QSV && in1->format == AV_PIX_FMT_QSV)) {
+        av_log(ctx, AV_LOG_ERROR, "Mixing hardware and software pixel formats is not supported.\n");
+        return AVERROR(EINVAL);
+    } else if (in0->format == AV_PIX_FMT_QSV) {
+        AVHWFramesContext *hw_frame0 = (AVHWFramesContext *)in0->hw_frames_ctx->data;
+        AVHWFramesContext *hw_frame1 = (AVHWFramesContext *)in1->hw_frames_ctx->data;
+
+        /* composition requires frames living on the same device */
+        if (hw_frame0->device_ctx != hw_frame1->device_ctx) {
+            av_log(ctx, AV_LOG_ERROR, "Inputs with different underlying QSV devices are forbidden.\n");
+            return AVERROR(EINVAL);
+        }
+    }
+
+    /* output inherits the main input's dimensions and frame rate */
+    outlink->w          = vpp->var_values[VAR_MW];
+    outlink->h          = vpp->var_values[VAR_MH];
+    outlink->frame_rate = in0->frame_rate;
+    outlink->time_base  = av_inv_q(outlink->frame_rate);
+
+    return ff_qsvvpp_create(ctx, &vpp->qsv, &vpp->qsv_param);
+}
+
+/* Submit the main frame and (a reference to) the overlay frame to the
+ * VPP session.  Ownership of mpic passes to ff_qsvvpp_filter_frame;
+ * opic is cloned so the caller can repeat it later (see the comment
+ * below).  Returns 0/AVERROR. */
+static int blend_frame(AVFilterContext *ctx, AVFrame *mpic, AVFrame *opic)
+{
+    int                ret = 0;
+    QSVOverlayContext *vpp = ctx->priv;
+    AVFrame     *opic_copy = NULL;
+
+    ret = ff_qsvvpp_filter_frame(vpp->qsv, ctx->inputs[0], mpic);
+    if (ret == 0 || ret == AVERROR(EAGAIN)) {
+        /* Reference the overlay frame. Because:
+         * 1. ff_qsvvpp_filter_frame will take control of the given frame
+         * 2. We need to repeat the overlay frame when 2nd input goes into EOF
+         */
+        opic_copy = av_frame_clone(opic);
+        if (!opic_copy)
+            return AVERROR(ENOMEM);
+
+        ret = ff_qsvvpp_filter_frame(vpp->qsv, ctx->inputs[1], opic_copy);
+    }
+
+    return ret;
+}
+
+/* Called when the overlay input reaches EOF: either keep blending the
+ * last overlay frame (EOF_ACTION_REPEAT) or terminate both streams
+ * (EOF_ACTION_ENDALL). */
+static int handle_overlay_eof(AVFilterContext *ctx)
+{
+    int              ret = 0;
+    QSVOverlayContext *s = ctx->priv;
+    /* Repeat previous frame on secondary input */
+    if (s->over_prev && s->eof_action == EOF_ACTION_REPEAT)
+        ret = blend_frame(ctx, s->main, s->over_prev);
+    /* End both streams */
+    else if (s->eof_action == EOF_ACTION_ENDALL)
+        return AVERROR_EOF;
+
+    /* NOTE(review): when neither branch runs (REPEAT with no previous
+     * overlay), s->main appears to be dropped without av_frame_free —
+     * verify this cannot leak. */
+    s->main = NULL;
+
+    return ret;
+}
+
+/* Pull one frame from each input and emit a blended frame downstream.
+ * Frames are matched by timestamp: overlay frames older than the
+ * current main frame are discarded (the most recent one is kept in
+ * over_prev so it can be repeated). */
+static int request_frame(AVFilterLink *outlink)
+{
+    AVFilterContext *ctx = outlink->src;
+    QSVOverlayContext *s = ctx->priv;
+    AVRational   tb_main = ctx->inputs[MAIN]->time_base;
+    AVRational   tb_over = ctx->inputs[OVERLAY]->time_base;
+    int              ret = 0;
+
+    /* get a frame on the main input */
+    if (!s->main) {
+        ret = ff_request_frame(ctx->inputs[MAIN]);
+        if (ret < 0)
+            return ret;
+    }
+
+    /* get a new frame on the overlay input, on EOF check setting 'eof_action' */
+    if (!s->over_next) {
+        ret = ff_request_frame(ctx->inputs[OVERLAY]);
+        if (ret == AVERROR_EOF)
+            return handle_overlay_eof(ctx);
+        else if (ret < 0)
+            return ret;
+    }
+
+    /* drop overlay frames that are entirely in the past w.r.t. the main frame */
+    while (s->main->pts != AV_NOPTS_VALUE &&
+           s->over_next->pts != AV_NOPTS_VALUE &&
+           av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main) < 0) {
+        av_frame_free(&s->over_prev);
+        FFSWAP(AVFrame*, s->over_prev, s->over_next);
+
+        ret = ff_request_frame(ctx->inputs[OVERLAY]);
+        if (ret == AVERROR_EOF)
+            return handle_overlay_eof(ctx);
+        else if (ret < 0)
+            return ret;
+    }
+
+    if (s->main->pts == AV_NOPTS_VALUE ||
+        s->over_next->pts == AV_NOPTS_VALUE ||
+        !av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main)) {
+        /* timestamps match (or are unknown): blend with the new overlay */
+        ret = blend_frame(ctx, s->main, s->over_next);
+        av_frame_free(&s->over_prev);
+        FFSWAP(AVFrame*, s->over_prev, s->over_next);
+    } else if (s->over_prev) {
+        /* overlay frame is still in the future: repeat the previous one */
+        ret = blend_frame(ctx, s->main, s->over_prev);
+    } else {
+        /* no usable overlay yet: drop the main frame and ask again */
+        av_frame_free(&s->main);
+        ret = AVERROR(EAGAIN);
+    }
+
+    /* ownership of s->main was handed to blend_frame (or it was freed) */
+    s->main = NULL;
+
+    return ret;
+}
+
+/* Buffer the incoming main-input frame; it is consumed by request_frame(). */
+static int filter_frame_main(AVFilterLink *inlink, AVFrame *frame)
+{
+    QSVOverlayContext *s = inlink->dst->priv;
+
+    av_assert0(!s->main);
+    s->main = frame;
+
+    return 0;
+}
+
+/* Buffer the incoming overlay-input frame; it is consumed by request_frame(). */
+static int filter_frame_overlay(AVFilterLink *inlink, AVFrame *frame)
+{
+    QSVOverlayContext *s = inlink->dst->priv;
+
+    av_assert0(!s->over_next);
+    s->over_next = frame;
+
+    return 0;
+}
+
+/* Allocate and pre-fill the MFX composition configuration and the
+ * QSVVPP creation parameters.  The session itself is created later in
+ * config_output(), once link formats and sizes are known. */
+static int overlay_qsv_init(AVFilterContext *ctx)
+{
+    QSVOverlayContext *vpp = ctx->priv;
+
+    /* fill composite config */
+    vpp->comp_conf.Header.BufferId = MFX_EXTBUFF_VPP_COMPOSITE;
+    vpp->comp_conf.Header.BufferSz = sizeof(vpp->comp_conf);
+    vpp->comp_conf.NumInputStream  = ctx->nb_inputs;
+    vpp->comp_conf.InputStream     = av_mallocz_array(ctx->nb_inputs,
+                                                      sizeof(*vpp->comp_conf.InputStream));
+    if (!vpp->comp_conf.InputStream)
+        return AVERROR(ENOMEM);
+
+    /* initialize QSVVPP params; NULL filter_frame selects the default cb */
+    vpp->qsv_param.filter_frame = NULL;
+    vpp->qsv_param.ext_buf      = av_mallocz(sizeof(*vpp->qsv_param.ext_buf));
+    if (!vpp->qsv_param.ext_buf)
+        return AVERROR(ENOMEM);
+
+    /* the composition config is the single extended buffer */
+    vpp->qsv_param.ext_buf[0]    = (mfxExtBuffer *)&vpp->comp_conf;
+    vpp->qsv_param.num_ext_buf   = 1;
+    vpp->qsv_param.out_sw_format = AV_PIX_FMT_NV12;
+    vpp->qsv_param.num_crop      = 0;
+
+    return 0;
+}
+
+/* Free any buffered frames, the VPP session and the buffers allocated
+ * in overlay_qsv_init() (also reached when init fails midway). */
+static void overlay_qsv_uninit(AVFilterContext *ctx)
+{
+    QSVOverlayContext *vpp = ctx->priv;
+
+    av_frame_free(&vpp->main);
+    av_frame_free(&vpp->over_prev);
+    av_frame_free(&vpp->over_next);
+    ff_qsvvpp_free(&vpp->qsv);
+    av_freep(&vpp->comp_conf.InputStream);
+    av_freep(&vpp->qsv_param.ext_buf);
+}
+
+/* Advertise the supported pixel formats on all inputs and the output.
+ * Fix: the return values of ff_formats_ref() (which can fail with
+ * ENOMEM) were previously ignored; propagate them instead. */
+static int overlay_qsv_query_formats(AVFilterContext *ctx)
+{
+    int i, ret;
+
+    static const enum AVPixelFormat main_in_fmts[] = {
+        AV_PIX_FMT_YUV420P,
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_YUYV422,
+        AV_PIX_FMT_RGB32,
+        AV_PIX_FMT_QSV,
+        AV_PIX_FMT_NONE
+    };
+    static const enum AVPixelFormat out_pix_fmts[] = {
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_QSV,
+        AV_PIX_FMT_NONE
+    };
+
+    for (i = 0; i < ctx->nb_inputs; i++) {
+        ret = ff_formats_ref(ff_make_format_list(main_in_fmts),
+                             &ctx->inputs[i]->out_formats);
+        if (ret < 0)
+            return ret;
+    }
+
+    ret = ff_formats_ref(ff_make_format_list(out_pix_fmts),
+                         &ctx->outputs[0]->in_formats);
+    if (ret < 0)
+        return ret;
+
+    return 0;
+}
+
+static const AVClass overlay_qsv_class = {
+    .class_name = "overlay_qsv",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+static const AVFilterPad overlay_qsv_inputs[] = {
+    {
+        .name          = "main",
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .filter_frame  = filter_frame_main,
+        .config_props  = config_main_input,
+        .needs_fifo    = 1,
+    },
+    {
+        .name          = "overlay",
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .filter_frame  = filter_frame_overlay,
+        .config_props  = config_overlay_input,
+        .needs_fifo    = 1,
+    },
+    { NULL }
+};
+
+static const AVFilterPad overlay_qsv_outputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .config_props  = config_output,
+        .request_frame = request_frame,
+    },
+    { NULL }
+};
+
+AVFilter ff_vf_overlay_qsv = {
+    .name           = "overlay_qsv",
+    .description    = NULL_IF_CONFIG_SMALL("Quick Sync Video overlay."),
+    .priv_size      = sizeof(QSVOverlayContext),
+    .query_formats  = overlay_qsv_query_formats,
+    .init           = overlay_qsv_init,
+    .uninit         = overlay_qsv_uninit,
+    .inputs         = overlay_qsv_inputs,
+    .outputs        = overlay_qsv_outputs,
+    .priv_class     = &overlay_qsv_class,
+    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+};
diff --git a/libavfilter/vf_vpp_qsv.c b/libavfilter/vf_vpp_qsv.c
new file mode 100644
index 0000000000..86af017d2e
--- /dev/null
+++ b/libavfilter/vf_vpp_qsv.c
@@ -0,0 +1,401 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ ** @file
+ ** Hardware accelerated common filters based on Intel Quick Sync Video VPP
+ **/
+
+#include <float.h>
+
+#include "libavutil/opt.h"
+#include "libavutil/eval.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+
+#include "formats.h"
+#include "internal.h"
+#include "avfilter.h"
+#include "libavcodec/avcodec.h"
+#include "libavformat/avformat.h"
+
+#include "qsvvpp.h"
+
+#define OFFSET(x) offsetof(VPPContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+
+/* number of video enhancement filters */
+#define ENH_FILTERS_COUNT (5)
+
+/* Private context of the vpp_qsv filter. */
+typedef struct VPPContext{
+    const AVClass *class;
+
+    QSVVPPContext *qsv;         /* QSV VPP session (see qsvvpp.c) */
+
+    /* Video Enhancement Algorithms */
+    mfxExtVPPDeinterlacing  deinterlace_conf;
+    mfxExtVPPFrameRateConversion frc_conf;
+    mfxExtVPPDenoise denoise_conf;
+    mfxExtVPPDetail detail_conf;
+    mfxExtVPPProcAmp procamp_conf;
+
+    int out_width;              /* evaluated output size; 0 = same as input */
+    int out_height;
+
+    AVRational framerate;       /* target framerate */
+    int use_frc;                /* use framerate conversion */
+    int deinterlace;            /* deinterlace mode : 0=off, 1=bob, 2=advanced */
+    int denoise;                /* Enable Denoise algorithm. Value [0, 100] */
+    int detail;                 /* Enable Detail Enhancement algorithm. */
+                                /* Level is the optional, value [0, 100] */
+    int use_crop;               /* 1 = use crop; 0=none */
+    int crop_w;
+    int crop_h;
+    int crop_x;
+    int crop_y;
+
+    /* param for the procamp */
+    int    procamp;            /* enable procamp */
+    float  hue;
+    float  saturation;
+    float  contrast;
+    float  brightness;
+
+    char *cx, *cy, *cw, *ch;   /* crop expression strings (options) */
+    char *ow, *oh;             /* output size expression strings (options) */
+} VPPContext;
+
+static const AVOption options[] = {
+    { "deinterlace", "deinterlace mode: 0=off, 1=bob, 2=advanced", OFFSET(deinterlace), AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, MFX_DEINTERLACING_ADVANCED, .flags = FLAGS, "deinterlace" },
+    { "bob",         "Bob deinterlace mode.",                      0,                   AV_OPT_TYPE_CONST,    { .i64 = MFX_DEINTERLACING_BOB },            .flags = FLAGS, "deinterlace" },
+    { "advanced",    "Advanced deinterlace mode. ",                0,                   AV_OPT_TYPE_CONST,    { .i64 = MFX_DEINTERLACING_ADVANCED },       .flags = FLAGS, "deinterlace" },
+
+    { "denoise",     "denoise level [0, 100]",       OFFSET(denoise),     AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 100, .flags = FLAGS },
+    { "detail",      "enhancement level [0, 100]",   OFFSET(detail),      AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 100, .flags = FLAGS },
+    { "framerate",   "output framerate",             OFFSET(framerate),   AV_OPT_TYPE_RATIONAL, { .dbl = 0.0 },0, DBL_MAX, .flags = FLAGS },
+    { "procamp",     "Enable ProcAmp",               OFFSET(procamp),     AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 1, .flags = FLAGS},
+    { "hue",         "ProcAmp hue",                  OFFSET(hue),         AV_OPT_TYPE_FLOAT,    { .dbl = 0.0 }, -180.0, 180.0, .flags = FLAGS},
+    { "saturation",  "ProcAmp saturation",           OFFSET(saturation),  AV_OPT_TYPE_FLOAT,    { .dbl = 1.0 }, 0.0, 10.0, .flags = FLAGS},
+    { "contrast",    "ProcAmp contrast",             OFFSET(contrast),    AV_OPT_TYPE_FLOAT,    { .dbl = 1.0 }, 0.0, 10.0, .flags = FLAGS},
+    { "brightness",  "ProcAmp brightness",           OFFSET(brightness),  AV_OPT_TYPE_FLOAT,    { .dbl = 0.0 }, -100.0, 100.0, .flags = FLAGS},
+
+    { "cw",   "set the width crop area expression",   OFFSET(cw), AV_OPT_TYPE_STRING, { .str = "iw" }, CHAR_MIN, CHAR_MAX, FLAGS },
+    { "ch",   "set the height crop area expression",  OFFSET(ch), AV_OPT_TYPE_STRING, { .str = "ih" }, CHAR_MIN, CHAR_MAX, FLAGS },
+    { "cx",   "set the x crop area expression",       OFFSET(cx), AV_OPT_TYPE_STRING, { .str = "(in_w-out_w)/2" }, CHAR_MIN, CHAR_MAX, FLAGS },
+    { "cy",   "set the y crop area expression",       OFFSET(cy), AV_OPT_TYPE_STRING, { .str = "(in_h-out_h)/2" }, CHAR_MIN, CHAR_MAX, FLAGS },
+
+    { "w",      "Output video width",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str="cw" }, 0, 255, .flags = FLAGS },
+    { "width",  "Output video width",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str="cw" }, 0, 255, .flags = FLAGS },
+    { "h",      "Output video height", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
+    { "height", "Output video height", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
+    { NULL }
+};
+
+static const char *const var_names[] = {
+    "iw", "in_w",
+    "ih", "in_h",
+    "ow", "out_w", "w",
+    "oh", "out_h", "h",
+    "cw",
+    "ch",
+    "cx",
+    "cy",
+    NULL
+};
+
+enum var_name {
+    VAR_iW, VAR_IN_W,
+    VAR_iH, VAR_IN_H,
+    VAR_oW, VAR_OUT_W, VAR_W,
+    VAR_oH, VAR_OUT_H, VAR_H,
+    CW,
+    CH,
+    CX,
+    CY,
+    VAR_VARS_NB
+};
+
+/* Evaluate the crop (cw/ch/cx/cy) and output size (w/h) expressions.
+ * Each pair is evaluated twice so the expressions may refer to each
+ * other regardless of order; use_crop is set when the crop rectangle
+ * differs from the full input frame.  Returns 0 or a negative AVERROR
+ * on parse failure. */
+static int eval_expr(AVFilterContext *ctx)
+{
+#define PASS_EXPR(e, s) {\
+    ret = av_expr_parse(&e, s, var_names, NULL, NULL, NULL, NULL, 0, ctx); \
+    if (ret < 0) {\
+        av_log(ctx, AV_LOG_ERROR, "Error when passing '%s'.\n", s);\
+        goto release;\
+    }\
+}
+#define CALC_EXPR(e, v, i) {\
+    i = v = av_expr_eval(e, var_values, NULL); \
+}
+    VPPContext *vpp = ctx->priv;
+    double  var_values[VAR_VARS_NB] = { NAN };
+    AVExpr *w_expr  = NULL, *h_expr  = NULL;
+    AVExpr *cw_expr = NULL, *ch_expr = NULL;
+    AVExpr *cx_expr = NULL, *cy_expr = NULL;
+    int     ret = 0;
+
+    PASS_EXPR(cw_expr, vpp->cw);
+    PASS_EXPR(ch_expr, vpp->ch);
+
+    PASS_EXPR(w_expr, vpp->ow);
+    PASS_EXPR(h_expr, vpp->oh);
+
+    PASS_EXPR(cx_expr, vpp->cx);
+    PASS_EXPR(cy_expr, vpp->cy);
+
+    var_values[VAR_iW] =
+    var_values[VAR_IN_W] = ctx->inputs[0]->w;
+
+    var_values[VAR_iH] =
+    var_values[VAR_IN_H] = ctx->inputs[0]->h;
+
+    /* crop params */
+    CALC_EXPR(cw_expr, var_values[CW], vpp->crop_w);
+    CALC_EXPR(ch_expr, var_values[CH], vpp->crop_h);
+
+    /* calc again in case cw is relative to ch */
+    CALC_EXPR(cw_expr, var_values[CW], vpp->crop_w);
+
+    CALC_EXPR(w_expr,
+            var_values[VAR_OUT_W] = var_values[VAR_oW] = var_values[VAR_W],
+            vpp->out_width);
+    CALC_EXPR(h_expr,
+            var_values[VAR_OUT_H] = var_values[VAR_oH] = var_values[VAR_H],
+            vpp->out_height);
+
+    /* calc again in case ow is relative to oh */
+    CALC_EXPR(w_expr,
+            var_values[VAR_OUT_W] = var_values[VAR_oW] = var_values[VAR_W],
+            vpp->out_width);
+
+
+    CALC_EXPR(cx_expr, var_values[CX], vpp->crop_x);
+    CALC_EXPR(cy_expr, var_values[CY], vpp->crop_y);
+
+    /* calc again in case cx is relative to cy */
+    CALC_EXPR(cx_expr, var_values[CX], vpp->crop_x);
+
+    /* cropping is only needed when the rectangle differs from the input */
+    if ((vpp->crop_w != var_values[VAR_iW]) || (vpp->crop_h != var_values[VAR_iH]))
+        vpp->use_crop = 1;
+
+release:
+    av_expr_free(w_expr);
+    av_expr_free(h_expr);
+    av_expr_free(cw_expr);
+    av_expr_free(ch_expr);
+    av_expr_free(cx_expr);
+    av_expr_free(cy_expr);
+#undef PASS_EXPR
+#undef CALC_EXPR
+
+    return ret;
+}
+
+/* Determine the target frame rate, evaluate the size/crop expressions
+ * and clamp the crop rectangle to the input frame. */
+static int config_input(AVFilterLink *inlink)
+{
+    AVFilterContext *ctx = inlink->dst;
+    VPPContext      *vpp = ctx->priv;
+    int              ret;
+
+    /* default to the input frame rate when no target rate was given */
+    if (vpp->framerate.den == 0 || vpp->framerate.num == 0)
+        vpp->framerate = inlink->frame_rate;
+
+    /* frame-rate conversion is only needed when the rates differ */
+    if (av_cmp_q(vpp->framerate, inlink->frame_rate))
+        vpp->use_frc = 1;
+
+    ret = eval_expr(ctx);
+    if (ret != 0) {
+        av_log(ctx, AV_LOG_ERROR, "Fail to eval expr.\n");
+        return ret;
+    }
+
+    if (vpp->out_height == 0 || vpp->out_width == 0) {
+        vpp->out_width  = inlink->w;
+        vpp->out_height = inlink->h;
+    }
+
+    if (vpp->use_crop) {
+        vpp->crop_x = FFMAX(vpp->crop_x, 0);
+        vpp->crop_y = FFMAX(vpp->crop_y, 0);
+
+        /* NOTE(review): if crop_w/crop_h exceed the input size, crop_x or
+         * crop_y can become negative here — confirm downstream handling */
+        if(vpp->crop_w + vpp->crop_x > inlink->w)
+           vpp->crop_x = inlink->w - vpp->crop_w;
+        if(vpp->crop_h + vpp->crop_y > inlink->h)
+           vpp->crop_y = inlink->h - vpp->crop_h;
+    }
+
+    return 0;
+}
+
+/* Configure the output link and create the VPP session, enabling only
+ * the enhancement filters (deinterlace/FRC/denoise/detail/procamp)
+ * that were requested via options.
+ * NOTE(review): param, crop and ext_buf live on this stack frame;
+ * presumably ff_qsvvpp_create copies everything it needs — confirm. */
+static int config_output(AVFilterLink *outlink)
+{
+    AVFilterContext *ctx = outlink->src;
+    VPPContext      *vpp = ctx->priv;
+    QSVVPPParam     param = { NULL };
+    QSVVPPCrop      crop  = { 0 };
+    mfxExtBuffer    *ext_buf[ENH_FILTERS_COUNT];
+
+    outlink->w          = vpp->out_width;
+    outlink->h          = vpp->out_height;
+    outlink->frame_rate = vpp->framerate;
+    outlink->time_base  = av_inv_q(vpp->framerate);
+
+    /* NULL filter_frame selects the default ff_filter_frame callback */
+    param.filter_frame  = NULL;
+    param.out_sw_format = AV_PIX_FMT_NV12;
+    param.num_ext_buf   = 0;
+    param.ext_buf       = ext_buf;
+
+    if (vpp->use_crop) {
+        crop.in_idx = 0;
+        crop.x = vpp->crop_x;
+        crop.y = vpp->crop_y;
+        crop.w = vpp->crop_w;
+        crop.h = vpp->crop_h;
+
+        param.num_crop = 1;
+        param.crop     = &crop;
+    }
+
+    if (vpp->deinterlace) {
+        memset(&vpp->deinterlace_conf, 0, sizeof(mfxExtVPPDeinterlacing));
+        vpp->deinterlace_conf.Header.BufferId = MFX_EXTBUFF_VPP_DEINTERLACING;
+        vpp->deinterlace_conf.Header.BufferSz = sizeof(mfxExtVPPDeinterlacing);
+        vpp->deinterlace_conf.Mode = vpp->deinterlace == 1 ?
+                                     MFX_DEINTERLACING_BOB : MFX_DEINTERLACING_ADVANCED;
+
+        param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->deinterlace_conf;
+    }
+
+    if (vpp->use_frc) {
+        memset(&vpp->frc_conf, 0, sizeof(mfxExtVPPFrameRateConversion));
+        vpp->frc_conf.Header.BufferId = MFX_EXTBUFF_VPP_FRAME_RATE_CONVERSION;
+        vpp->frc_conf.Header.BufferSz = sizeof(mfxExtVPPFrameRateConversion);
+        vpp->frc_conf.Algorithm = MFX_FRCALGM_DISTRIBUTED_TIMESTAMP;
+
+        param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->frc_conf;
+    }
+
+    if (vpp->denoise) {
+        memset(&vpp->denoise_conf, 0, sizeof(mfxExtVPPDenoise));
+        vpp->denoise_conf.Header.BufferId = MFX_EXTBUFF_VPP_DENOISE;
+        vpp->denoise_conf.Header.BufferSz = sizeof(mfxExtVPPDenoise);
+        vpp->denoise_conf.DenoiseFactor   = vpp->denoise;
+
+        param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->denoise_conf;
+    }
+
+    if (vpp->detail) {
+        memset(&vpp->detail_conf, 0, sizeof(mfxExtVPPDetail));
+        vpp->detail_conf.Header.BufferId  = MFX_EXTBUFF_VPP_DETAIL;
+        vpp->detail_conf.Header.BufferSz  = sizeof(mfxExtVPPDetail);
+        vpp->detail_conf.DetailFactor = vpp->detail;
+
+        param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->detail_conf;
+    }
+
+    if (vpp->procamp) {
+        memset(&vpp->procamp_conf, 0, sizeof(mfxExtVPPProcAmp));
+        vpp->procamp_conf.Header.BufferId  = MFX_EXTBUFF_VPP_PROCAMP;
+        vpp->procamp_conf.Header.BufferSz  = sizeof(mfxExtVPPProcAmp);
+        vpp->procamp_conf.Hue              = vpp->hue;
+        vpp->procamp_conf.Saturation       = vpp->saturation;
+        vpp->procamp_conf.Contrast         = vpp->contrast;
+        vpp->procamp_conf.Brightness       = vpp->brightness;
+
+        param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->procamp_conf;
+    }
+
+    return ff_qsvvpp_create(ctx, &vpp->qsv, &param);
+}
+
+/* Hand each input frame straight to the VPP session (which also
+ * delivers the filtered output downstream). */
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+    VPPContext *vpp = inlink->dst->priv;
+
+    return ff_qsvvpp_filter_frame(vpp->qsv, inlink, picref);
+}
+
+/* Advertise the supported pixel formats on the input and output.
+ * Fix: the return values of ff_formats_ref() (which can fail with
+ * ENOMEM) were previously ignored; propagate them instead. */
+static int query_formats(AVFilterContext *ctx)
+{
+    int ret;
+    static const enum AVPixelFormat in_pix_fmts[] = {
+        AV_PIX_FMT_YUV420P,
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_YUYV422,
+        AV_PIX_FMT_RGB32,
+        AV_PIX_FMT_QSV,
+        AV_PIX_FMT_NONE
+    };
+    static const enum AVPixelFormat out_pix_fmts[] = {
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_QSV,
+        AV_PIX_FMT_NONE
+    };
+
+    ret = ff_formats_ref(ff_make_format_list(in_pix_fmts),
+                         &ctx->inputs[0]->out_formats);
+    if (ret < 0)
+        return ret;
+
+    return ff_formats_ref(ff_make_format_list(out_pix_fmts),
+                          &ctx->outputs[0]->in_formats);
+}
+
+/* Release the VPP session; all other state is owned by the option
+ * system or lives inside the context itself. */
+static av_cold void vpp_uninit(AVFilterContext *ctx)
+{
+    VPPContext *vpp = ctx->priv;
+
+    ff_qsvvpp_free(&vpp->qsv);
+}
+
+static const AVClass vpp_class = {
+    .class_name = "vpp_qsv",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+static const AVFilterPad vpp_inputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .config_props  = config_input,
+        .filter_frame  = filter_frame,
+    },
+    { NULL }
+};
+
+static const AVFilterPad vpp_outputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .config_props  = config_output,
+    },
+    { NULL }
+};
+
+AVFilter ff_vf_vpp_qsv = {
+    .name          = "vpp_qsv",
+    .description   = NULL_IF_CONFIG_SMALL("Quick Sync Video VPP."),
+    .priv_size     = sizeof(VPPContext),
+    .query_formats = query_formats,
+    .uninit        = vpp_uninit,
+    .inputs        = vpp_inputs,
+    .outputs       = vpp_outputs,
+    .priv_class    = &vpp_class,
+    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+};


======================================================================

diff --cc Changelog
index cd4a841619,51c3f85a28..869ecde162
--- a/Changelog
+++ b/Changelog
@@@ -2,362 -2,38 +2,363 @@@ Entries are sorted chronologically fro
  releases are sorted from youngest to oldest.
  
  version <next>:
 -- Support for spherical videos
 -- Intel QSV-accelerated VP8 and VC-1 decoding
 -- VAAPI-accelerated VP8 and HEVC decoding
 -- VAAPI-accelerated deinterlacing
 -- config.log and other configuration files moved into avbuild/ directory
 -- VAAPI-accelerated MPEG-2 and VP8 encoding
 -- Apple Pixlet decoder
 +- Bitstream filters for editing metadata in H.264, HEVC and MPEG-2 streams
 +- Dropped support for OpenJPEG versions 2.0 and below. Using OpenJPEG now
 +  requires 2.1 (or later) and pkg-config.
 +- VDA dropped (use VideoToolbox instead)
 +- MagicYUV encoder
 +- Raw AMR-NB and AMR-WB demuxers
 +- TiVo ty/ty+ demuxer
 +- Intel QSV-accelerated MJPEG encoding
 +- PCE support for extended channel layouts in the AAC encoder
 +- native aptX encoder and decoder
 +- Raw aptX muxer and demuxer
 +- NVIDIA NVDEC-accelerated H.264 and HEVC hwaccel decoding
++- Intel QSV-accelerated overlay filter
 +
 +
 +version 3.4:
 +- deflicker video filter
 +- doubleweave video filter
 +- lumakey video filter
 +- pixscope video filter
 +- oscilloscope video filter
 +- config.log and other configuration files moved into ffbuild/ directory
 +- update cuvid/nvenc headers to Video Codec SDK 8.0.14
 +- afir audio filter
 +- scale_cuda CUDA based video scale filter
 +- librsvg support for svg rasterization
 +- crossfeed audio filter
 +- spec compliant VP9 muxing support in MP4
 +- remove the libnut muxer/demuxer wrappers
 +- remove the libschroedinger encoder/decoder wrappers
 +- surround audio filter
 +- sofalizer filter switched to libmysofa
 +- Gremlin Digital Video demuxer and decoder
 +- headphone audio filter
 +- superequalizer audio filter
 +- roberts video filter
  - The x86 assembler default switched from yasm to nasm, pass
    --x86asmexe=yasm to configure to restore the old behavior.
 -- Cineform HD decoder
 -- VP9 superframe split/merge bitstream filters
 +- additional frame format support for Interplay MVE movies
 +- support for decoding through D3D11VA in ffmpeg
 +- limiter video filter
 +- libvmaf video filter
 +- Dolby E decoder and SMPTE 337M demuxer
 +- unpremultiply video filter
 +- tlut2 video filter
 +- floodfill video filter
 +- pseudocolor video filter
 +- raw G.726 muxer and demuxer, left- and right-justified
 +- NewTek NDI input/output device
 +- Some video filters with several inputs now use a common set of options:
 +  blend, libvmaf, lut3d, overlay, psnr, ssim.
 +  They must always be used by name.
 +- FITS demuxer and decoder
 +- FITS muxer and encoder
 +- add --disable-autodetect build switch
 +- drop deprecated qtkit input device (use avfoundation instead)
 +- despill video filter
 +- haas audio filter
 +- SUP/PGS subtitle muxer
 +- convolve video filter
 +- VP9 tile threading support
 +- KMS screen grabber
 +- CUDA thumbnail filter
 +- V4L2 mem2mem HW assisted codecs
 +- Rockchip MPP hardware decoding
 +- vmafmotion video filter
 +- use MIME type "G726" for little-endian G.726, "AAL2-G726" for big-endian G.726
 +
 +
 +version 3.3:
 +- CrystalHD decoder moved to new decode API
 +- add internal ebur128 library, remove external libebur128 dependency
 +- Pro-MPEG CoP #3-R2 FEC protocol
 +- premultiply video filter
 +- Support for spherical videos
 +- configure now fails if autodetect-libraries are requested but not found
 +- PSD Decoder
 +- 16.8 floating point pcm decoder
 +- 24.0 floating point pcm decoder
 +- Apple Pixlet decoder
 +- QDMC audio decoder
 +- NewTek SpeedHQ decoder
 +- MIDI Sample Dump Standard demuxer
 +- readeia608 filter
 +- Sample Dump eXchange demuxer
 +- abitscope multimedia filter
 +- Scenarist Closed Captions demuxer and muxer
 +- threshold filter
 +- midequalizer filter
 +- Optimal Huffman tables for (M)JPEG encoding
 +- VAAPI-accelerated MPEG-2 and VP8 encoding
  - FM Screen Capture Codec decoder
 -- ClearVideo decoder (I-frames only)
 -- support for decoding through D3D11VA in avconv
 -- Cinepak encoder
 -- Intel QSV-accelerated MJPEG encoding
 -- NVIDIA CUVID-accelerated H.264 and HEVC decoding
 -- Intel QSV-accelerated overlay filter
 +- native Opus encoder
 +- ScreenPressor decoder
 +- incomplete ClearVideo decoder
 +- Intel QSV video scaling and deinterlacing filters
 +- Support MOV with multiple sample description tables
 +- XPM decoder
 +- Removed the legacy X11 screen grabber, use XCB instead
 +- MPEG-7 Video Signature filter
 +- Removed asyncts filter (use af_aresample instead)
 +- Intel QSV-accelerated VP8 video decoding
 +- VAAPI-accelerated deinterlacing
  
  
 -version 12:
 -- aliases and defaults for Ogg subtypes (opus, spx)
 -- HEVC/H.265 RTP payload format (draft v6) packetizer and depacketizer
 -- avplay now exits by default at the end of playback
 -- XCB-based screen-grabber
 -- creating DASH compatible fragmented MP4, MPEG-DASH segmenting muxer
 -- H.261 RTP payload format (RFC 4587) depacketizer and experimental packetizer
 +version 3.2:
 +- libopenmpt demuxer
 +- tee protocol
 +- Changed metadata print option to accept general urls
 +- Alias muxer for Ogg Video (.ogv)
 +- VP8 in Ogg muxing
 +- curves filter doesn't automatically insert points at x=0 and x=1 anymore
 +- 16-bit support in curves filter and selectivecolor filter
 +- OpenH264 decoder wrapper
 +- MediaCodec H.264/HEVC/MPEG-4/VP8/VP9 hwaccel
 +- True Audio (TTA) muxer
 +- crystalizer audio filter
 +- acrusher audio filter
 +- bitplanenoise video filter
 +- floating point support in als decoder
 +- fifo muxer
 +- maskedclamp filter
 +- hysteresis filter
 +- lut2 filter
 +- yuvtestsrc filter
 +- CUDA CUVID H.263/VP8/VP9/10 bit HEVC (Dithered) Decoding
 +- vaguedenoiser filter
 +- added threads option per filter instance
 +- weave filter
 +- gblur filter
 +- avgblur filter
 +- sobel and prewitt filter
 +- MediaCodec HEVC/MPEG-4/VP8/VP9 decoding
 +- Meridian Lossless Packing (MLP) / TrueHD encoder
 +- Non-Local Means (nlmeans) denoising filter
 +- sdl2 output device and ffplay support
 +- sdl1 output device and sdl1 support removed
 +- extended mov edit list support
 +- libfaac encoder removed
 +- Matroska muxer now writes CRC32 elements by default in all Level 1 elements
 +- sidedata video and asidedata audio filter
 +- Changed mapping of rtp MIME type G726 to codec g726le.
 +- spec compliant VAAPI/DXVA2 VC-1 decoding of slices in frame-coded images
 +
 +
 +version 3.1:
 +- DXVA2-accelerated HEVC Main10 decoding
 +- fieldhint filter
 +- loop video filter and aloop audio filter
 +- Bob Weaver deinterlacing filter
 +- firequalizer filter
 +- datascope filter
 +- bench and abench filters
 +- ciescope filter
 +- protocol blacklisting API
 +- MediaCodec H264 decoding
 +- VC-2 HQ RTP payload format (draft v1) depacketizer and packetizer
 +- VP9 RTP payload format (draft v2) packetizer
 +- AudioToolbox audio decoders
 +- AudioToolbox audio encoders
 +- coreimage filter (GPU based image filtering on OSX)
 +- libdcadec removed
 +- bitstream filter for extracting DTS core
 +- ADPCM IMA DAT4 decoder
 +- musx demuxer
 +- aix demuxer
 +- remap filter
 +- hash and framehash muxers
 +- colorspace filter
 +- hdcd filter
 +- readvitc filter
 +- VAAPI-accelerated format conversion and scaling
 +- libnpp/CUDA-accelerated format conversion and scaling
 +- Duck TrueMotion 2.0 Real Time decoder
 +- Wideband Single-bit Data (WSD) demuxer
 +- VAAPI-accelerated H.264/HEVC/MJPEG encoding
 +- DTS Express (LBR) decoder
 +- Generic OpenMAX IL encoder with support for Raspberry Pi
 +- IFF ANIM demuxer & decoder
 +- Direct Stream Transfer (DST) decoder
 +- loudnorm filter
 +- MTAF demuxer and decoder
 +- MagicYUV decoder
 +- OpenExr improvements (tile data and B44/B44A support)
 +- BitJazz SheerVideo decoder
 +- CUDA CUVID H264/HEVC decoder
 +- 10-bit depth support in native utvideo decoder
 +- libutvideo wrapper removed
 +- YUY2 Lossless Codec decoder
 +- VideoToolbox H.264 encoder
 +
 +
 +version 3.0:
 +- Common Encryption (CENC) MP4 encoding and decoding support
 +- DXV decoding
 +- extrastereo filter
 +- ocr filter
 +- alimiter filter
 +- stereowiden filter
 +- stereotools filter
 +- rubberband filter
 +- tremolo filter
 +- agate filter
 +- chromakey filter
 +- maskedmerge filter
 +- Screenpresso SPV1 decoding
 +- chromaprint fingerprinting muxer
 +- ffplay dynamic volume control
 +- displace filter
 +- selectivecolor filter
 +- extensive native AAC encoder improvements and removal of experimental flag
 +- ADPCM PSX decoder
 +- 3dostr, dcstr, fsb, genh, vag, xvag, ads, msf, svag & vpk demuxer
 +- zscale filter
 +- wve demuxer
 +- zero-copy Intel QSV transcoding in ffmpeg
 +- shuffleframes filter
 +- SDX2 DPCM decoder
 +- vibrato filter
 +- innoHeim/Rsupport Screen Capture Codec decoder
 +- ADPCM AICA decoder
 +- Interplay ACM demuxer and audio decoder
 +- XMA1 & XMA2 decoder
 +- realtime filter
 +- anoisesrc audio filter source
 +- IVR demuxer
 +- compensationdelay filter
 +- acompressor filter
 +- support encoding 16-bit RLE SGI images
 +- apulsator filter
 +- sidechaingate audio filter
 +- mipsdspr1 option has been renamed to mipsdsp
 +- aemphasis filter
 +- mips32r5 option has been removed
 +- mips64r6 option has been removed
 +- DXVA2-accelerated VP9 decoding
 +- SOFAlizer: virtual binaural acoustics filter
 +- VAAPI VP9 hwaccel
 +- audio high-order multiband parametric equalizer
 +- automatic bitstream filtering
 +- showspectrumpic filter
 +- libstagefright support removed
 +- spectrumsynth filter
 +- ahistogram filter
 +- only seek with the right mouse button in ffplay
 +- toggle full screen when double-clicking with the left mouse button in ffplay
 +- afftfilt filter
 +- convolution filter
 +- libquvi support removed
 +- support for dvaudio in wav and avi
 +- libaacplus and libvo-aacenc support removed
 +- Cineform HD decoder
 +- new DCA decoder with full support for DTS-HD extensions
 +- significant performance improvements in Windows Television (WTV) demuxer
 +- nnedi deinterlacer
 +- streamselect video and astreamselect audio filter
 +- swaprect filter
 +- metadata video and ametadata audio filter
 +- SMPTE VC-2 HQ profile support for the Dirac decoder
 +- SMPTE VC-2 native encoder supporting the HQ profile
 +
 +
 +version 2.8:
 +- colorkey video filter
 +- BFSTM/BCSTM demuxer
 +- little-endian ADPCM_THP decoder
 +- Hap decoder and encoder
 +- DirectDraw Surface image/texture decoder
 +- ssim filter
 +- optional new ASF demuxer
 +- showvolume filter
 +- Many improvements to the JPEG 2000 decoder
 +- Go2Meeting decoding support
 +- adrawgraph audio and drawgraph video filter
 +- removegrain video filter
 +- Intel QSV-accelerated MPEG-2 video and HEVC encoding
 +- Intel QSV-accelerated MPEG-2 video and HEVC decoding
 +- Intel QSV-accelerated VC-1 video decoding
 +- libkvazaar HEVC encoder
 +- erosion, dilation, deflate and inflate video filters
 +- Dynamic Audio Normalizer as dynaudnorm filter
 +- Reverse video and areverse audio filter
 +- Random filter
 +- deband filter
 +- AAC fixed-point decoding
 +- sidechaincompress audio filter
 +- bitstream filter for converting HEVC from MP4 to Annex B
 +- acrossfade audio filter
 +- allyuv and allrgb video sources
 +- atadenoise video filter
 +- OS X VideoToolbox support
 +- aphasemeter filter
 +- showfreqs filter
 +- vectorscope filter
 +- waveform filter
 +- hstack and vstack filter
 +- Support DNx100 (1440x1080@8)
 +- VAAPI hevc hwaccel
 +- VDPAU hevc hwaccel
 +- framerate filter
 +- Switched default encoders for webm to VP9 and Opus
 +- Removed experimental flag from the JPEG 2000 encoder
 +
 +
 +version 2.7:
 +- FFT video filter
 +- TDSC decoder
 +- DTS lossless extension (XLL) decoding (not lossless, disabled by default)
 +- showwavespic filter
 +- DTS decoding through libdcadec
 +- Drop support for nvenc API before 5.0
 +- nvenc HEVC encoder
 +- Detelecine filter
 +- Intel QSV-accelerated H.264 encoding
 +- MMAL-accelerated H.264 decoding
 +- basic APNG encoder and muxer with default extension "apng"
 +- unpack DivX-style packed B-frames in MPEG-4 bitstream filter
 +- WebM Live Chunk Muxer
 +- nvenc level and tier options
 +- chorus filter
 +- Canopus HQ/HQA decoder
 +- Automatically rotate videos based on metadata in ffmpeg
 +- improved Quickdraw compatibility
 +- VP9 high bit-depth and extended colorspaces decoding support
 +- WebPAnimEncoder API when available for encoding and muxing WebP
 +- Direct3D11-accelerated decoding
 +- Support Secure Transport
 +- Multipart JPEG demuxer
 +
 +
 +version 2.6:
 +- nvenc encoder
 +- 10bit spp filter
 +- colorlevels filter
 +- RIFX format for *.wav files
  - RTP/mpegts muxer
 -- VP8 in Ogg demuxing
 +- non continuous cache protocol support
 +- tblend filter
 +- cropdetect support for non 8bpp, absolute (if limit >= 1) and relative (if limit < 1.0) threshold
 +- Camellia symmetric block cipher
  - OpenH264 encoder wrapper
 +- VOC seeking support
 +- Closed caption Decoder
 +- fspp, uspp, pp7 MPlayer postprocessing filters ported to native filters
 +- showpalette filter
 +- Twofish symmetric block cipher
  - Support DNx100 (960x720@8)
 -- Direct3D11-accelerated decoding
 +- eq2 filter ported from libmpcodecs as eq filter
 +- removed libmpcodecs
 +- Changed default DNxHD colour range in QuickTime .mov derivatives to mpeg range
 +- ported softpulldown filter from libmpcodecs as repeatfields filter
 +- dcshift filter
 +- RTP depacketizer for loss tolerant payload format for MP3 audio (RFC 5219)
 +- RTP depacketizer for AC3 payload format (RFC 4184)
 +- palettegen and paletteuse filters
 +- VP9 RTP payload format (draft 0) experimental depacketizer
 +- RTP depacketizer for DV (RFC 6469)
  - DXVA2-accelerated HEVC decoding
  - AAC ELD 480 decoding
  - Intel QSV-accelerated H.264 decoding
diff --cc configure
index f9a27b5019,a3cfe37680..d949fecbef
--- a/configure
+++ b/configure
@@@ -2783,8 -2277,8 +2784,9 @@@ omx_rpi_select="omx
  qsv_deps="libmfx"
  qsvdec_select="qsv"
  qsvenc_select="qsv"
+ qsvvpp_select="qsv"
  vaapi_encode_deps="vaapi"
 +v4l2_m2m_deps_any="linux_videodev2_h"
  
  hwupload_cuda_filter_deps="cuda"
  scale_npp_filter_deps="cuda libnpp"
@@@ -3197,111 -2533,34 +3199,115 @@@ cropdetect_filter_deps="gpl
  deinterlace_qsv_filter_deps="libmfx"
  deinterlace_vaapi_filter_deps="vaapi"
  delogo_filter_deps="gpl"
 +deshake_filter_select="pixelutils"
 +deshake_filter_suggest="opencl"
  drawtext_filter_deps="libfreetype"
 -drawtext_filter_suggest="libfontconfig"
 +drawtext_filter_suggest="libfontconfig libfribidi"
 +elbg_filter_deps="avcodec"
 +eq_filter_deps="gpl"
 +fftfilt_filter_deps="avcodec"
 +fftfilt_filter_select="rdft"
 +find_rect_filter_deps="avcodec avformat gpl"
 +firequalizer_filter_deps="avcodec"
 +firequalizer_filter_select="rdft"
 +flite_filter_deps="libflite"
 +framerate_filter_select="pixelutils"
  frei0r_filter_deps="frei0r libdl"
  frei0r_src_filter_deps="frei0r libdl"
 -hdcd_filter_deps="libhdcd"
 +fspp_filter_deps="gpl"
 +geq_filter_deps="gpl"
 +histeq_filter_deps="gpl"
  hqdn3d_filter_deps="gpl"
  interlace_filter_deps="gpl"
 +kerndeint_filter_deps="gpl"
 +ladspa_filter_deps="ladspa libdl"
 +mcdeint_filter_deps="avcodec gpl"
  movie_filter_deps="avcodec avformat"
 +mpdecimate_filter_deps="gpl"
 +mpdecimate_filter_select="pixelutils"
 +mptestsrc_filter_deps="gpl"
 +negate_filter_deps="lut_filter"
 +nnedi_filter_deps="gpl"
 +ocr_filter_deps="libtesseract"
  ocv_filter_deps="libopencv"
+ overlay_qsv_filter_deps="libmfx"
+ overlay_qsv_filter_select="qsvvpp"
 +owdenoise_filter_deps="gpl"
 +pan_filter_deps="swresample"
 +perspective_filter_deps="gpl"
 +phase_filter_deps="gpl"
 +pp7_filter_deps="gpl"
 +pp_filter_deps="gpl postproc"
 +pullup_filter_deps="gpl"
 +removelogo_filter_deps="avcodec avformat swscale"
 +repeatfields_filter_deps="gpl"
  resample_filter_deps="avresample"
 +rubberband_filter_deps="librubberband"
 +sab_filter_deps="gpl swscale"
 +scale2ref_filter_deps="swscale"
  scale_filter_deps="swscale"
  scale_qsv_filter_deps="libmfx"
 +select_filter_select="pixelutils"
 +showcqt_filter_deps="avcodec avformat swscale"
 +showcqt_filter_suggest="libfontconfig libfreetype"
 +showcqt_filter_select="fft"
 +showfreqs_filter_deps="avcodec"
 +showfreqs_filter_select="fft"
 +showspectrum_filter_deps="avcodec"
 +showspectrum_filter_select="fft"
 +showspectrumpic_filter_deps="avcodec"
 +showspectrumpic_filter_select="fft"
 +signature_filter_deps="gpl avcodec avformat"
 +smartblur_filter_deps="gpl swscale"
 +sofalizer_filter_deps="libmysofa avcodec"
 +sofalizer_filter_select="fft"
 +spectrumsynth_filter_deps="avcodec"
 +spectrumsynth_filter_select="fft"
 +spp_filter_deps="gpl avcodec"
 +spp_filter_select="fft idctdsp fdctdsp me_cmp pixblockdsp"
 +stereo3d_filter_deps="gpl"
 +subtitles_filter_deps="avformat avcodec libass"
 +super2xsai_filter_deps="gpl"
 +pixfmts_super2xsai_test_deps="super2xsai_filter"
 +tinterlace_filter_deps="gpl"
 +tinterlace_merge_test_deps="tinterlace_filter"
 +tinterlace_pad_test_deps="tinterlace_filter"
 +tonemap_filter_deps="const_nan"
 +uspp_filter_deps="gpl avcodec"
 +unsharp_filter_suggest="opencl"
 +vaguedenoiser_filter_deps="gpl"
 +vidstabdetect_filter_deps="libvidstab"
 +vidstabtransform_filter_deps="libvidstab"
 +libvmaf_filter_deps="libvmaf"
 +zmq_filter_deps="libzmq"
 +zoompan_filter_deps="swscale"
 +zscale_filter_deps="libzimg const_nan"
  scale_vaapi_filter_deps="vaapi VAProcPipelineParameterBuffer"
+ vpp_qsv_filter_deps="libmfx"
+ vpp_qsv_filter_select="qsvvpp"
  
  # examples
 +avio_dir_cmd_deps="avformat avutil"
 +avio_reading_deps="avformat avcodec avutil"
  decode_audio_example_deps="avcodec avutil"
  decode_video_example_deps="avcodec avutil"
 +demuxing_decoding_example_deps="avcodec avformat avutil"
  encode_audio_example_deps="avcodec avutil"
  encode_video_example_deps="avcodec avutil"
 +extract_mvs_example_deps="avcodec avformat avutil"
  filter_audio_example_deps="avfilter avutil"
 +filtering_audio_example_deps="avfilter avcodec avformat avutil"
 +filtering_video_example_deps="avfilter avcodec avformat avutil"
 +http_multiclient_example_deps="avformat avutil fork"
 +hw_decode_example_deps="avcodec avformat avutil"
  metadata_example_deps="avformat avutil"
 -output_example_deps="avcodec avformat avresample avutil swscale"
 +muxing_example_deps="avcodec avformat avutil swscale"
  qsvdec_example_deps="avcodec avutil libmfx h264_qsv_decoder"
 -transcode_aac_example_deps="avcodec avformat avresample"
 +remuxing_example_deps="avcodec avformat avutil"
 +resampling_audio_example_deps="avutil swresample"
 +scaling_video_example_deps="avutil swscale"
 +transcode_aac_example_deps="avcodec avformat swresample"
 +transcoding_example_deps="avfilter avcodec avformat avutil"
  
  # EXTRALIBS_LIST
  cpu_init_extralibs="pthreads_extralibs"
diff --cc libavfilter/Makefile
index 9873532d07,8277626770..b7ddcd226d
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@@ -25,55 -20,19 +25,58 @@@ OBJS = allfilters.
  
  OBJS-$(HAVE_THREADS)                         += pthread.o
  
+ # subsystems
+ OBJS-$(CONFIG_QSVVPP)                        += qsvvpp.o
+ 
  # audio filters
 +OBJS-$(CONFIG_ABENCH_FILTER)                 += f_bench.o
 +OBJS-$(CONFIG_ACOMPRESSOR_FILTER)            += af_sidechaincompress.o
 +OBJS-$(CONFIG_ACOPY_FILTER)                  += af_acopy.o
 +OBJS-$(CONFIG_ACROSSFADE_FILTER)             += af_afade.o
 +OBJS-$(CONFIG_ACRUSHER_FILTER)               += af_acrusher.o
 +OBJS-$(CONFIG_ADELAY_FILTER)                 += af_adelay.o
 +OBJS-$(CONFIG_AECHO_FILTER)                  += af_aecho.o
 +OBJS-$(CONFIG_AEMPHASIS_FILTER)              += af_aemphasis.o
 +OBJS-$(CONFIG_AEVAL_FILTER)                  += aeval.o
 +OBJS-$(CONFIG_AFADE_FILTER)                  += af_afade.o
 +OBJS-$(CONFIG_AFFTFILT_FILTER)               += af_afftfilt.o
 +OBJS-$(CONFIG_AFIR_FILTER)                   += af_afir.o
  OBJS-$(CONFIG_AFORMAT_FILTER)                += af_aformat.o
 +OBJS-$(CONFIG_AGATE_FILTER)                  += af_agate.o
 +OBJS-$(CONFIG_AINTERLEAVE_FILTER)            += f_interleave.o
 +OBJS-$(CONFIG_ALIMITER_FILTER)               += af_alimiter.o
 +OBJS-$(CONFIG_ALLPASS_FILTER)                += af_biquads.o
 +OBJS-$(CONFIG_ALOOP_FILTER)                  += f_loop.o
 +OBJS-$(CONFIG_AMERGE_FILTER)                 += af_amerge.o
 +OBJS-$(CONFIG_AMETADATA_FILTER)              += f_metadata.o
  OBJS-$(CONFIG_AMIX_FILTER)                   += af_amix.o
 +OBJS-$(CONFIG_ANEQUALIZER_FILTER)            += af_anequalizer.o
  OBJS-$(CONFIG_ANULL_FILTER)                  += af_anull.o
 +OBJS-$(CONFIG_APAD_FILTER)                   += af_apad.o
 +OBJS-$(CONFIG_APERMS_FILTER)                 += f_perms.o
 +OBJS-$(CONFIG_APHASER_FILTER)                += af_aphaser.o generate_wave_table.o
 +OBJS-$(CONFIG_APULSATOR_FILTER)              += af_apulsator.o
 +OBJS-$(CONFIG_AREALTIME_FILTER)              += f_realtime.o
 +OBJS-$(CONFIG_ARESAMPLE_FILTER)              += af_aresample.o
 +OBJS-$(CONFIG_AREVERSE_FILTER)               += f_reverse.o
 +OBJS-$(CONFIG_ASELECT_FILTER)                += f_select.o
 +OBJS-$(CONFIG_ASENDCMD_FILTER)               += f_sendcmd.o
 +OBJS-$(CONFIG_ASETNSAMPLES_FILTER)           += af_asetnsamples.o
  OBJS-$(CONFIG_ASETPTS_FILTER)                += setpts.o
 +OBJS-$(CONFIG_ASETRATE_FILTER)               += af_asetrate.o
  OBJS-$(CONFIG_ASETTB_FILTER)                 += settb.o
  OBJS-$(CONFIG_ASHOWINFO_FILTER)              += af_ashowinfo.o
 +OBJS-$(CONFIG_ASIDEDATA_FILTER)              += f_sidedata.o
  OBJS-$(CONFIG_ASPLIT_FILTER)                 += split.o
 -OBJS-$(CONFIG_ASYNCTS_FILTER)                += af_asyncts.o
 +OBJS-$(CONFIG_ASTATS_FILTER)                 += af_astats.o
 +OBJS-$(CONFIG_ASTREAMSELECT_FILTER)          += f_streamselect.o framesync.o
 +OBJS-$(CONFIG_ATEMPO_FILTER)                 += af_atempo.o
  OBJS-$(CONFIG_ATRIM_FILTER)                  += trim.o
 +OBJS-$(CONFIG_AZMQ_FILTER)                   += f_zmq.o
 +OBJS-$(CONFIG_BANDPASS_FILTER)               += af_biquads.o
 +OBJS-$(CONFIG_BANDREJECT_FILTER)             += af_biquads.o
 +OBJS-$(CONFIG_BASS_FILTER)                   += af_biquads.o
 +OBJS-$(CONFIG_BIQUAD_FILTER)                 += af_biquads.o
  OBJS-$(CONFIG_BS2B_FILTER)                   += af_bs2b.o
  OBJS-$(CONFIG_CHANNELMAP_FILTER)             += af_channelmap.o
  OBJS-$(CONFIG_CHANNELSPLIT_FILTER)           += af_channelsplit.o
@@@ -211,188 -69,47 +214,192 @@@ OBJS-$(CONFIG_HWDOWNLOAD_FILTER
  OBJS-$(CONFIG_HWMAP_FILTER)                  += vf_hwmap.o
  OBJS-$(CONFIG_HWUPLOAD_CUDA_FILTER)          += vf_hwupload_cuda.o
  OBJS-$(CONFIG_HWUPLOAD_FILTER)               += vf_hwupload.o
 +OBJS-$(CONFIG_HYSTERESIS_FILTER)             += vf_hysteresis.o framesync.o
 +OBJS-$(CONFIG_IDET_FILTER)                   += vf_idet.o
 +OBJS-$(CONFIG_IL_FILTER)                     += vf_il.o
 +OBJS-$(CONFIG_INFLATE_FILTER)                += vf_neighbor.o
  OBJS-$(CONFIG_INTERLACE_FILTER)              += vf_interlace.o
 +OBJS-$(CONFIG_INTERLEAVE_FILTER)             += f_interleave.o
 +OBJS-$(CONFIG_KERNDEINT_FILTER)              += vf_kerndeint.o
 +OBJS-$(CONFIG_LENSCORRECTION_FILTER)         += vf_lenscorrection.o
 +OBJS-$(CONFIG_LIBVMAF_FILTER)                += vf_libvmaf.o framesync.o
 +OBJS-$(CONFIG_LIMITER_FILTER)                += vf_limiter.o
 +OBJS-$(CONFIG_LOOP_FILTER)                   += f_loop.o
 +OBJS-$(CONFIG_LUMAKEY_FILTER)                += vf_lumakey.o
  OBJS-$(CONFIG_LUT_FILTER)                    += vf_lut.o
 +OBJS-$(CONFIG_LUT2_FILTER)                   += vf_lut2.o framesync.o
 +OBJS-$(CONFIG_LUT3D_FILTER)                  += vf_lut3d.o
  OBJS-$(CONFIG_LUTRGB_FILTER)                 += vf_lut.o
  OBJS-$(CONFIG_LUTYUV_FILTER)                 += vf_lut.o
 +OBJS-$(CONFIG_MASKEDCLAMP_FILTER)            += vf_maskedclamp.o framesync.o
 +OBJS-$(CONFIG_MASKEDMERGE_FILTER)            += vf_maskedmerge.o framesync.o
 +OBJS-$(CONFIG_MCDEINT_FILTER)                += vf_mcdeint.o
 +OBJS-$(CONFIG_MERGEPLANES_FILTER)            += vf_mergeplanes.o framesync.o
 +OBJS-$(CONFIG_MESTIMATE_FILTER)              += vf_mestimate.o motion_estimation.o
 +OBJS-$(CONFIG_METADATA_FILTER)               += f_metadata.o
 +OBJS-$(CONFIG_MIDEQUALIZER_FILTER)           += vf_midequalizer.o framesync.o
 +OBJS-$(CONFIG_MINTERPOLATE_FILTER)           += vf_minterpolate.o motion_estimation.o
 +OBJS-$(CONFIG_MPDECIMATE_FILTER)             += vf_mpdecimate.o
  OBJS-$(CONFIG_NEGATE_FILTER)                 += vf_lut.o
 +OBJS-$(CONFIG_NLMEANS_FILTER)                += vf_nlmeans.o
 +OBJS-$(CONFIG_NNEDI_FILTER)                  += vf_nnedi.o
  OBJS-$(CONFIG_NOFORMAT_FILTER)               += vf_format.o
 +OBJS-$(CONFIG_NOISE_FILTER)                  += vf_noise.o
  OBJS-$(CONFIG_NULL_FILTER)                   += vf_null.o
 +OBJS-$(CONFIG_OCR_FILTER)                    += vf_ocr.o
  OBJS-$(CONFIG_OCV_FILTER)                    += vf_libopencv.o
 -OBJS-$(CONFIG_OVERLAY_FILTER)                += vf_overlay.o
 +OBJS-$(CONFIG_OPENCL)                        += deshake_opencl.o unsharp_opencl.o
 +OBJS-$(CONFIG_OSCILLOSCOPE_FILTER)           += vf_datascope.o
 +OBJS-$(CONFIG_OVERLAY_FILTER)                += vf_overlay.o framesync.o
+ OBJS-$(CONFIG_OVERLAY_QSV_FILTER)            += vf_overlay_qsv.o
 +OBJS-$(CONFIG_OWDENOISE_FILTER)              += vf_owdenoise.o
  OBJS-$(CONFIG_PAD_FILTER)                    += vf_pad.o
 +OBJS-$(CONFIG_PALETTEGEN_FILTER)             += vf_palettegen.o
 +OBJS-$(CONFIG_PALETTEUSE_FILTER)             += vf_paletteuse.o framesync.o
 +OBJS-$(CONFIG_PERMS_FILTER)                  += f_perms.o
 +OBJS-$(CONFIG_PERSPECTIVE_FILTER)            += vf_perspective.o
 +OBJS-$(CONFIG_PHASE_FILTER)                  += vf_phase.o
  OBJS-$(CONFIG_PIXDESCTEST_FILTER)            += vf_pixdesctest.o
 -OBJS-$(CONFIG_SCALE_FILTER)                  += vf_scale.o
 -OBJS-$(CONFIG_SCALE_NPP_FILTER)              += vf_scale_npp.o
 +OBJS-$(CONFIG_PIXSCOPE_FILTER)               += vf_datascope.o
 +OBJS-$(CONFIG_PP_FILTER)                     += vf_pp.o
 +OBJS-$(CONFIG_PP7_FILTER)                    += vf_pp7.o
 +OBJS-$(CONFIG_PREMULTIPLY_FILTER)            += vf_premultiply.o framesync.o
 +OBJS-$(CONFIG_PREWITT_FILTER)                += vf_convolution.o
 +OBJS-$(CONFIG_PSEUDOCOLOR_FILTER)            += vf_pseudocolor.o
 +OBJS-$(CONFIG_PSNR_FILTER)                   += vf_psnr.o framesync.o
 +OBJS-$(CONFIG_PULLUP_FILTER)                 += vf_pullup.o
 +OBJS-$(CONFIG_QP_FILTER)                     += vf_qp.o
 +OBJS-$(CONFIG_RANDOM_FILTER)                 += vf_random.o
 +OBJS-$(CONFIG_READEIA608_FILTER)             += vf_readeia608.o
 +OBJS-$(CONFIG_READVITC_FILTER)               += vf_readvitc.o
 +OBJS-$(CONFIG_REALTIME_FILTER)               += f_realtime.o
 +OBJS-$(CONFIG_REMAP_FILTER)                  += vf_remap.o framesync.o
 +OBJS-$(CONFIG_REMOVEGRAIN_FILTER)            += vf_removegrain.o
 +OBJS-$(CONFIG_REMOVELOGO_FILTER)             += bbox.o lswsutils.o lavfutils.o vf_removelogo.o
 +OBJS-$(CONFIG_REPEATFIELDS_FILTER)           += vf_repeatfields.o
 +OBJS-$(CONFIG_REVERSE_FILTER)                += f_reverse.o
 +OBJS-$(CONFIG_ROBERTS_FILTER)                += vf_convolution.o
 +OBJS-$(CONFIG_ROTATE_FILTER)                 += vf_rotate.o
 +OBJS-$(CONFIG_SAB_FILTER)                    += vf_sab.o
 +OBJS-$(CONFIG_SCALE_FILTER)                  += vf_scale.o scale.o
 +OBJS-$(CONFIG_SCALE_CUDA_FILTER)             += vf_scale_cuda.o vf_scale_cuda.ptx.o
 +OBJS-$(CONFIG_SCALE_NPP_FILTER)              += vf_scale_npp.o scale.o
  OBJS-$(CONFIG_SCALE_QSV_FILTER)              += vf_scale_qsv.o
 -OBJS-$(CONFIG_SCALE_VAAPI_FILTER)            += vf_scale_vaapi.o
 -OBJS-$(CONFIG_SELECT_FILTER)                 += vf_select.o
 +OBJS-$(CONFIG_SCALE_VAAPI_FILTER)            += vf_scale_vaapi.o scale.o
 +OBJS-$(CONFIG_SCALE2REF_FILTER)              += vf_scale.o scale.o
 +OBJS-$(CONFIG_SELECT_FILTER)                 += f_select.o
 +OBJS-$(CONFIG_SELECTIVECOLOR_FILTER)         += vf_selectivecolor.o
 +OBJS-$(CONFIG_SENDCMD_FILTER)                += f_sendcmd.o
 +OBJS-$(CONFIG_SEPARATEFIELDS_FILTER)         += vf_separatefields.o
  OBJS-$(CONFIG_SETDAR_FILTER)                 += vf_aspect.o
 +OBJS-$(CONFIG_SETFIELD_FILTER)               += vf_setfield.o
  OBJS-$(CONFIG_SETPTS_FILTER)                 += setpts.o
  OBJS-$(CONFIG_SETSAR_FILTER)                 += vf_aspect.o
  OBJS-$(CONFIG_SETTB_FILTER)                  += settb.o
  OBJS-$(CONFIG_SHOWINFO_FILTER)               += vf_showinfo.o
 +OBJS-$(CONFIG_SHOWPALETTE_FILTER)            += vf_showpalette.o
 +OBJS-$(CONFIG_SHUFFLEFRAMES_FILTER)          += vf_shuffleframes.o
  OBJS-$(CONFIG_SHUFFLEPLANES_FILTER)          += vf_shuffleplanes.o
 +OBJS-$(CONFIG_SIDEDATA_FILTER)               += f_sidedata.o
 +OBJS-$(CONFIG_SIGNALSTATS_FILTER)            += vf_signalstats.o
 +OBJS-$(CONFIG_SIGNATURE_FILTER)              += vf_signature.o
 +OBJS-$(CONFIG_SMARTBLUR_FILTER)              += vf_smartblur.o
 +OBJS-$(CONFIG_SOBEL_FILTER)                  += vf_convolution.o
  OBJS-$(CONFIG_SPLIT_FILTER)                  += split.o
 +OBJS-$(CONFIG_SPP_FILTER)                    += vf_spp.o
 +OBJS-$(CONFIG_SSIM_FILTER)                   += vf_ssim.o framesync.o
 +OBJS-$(CONFIG_STEREO3D_FILTER)               += vf_stereo3d.o
 +OBJS-$(CONFIG_STREAMSELECT_FILTER)           += f_streamselect.o framesync.o
 +OBJS-$(CONFIG_SUBTITLES_FILTER)              += vf_subtitles.o
 +OBJS-$(CONFIG_SUPER2XSAI_FILTER)             += vf_super2xsai.o
 +OBJS-$(CONFIG_SWAPRECT_FILTER)               += vf_swaprect.o
 +OBJS-$(CONFIG_SWAPUV_FILTER)                 += vf_swapuv.o
 +OBJS-$(CONFIG_TBLEND_FILTER)                 += vf_blend.o framesync.o
 +OBJS-$(CONFIG_TELECINE_FILTER)               += vf_telecine.o
 +OBJS-$(CONFIG_THRESHOLD_FILTER)              += vf_threshold.o framesync.o
 +OBJS-$(CONFIG_THUMBNAIL_FILTER)              += vf_thumbnail.o
 +OBJS-$(CONFIG_THUMBNAIL_CUDA_FILTER)         += vf_thumbnail_cuda.o vf_thumbnail_cuda.ptx.o
 +OBJS-$(CONFIG_TILE_FILTER)                   += vf_tile.o
 +OBJS-$(CONFIG_TINTERLACE_FILTER)             += vf_tinterlace.o
 +OBJS-$(CONFIG_TLUT2_FILTER)                  += vf_lut2.o framesync.o
 +OBJS-$(CONFIG_TONEMAP_FILTER)                += vf_tonemap.o
  OBJS-$(CONFIG_TRANSPOSE_FILTER)              += vf_transpose.o
  OBJS-$(CONFIG_TRIM_FILTER)                   += trim.o
 +OBJS-$(CONFIG_UNPREMULTIPLY_FILTER)          += vf_premultiply.o framesync.o
  OBJS-$(CONFIG_UNSHARP_FILTER)                += vf_unsharp.o
 +OBJS-$(CONFIG_USPP_FILTER)                   += vf_uspp.o
 +OBJS-$(CONFIG_VAGUEDENOISER_FILTER)          += vf_vaguedenoiser.o
 +OBJS-$(CONFIG_VECTORSCOPE_FILTER)            += vf_vectorscope.o
  OBJS-$(CONFIG_VFLIP_FILTER)                  += vf_vflip.o
 +OBJS-$(CONFIG_VIDSTABDETECT_FILTER)          += vidstabutils.o vf_vidstabdetect.o
 +OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER)       += vidstabutils.o vf_vidstabtransform.o
 +OBJS-$(CONFIG_VIGNETTE_FILTER)               += vf_vignette.o
 +OBJS-$(CONFIG_VMAFMOTION_FILTER)             += vf_vmafmotion.o framesync.o
+ OBJS-$(CONFIG_VPP_QSV_FILTER)                += vf_vpp_qsv.o
 +OBJS-$(CONFIG_VSTACK_FILTER)                 += vf_stack.o framesync.o
 +OBJS-$(CONFIG_W3FDIF_FILTER)                 += vf_w3fdif.o
 +OBJS-$(CONFIG_WAVEFORM_FILTER)               += vf_waveform.o
 +OBJS-$(CONFIG_WEAVE_FILTER)                  += vf_weave.o
 +OBJS-$(CONFIG_XBR_FILTER)                    += vf_xbr.o
  OBJS-$(CONFIG_YADIF_FILTER)                  += vf_yadif.o
 +OBJS-$(CONFIG_ZMQ_FILTER)                    += f_zmq.o
 +OBJS-$(CONFIG_ZOOMPAN_FILTER)                += vf_zoompan.o
 +OBJS-$(CONFIG_ZSCALE_FILTER)                 += vf_zscale.o
  
 -OBJS-$(CONFIG_NULLSINK_FILTER)               += vsink_nullsink.o
 -
 -OBJS-$(CONFIG_COLOR_FILTER)                  += vsrc_color.o
 +OBJS-$(CONFIG_ALLRGB_FILTER)                 += vsrc_testsrc.o
 +OBJS-$(CONFIG_ALLYUV_FILTER)                 += vsrc_testsrc.o
 +OBJS-$(CONFIG_CELLAUTO_FILTER)               += vsrc_cellauto.o
 +OBJS-$(CONFIG_COLOR_FILTER)                  += vsrc_testsrc.o
 +OBJS-$(CONFIG_COREIMAGESRC_FILTER)           += vf_coreimage.o
  OBJS-$(CONFIG_FREI0R_SRC_FILTER)             += vf_frei0r.o
 -OBJS-$(CONFIG_MOVIE_FILTER)                  += vsrc_movie.o
 -OBJS-$(CONFIG_NULLSRC_FILTER)                += vsrc_nullsrc.o
 +OBJS-$(CONFIG_HALDCLUTSRC_FILTER)            += vsrc_testsrc.o
 +OBJS-$(CONFIG_LIFE_FILTER)                   += vsrc_life.o
 +OBJS-$(CONFIG_MANDELBROT_FILTER)             += vsrc_mandelbrot.o
 +OBJS-$(CONFIG_MPTESTSRC_FILTER)              += vsrc_mptestsrc.o
 +OBJS-$(CONFIG_NULLSRC_FILTER)                += vsrc_testsrc.o
  OBJS-$(CONFIG_RGBTESTSRC_FILTER)             += vsrc_testsrc.o
 +OBJS-$(CONFIG_SMPTEBARS_FILTER)              += vsrc_testsrc.o
 +OBJS-$(CONFIG_SMPTEHDBARS_FILTER)            += vsrc_testsrc.o
  OBJS-$(CONFIG_TESTSRC_FILTER)                += vsrc_testsrc.o
 +OBJS-$(CONFIG_TESTSRC2_FILTER)               += vsrc_testsrc.o
 +OBJS-$(CONFIG_YUVTESTSRC_FILTER)             += vsrc_testsrc.o
 +
 +OBJS-$(CONFIG_NULLSINK_FILTER)               += vsink_nullsink.o
 +
 +# multimedia filters
 +OBJS-$(CONFIG_ABITSCOPE_FILTER)              += avf_abitscope.o
 +OBJS-$(CONFIG_ADRAWGRAPH_FILTER)             += f_drawgraph.o
 +OBJS-$(CONFIG_AHISTOGRAM_FILTER)             += avf_ahistogram.o
 +OBJS-$(CONFIG_APHASEMETER_FILTER)            += avf_aphasemeter.o
 +OBJS-$(CONFIG_AVECTORSCOPE_FILTER)           += avf_avectorscope.o
 +OBJS-$(CONFIG_CONCAT_FILTER)                 += avf_concat.o
 +OBJS-$(CONFIG_SHOWCQT_FILTER)                += avf_showcqt.o lswsutils.o lavfutils.o
 +OBJS-$(CONFIG_SHOWFREQS_FILTER)              += avf_showfreqs.o
 +OBJS-$(CONFIG_SHOWSPECTRUM_FILTER)           += avf_showspectrum.o
 +OBJS-$(CONFIG_SHOWSPECTRUMPIC_FILTER)        += avf_showspectrum.o
 +OBJS-$(CONFIG_SHOWVOLUME_FILTER)             += avf_showvolume.o
 +OBJS-$(CONFIG_SHOWWAVES_FILTER)              += avf_showwaves.o
 +OBJS-$(CONFIG_SHOWWAVESPIC_FILTER)           += avf_showwaves.o
 +OBJS-$(CONFIG_SPECTRUMSYNTH_FILTER)          += vaf_spectrumsynth.o
 +
 +# multimedia sources
 +OBJS-$(CONFIG_AMOVIE_FILTER)                 += src_movie.o
 +OBJS-$(CONFIG_MOVIE_FILTER)                  += src_movie.o
 +
 +# Windows resource file
 +SLIBOBJS-$(HAVE_GNU_WINDRES)                 += avfilterres.o
 +
 +SKIPHEADERS-$(CONFIG_LIBVIDSTAB)             += vidstabutils.h
 +SKIPHEADERS-$(CONFIG_OPENCL)                 += opencl_internal.h deshake_opencl_kernel.h unsharp_opencl_kernel.h
 +
 +OBJS-$(CONFIG_SHARED)                        += log2_tab.o
  
+ SKIPHEADERS-$(CONFIG_QSVVPP)                 += qsvvpp.h
+ 
  TOOLS     = graph2dot
 -TESTPROGS = filtfmts
 +TESTPROGS = drawutils filtfmts formats integral
 +
 +TOOLS-$(CONFIG_LIBZMQ) += zmqsend
 +
 +clean::
 +	$(RM) $(CLEANSUFFIXES:%=libavfilter/libmpcodecs/%)
diff --cc libavfilter/allfilters.c
index 9b672a7a7e,2b3a67244e..3647a111ec
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@@ -224,73 -89,19 +224,74 @@@ static void register_all(void
      REGISTER_FILTER(HWMAP,          hwmap,          vf);
      REGISTER_FILTER(HWUPLOAD,       hwupload,       vf);
      REGISTER_FILTER(HWUPLOAD_CUDA,  hwupload_cuda,  vf);
 +    REGISTER_FILTER(HYSTERESIS,     hysteresis,     vf);
 +    REGISTER_FILTER(IDET,           idet,           vf);
 +    REGISTER_FILTER(IL,             il,             vf);
 +    REGISTER_FILTER(INFLATE,        inflate,        vf);
      REGISTER_FILTER(INTERLACE,      interlace,      vf);
 +    REGISTER_FILTER(INTERLEAVE,     interleave,     vf);
 +    REGISTER_FILTER(KERNDEINT,      kerndeint,      vf);
 +    REGISTER_FILTER(LENSCORRECTION, lenscorrection, vf);
 +    REGISTER_FILTER(LIBVMAF,        libvmaf,        vf);
 +    REGISTER_FILTER(LIMITER,        limiter,        vf);
 +    REGISTER_FILTER(LOOP,           loop,           vf);
 +    REGISTER_FILTER(LUMAKEY,        lumakey,        vf);
      REGISTER_FILTER(LUT,            lut,            vf);
 +    REGISTER_FILTER(LUT2,           lut2,           vf);
 +    REGISTER_FILTER(LUT3D,          lut3d,          vf);
      REGISTER_FILTER(LUTRGB,         lutrgb,         vf);
      REGISTER_FILTER(LUTYUV,         lutyuv,         vf);
 +    REGISTER_FILTER(MASKEDCLAMP,    maskedclamp,    vf);
 +    REGISTER_FILTER(MASKEDMERGE,    maskedmerge,    vf);
 +    REGISTER_FILTER(MCDEINT,        mcdeint,        vf);
 +    REGISTER_FILTER(MERGEPLANES,    mergeplanes,    vf);
 +    REGISTER_FILTER(MESTIMATE,      mestimate,      vf);
 +    REGISTER_FILTER(METADATA,       metadata,       vf);
 +    REGISTER_FILTER(MIDEQUALIZER,   midequalizer,   vf);
 +    REGISTER_FILTER(MINTERPOLATE,   minterpolate,   vf);
 +    REGISTER_FILTER(MPDECIMATE,     mpdecimate,     vf);
      REGISTER_FILTER(NEGATE,         negate,         vf);
 +    REGISTER_FILTER(NLMEANS,        nlmeans,        vf);
 +    REGISTER_FILTER(NNEDI,          nnedi,          vf);
      REGISTER_FILTER(NOFORMAT,       noformat,       vf);
 +    REGISTER_FILTER(NOISE,          noise,          vf);
      REGISTER_FILTER(NULL,           null,           vf);
 +    REGISTER_FILTER(OCR,            ocr,            vf);
      REGISTER_FILTER(OCV,            ocv,            vf);
 +    REGISTER_FILTER(OSCILLOSCOPE,   oscilloscope,   vf);
      REGISTER_FILTER(OVERLAY,        overlay,        vf);
+     REGISTER_FILTER(OVERLAY_QSV,    overlay_qsv,    vf);
 +    REGISTER_FILTER(OWDENOISE,      owdenoise,      vf);
      REGISTER_FILTER(PAD,            pad,            vf);
 +    REGISTER_FILTER(PALETTEGEN,     palettegen,     vf);
 +    REGISTER_FILTER(PALETTEUSE,     paletteuse,     vf);
 +    REGISTER_FILTER(PERMS,          perms,          vf);
 +    REGISTER_FILTER(PERSPECTIVE,    perspective,    vf);
 +    REGISTER_FILTER(PHASE,          phase,          vf);
      REGISTER_FILTER(PIXDESCTEST,    pixdesctest,    vf);
 +    REGISTER_FILTER(PIXSCOPE,       pixscope,       vf);
 +    REGISTER_FILTER(PP,             pp,             vf);
 +    REGISTER_FILTER(PP7,            pp7,            vf);
 +    REGISTER_FILTER(PREMULTIPLY,    premultiply,    vf);
 +    REGISTER_FILTER(PREWITT,        prewitt,        vf);
 +    REGISTER_FILTER(PSEUDOCOLOR,    pseudocolor,    vf);
 +    REGISTER_FILTER(PSNR,           psnr,           vf);
 +    REGISTER_FILTER(PULLUP,         pullup,         vf);
 +    REGISTER_FILTER(QP,             qp,             vf);
 +    REGISTER_FILTER(RANDOM,         random,         vf);
 +    REGISTER_FILTER(READEIA608,     readeia608,     vf);
 +    REGISTER_FILTER(READVITC,       readvitc,       vf);
 +    REGISTER_FILTER(REALTIME,       realtime,       vf);
 +    REGISTER_FILTER(REMAP,          remap,          vf);
 +    REGISTER_FILTER(REMOVEGRAIN,    removegrain,    vf);
 +    REGISTER_FILTER(REMOVELOGO,     removelogo,     vf);
 +    REGISTER_FILTER(REPEATFIELDS,   repeatfields,   vf);
 +    REGISTER_FILTER(REVERSE,        reverse,        vf);
 +    REGISTER_FILTER(ROBERTS,        roberts,        vf);
 +    REGISTER_FILTER(ROTATE,         rotate,         vf);
 +    REGISTER_FILTER(SAB,            sab,            vf);
      REGISTER_FILTER(SCALE,          scale,          vf);
 +    REGISTER_FILTER(SCALE_CUDA,     scale_cuda,     vf);
      REGISTER_FILTER(SCALE_NPP,      scale_npp,      vf);
      REGISTER_FILTER(SCALE_QSV,      scale_qsv,      vf);
      REGISTER_FILTER(SCALE_VAAPI,    scale_vaapi,    vf);
@@@ -305,71 -111,21 +306,72 @@@
      REGISTER_FILTER(SETSAR,         setsar,         vf);
      REGISTER_FILTER(SETTB,          settb,          vf);
      REGISTER_FILTER(SHOWINFO,       showinfo,       vf);
 +    REGISTER_FILTER(SHOWPALETTE,    showpalette,    vf);
 +    REGISTER_FILTER(SHUFFLEFRAMES,  shuffleframes,  vf);
      REGISTER_FILTER(SHUFFLEPLANES,  shuffleplanes,  vf);
 +    REGISTER_FILTER(SIDEDATA,       sidedata,       vf);
 +    REGISTER_FILTER(SIGNALSTATS,    signalstats,    vf);
 +    REGISTER_FILTER(SIGNATURE,      signature,      vf);
 +    REGISTER_FILTER(SMARTBLUR,      smartblur,      vf);
 +    REGISTER_FILTER(SOBEL,          sobel,          vf);
      REGISTER_FILTER(SPLIT,          split,          vf);
 +    REGISTER_FILTER(SPP,            spp,            vf);
 +    REGISTER_FILTER(SSIM,           ssim,           vf);
 +    REGISTER_FILTER(STEREO3D,       stereo3d,       vf);
 +    REGISTER_FILTER(STREAMSELECT,   streamselect,   vf);
 +    REGISTER_FILTER(SUBTITLES,      subtitles,      vf);
 +    REGISTER_FILTER(SUPER2XSAI,     super2xsai,     vf);
 +    REGISTER_FILTER(SWAPRECT,       swaprect,       vf);
 +    REGISTER_FILTER(SWAPUV,         swapuv,         vf);
 +    REGISTER_FILTER(TBLEND,         tblend,         vf);
 +    REGISTER_FILTER(TELECINE,       telecine,       vf);
 +    REGISTER_FILTER(THRESHOLD,      threshold,      vf);
 +    REGISTER_FILTER(THUMBNAIL,      thumbnail,      vf);
 +    REGISTER_FILTER(THUMBNAIL_CUDA, thumbnail_cuda, vf);
 +    REGISTER_FILTER(TILE,           tile,           vf);
 +    REGISTER_FILTER(TINTERLACE,     tinterlace,     vf);
 +    REGISTER_FILTER(TLUT2,          tlut2,          vf);
 +    REGISTER_FILTER(TONEMAP,        tonemap,        vf);
      REGISTER_FILTER(TRANSPOSE,      transpose,      vf);
      REGISTER_FILTER(TRIM,           trim,           vf);
 +    REGISTER_FILTER(UNPREMULTIPLY,  unpremultiply,  vf);
      REGISTER_FILTER(UNSHARP,        unsharp,        vf);
 +    REGISTER_FILTER(USPP,           uspp,           vf);
 +    REGISTER_FILTER(VAGUEDENOISER,  vaguedenoiser,  vf);
 +    REGISTER_FILTER(VECTORSCOPE,    vectorscope,    vf);
      REGISTER_FILTER(VFLIP,          vflip,          vf);
 +    REGISTER_FILTER(VIDSTABDETECT,  vidstabdetect,  vf);
 +    REGISTER_FILTER(VIDSTABTRANSFORM, vidstabtransform, vf);
 +    REGISTER_FILTER(VIGNETTE,       vignette,       vf);
 +    REGISTER_FILTER(VMAFMOTION,     vmafmotion,     vf);
+     REGISTER_FILTER(VPP_QSV,        vpp_qsv,        vf);
 +    REGISTER_FILTER(VSTACK,         vstack,         vf);
 +    REGISTER_FILTER(W3FDIF,         w3fdif,         vf);
 +    REGISTER_FILTER(WAVEFORM,       waveform,       vf);
 +    REGISTER_FILTER(WEAVE,          weave,          vf);
 +    REGISTER_FILTER(XBR,            xbr,            vf);
      REGISTER_FILTER(YADIF,          yadif,          vf);
 +    REGISTER_FILTER(ZMQ,            zmq,            vf);
 +    REGISTER_FILTER(ZOOMPAN,        zoompan,        vf);
 +    REGISTER_FILTER(ZSCALE,         zscale,         vf);
  
 +    REGISTER_FILTER(ALLRGB,         allrgb,         vsrc);
 +    REGISTER_FILTER(ALLYUV,         allyuv,         vsrc);
 +    REGISTER_FILTER(CELLAUTO,       cellauto,       vsrc);
      REGISTER_FILTER(COLOR,          color,          vsrc);
 +    REGISTER_FILTER(COREIMAGESRC,   coreimagesrc,   vsrc);
      REGISTER_FILTER(FREI0R,         frei0r_src,     vsrc);
 -    REGISTER_FILTER(MOVIE,          movie,          vsrc);
 +    REGISTER_FILTER(HALDCLUTSRC,    haldclutsrc,    vsrc);
 +    REGISTER_FILTER(LIFE,           life,           vsrc);
 +    REGISTER_FILTER(MANDELBROT,     mandelbrot,     vsrc);
 +    REGISTER_FILTER(MPTESTSRC,      mptestsrc,      vsrc);
      REGISTER_FILTER(NULLSRC,        nullsrc,        vsrc);
      REGISTER_FILTER(RGBTESTSRC,     rgbtestsrc,     vsrc);
 +    REGISTER_FILTER(SMPTEBARS,      smptebars,      vsrc);
 +    REGISTER_FILTER(SMPTEHDBARS,    smptehdbars,    vsrc);
      REGISTER_FILTER(TESTSRC,        testsrc,        vsrc);
 +    REGISTER_FILTER(TESTSRC2,       testsrc2,       vsrc);
 +    REGISTER_FILTER(YUVTESTSRC,     yuvtestsrc,     vsrc);
  
      REGISTER_FILTER(NULLSINK,       nullsink,       vsink);
  
diff --cc libavfilter/qsvvpp.c
index 0000000000,0b639c2d67..37805f2323
mode 000000,100644..100644
--- a/libavfilter/qsvvpp.c
+++ b/libavfilter/qsvvpp.c
@@@ -1,0 -1,727 +1,727 @@@
+ /*
 - * This file is part of Libav.
++ * This file is part of FFmpeg.
+  *
 - * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU Lesser General Public
+  * License as published by the Free Software Foundation; either
+  * version 2.1 of the License, or (at your option) any later version.
+  *
 - * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  * Lesser General Public License for more details.
+  *
+  * You should have received a copy of the GNU Lesser General Public
 - * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+  */
+ 
+ /**
+  * @file
+  * Intel Quick Sync Video VPP base function
+  */
+ 
+ #include "libavutil/common.h"
+ #include "libavutil/mathematics.h"
+ #include "libavutil/hwcontext.h"
+ #include "libavutil/hwcontext_qsv.h"
+ #include "libavutil/time.h"
+ #include "libavutil/pixdesc.h"
+ 
+ #include "internal.h"
+ #include "qsvvpp.h"
+ #include "video.h"
+ 
+ #define IS_VIDEO_MEMORY(mode)  (mode & (MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET | \
+                                         MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET))
+ #define IS_OPAQUE_MEMORY(mode) (mode & MFX_MEMTYPE_OPAQUE_FRAME)
+ #define IS_SYSTEM_MEMORY(mode) (mode & MFX_MEMTYPE_SYSTEM_MEMORY)
+ 
+ /* One node of the input/output frame pools: pairs the AVFrame that owns the
+  * pixel data with the mfxFrameSurface1 that libmfx operates on. */
+ typedef struct QSVFrame {
+     AVFrame          *frame;
+     mfxFrameSurface1 *surface;
+     mfxFrameSurface1  surface_internal;  /* for system memory */
+     struct QSVFrame  *next;              /* singly-linked free/busy list */
+ } QSVFrame;
+ 
+ /* abstract struct for all QSV filters; opaque to users of qsvvpp.h */
+ struct QSVVPPContext {
+     mfxSession          session;         /* "slave" MFX session used for VPP */
+     int (*filter_frame) (AVFilterLink *outlink, AVFrame *frame);/* callback */
+     enum AVPixelFormat  out_sw_format;   /* Real output format */
+     mfxVideoParam       vpp_param;       /* parameters passed to MFXVideoVPP_Init */
+     mfxFrameInfo       *frame_infos;     /* frame info for each input */
+ 
+     /* members related to the input/output surface */
+     int                 in_mem_mode;     /* MFX_MEMTYPE_* flags for input */
+     int                 out_mem_mode;    /* MFX_MEMTYPE_* flags for output */
+     QSVFrame           *in_frame_list;
+     QSVFrame           *out_frame_list;
+     int                 nb_surface_ptrs_in;
+     int                 nb_surface_ptrs_out;
+     mfxFrameSurface1  **surface_ptrs_in;
+     mfxFrameSurface1  **surface_ptrs_out;
+ 
+     /* MFXVPP extern parameters */
+     mfxExtOpaqueSurfaceAlloc opaque_alloc; /* used when in/out memory is opaque */
+     mfxExtBuffer      **ext_buffers;       /* opaque_alloc + user ext buffers */
+     int                 nb_ext_buffers;
+ };
+ 
+ /* device handle types probed from the master session, in preference order */
+ static const mfxHandleType handle_types[] = {
+     MFX_HANDLE_VA_DISPLAY,
+     MFX_HANDLE_D3D9_DEVICE_MANAGER,
+     MFX_HANDLE_D3D11_DEVICE,
+ };
+ 
+ /* 90 kHz timebase used for mfx surface timestamps */
+ static const AVRational default_tb = { 1, 90000 };
+ 
+ /* mfxFrameAllocator.Alloc callback: hand libmfx the MemIds of our
+  * pre-existing surface pools; only the mids table itself is allocated. */
+ static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
+                              mfxFrameAllocResponse *resp)
+ {
+     QSVVPPContext *s = pthis;
+     int i;
+ 
+     /* serve only external video-memory requests coming from VPP in/out */
+     if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
+         !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
+         !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
+         return MFX_ERR_UNSUPPORTED;
+ 
+     if (req->Type & MFX_MEMTYPE_FROM_VPPIN) {
+         resp->mids = av_mallocz(s->nb_surface_ptrs_in * sizeof(*resp->mids));
+         if (!resp->mids)
+             return AVERROR(ENOMEM); /* NOTE(review): FFmpeg error code returned where an mfxStatus is expected; MFX_ERR_MEMORY_ALLOC matches the callback contract */
+ 
+         for (i = 0; i < s->nb_surface_ptrs_in; i++)
+             resp->mids[i] = s->surface_ptrs_in[i]->Data.MemId;
+ 
+         resp->NumFrameActual = s->nb_surface_ptrs_in;
+     } else {
+         resp->mids = av_mallocz(s->nb_surface_ptrs_out * sizeof(*resp->mids));
+         if (!resp->mids)
+             return AVERROR(ENOMEM); /* NOTE(review): same error-domain mismatch as above */
+ 
+         for (i = 0; i < s->nb_surface_ptrs_out; i++)
+             resp->mids[i] = s->surface_ptrs_out[i]->Data.MemId;
+ 
+         resp->NumFrameActual = s->nb_surface_ptrs_out;
+     }
+ 
+     return MFX_ERR_NONE;
+ }
+ 
+ /* mfxFrameAllocator.Free callback: release the mids table built in frame_alloc(). */
+ static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
+ {
+     av_freep(&resp->mids);
+     return MFX_ERR_NONE;
+ }
+ 
+ /* Lock/Unlock are unsupported: surfaces here are never mapped through the
+  * allocator (video/opaque memory is accessed via MemId handles only). */
+ static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
+ {
+     return MFX_ERR_UNSUPPORTED;
+ }
+ 
+ static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
+ {
+     return MFX_ERR_UNSUPPORTED;
+ }
+ 
+ /* GetHDL callback: the MemId stored by frame_alloc() already is the native handle. */
+ static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
+ {
+     *hdl = mid;
+     return MFX_ERR_NONE;
+ }
+ 
+ /* Map an AVPixelFormat to the corresponding MFX FourCC.
+  * Unlisted formats silently fall back to NV12. */
+ static int pix_fmt_to_mfx_fourcc(int format)
+ {
+     switch (format) {
+     case AV_PIX_FMT_YUV420P:
+         return MFX_FOURCC_YV12;
+     case AV_PIX_FMT_NV12:
+         return MFX_FOURCC_NV12;
+     case AV_PIX_FMT_YUYV422:
+         return MFX_FOURCC_YUY2;
+     case AV_PIX_FMT_RGB32:
+         return MFX_FOURCC_RGB4;
+     }
+ 
+     return MFX_FOURCC_NV12;
+ }
+ 
+ /* Point an mfxFrameSurface1's plane pointers at a system-memory AVFrame.
+  * No data is copied; the surface aliases the frame's buffers.
+  * Returns MFX_ERR_UNSUPPORTED for pixel formats not handled here. */
+ static int map_frame_to_surface(AVFrame *frame, mfxFrameSurface1 *surface)
+ {
+     switch (frame->format) {
+     case AV_PIX_FMT_NV12:
+         surface->Data.Y  = frame->data[0];
+         surface->Data.UV = frame->data[1];
+         break;
+     case AV_PIX_FMT_YUV420P:
+         surface->Data.Y = frame->data[0];
+         surface->Data.U = frame->data[1];
+         surface->Data.V = frame->data[2];
+         break;
+     case AV_PIX_FMT_YUYV422:
+         /* packed YUYV: U/V interleave within the single plane */
+         surface->Data.Y = frame->data[0];
+         surface->Data.U = frame->data[0] + 1;
+         surface->Data.V = frame->data[0] + 3;
+         break;
+     case AV_PIX_FMT_RGB32:
+         /* BGRA byte order within the packed plane */
+         surface->Data.B = frame->data[0];
+         surface->Data.G = frame->data[0] + 1;
+         surface->Data.R = frame->data[0] + 2;
+         surface->Data.A = frame->data[0] + 3;
+         break;
+     default:
+         return MFX_ERR_UNSUPPORTED;
+     }
+     surface->Data.Pitch = frame->linesize[0];
+ 
+     return 0;
+ }
+ 
+ /* Fill an mfxFrameInfo from a filter link: for AV_PIX_FMT_QSV links, copy the
+  * info of the first hw surface; otherwise derive it from the sw pixel format.
+  * CropW/H, frame rate and aspect ratio always come from the link itself. */
+ static int fill_frameinfo_by_link(mfxFrameInfo *frameinfo, AVFilterLink *link)
+ {
+     enum AVPixelFormat        pix_fmt;
+     AVHWFramesContext        *frames_ctx;
+     AVQSVFramesContext       *frames_hwctx;
+     const AVPixFmtDescriptor *desc;
+ 
+     if (link->format == AV_PIX_FMT_QSV) {
+         if (!link->hw_frames_ctx)
+             return AVERROR(EINVAL);
+ 
+         frames_ctx   = (AVHWFramesContext *)link->hw_frames_ctx->data;
+         frames_hwctx = frames_ctx->hwctx;
+         *frameinfo   = frames_hwctx->surfaces[0].Info;
+     } else {
+         pix_fmt = link->format;
+         desc = av_pix_fmt_desc_get(pix_fmt);
+         if (!desc)
+             return AVERROR_BUG;
+ 
+         frameinfo->CropX          = 0;
+         frameinfo->CropY          = 0;
+         frameinfo->Width          = FFALIGN(link->w, 32);  /* libmfx alignment requirement */
+         frameinfo->Height         = FFALIGN(link->h, 32);
+         frameinfo->PicStruct      = MFX_PICSTRUCT_PROGRESSIVE;
+         frameinfo->FourCC         = pix_fmt_to_mfx_fourcc(pix_fmt);
+         frameinfo->BitDepthLuma   = desc->comp[0].depth;
+         frameinfo->BitDepthChroma = desc->comp[0].depth;
+         frameinfo->Shift          = desc->comp[0].depth > 8;
+         /* infer chroma sampling from the pixdesc subsampling factors */
+         if (desc->log2_chroma_w && desc->log2_chroma_h)
+             frameinfo->ChromaFormat = MFX_CHROMAFORMAT_YUV420;
+         else if (desc->log2_chroma_w)
+             frameinfo->ChromaFormat = MFX_CHROMAFORMAT_YUV422;
+         else
+             frameinfo->ChromaFormat = MFX_CHROMAFORMAT_YUV444;
+     }
+ 
+     frameinfo->CropW          = link->w;
+     frameinfo->CropH          = link->h;
+     frameinfo->FrameRateExtN  = link->frame_rate.num;
+     frameinfo->FrameRateExtD  = link->frame_rate.den;
+     frameinfo->AspectRatioW   = link->sample_aspect_ratio.num ? link->sample_aspect_ratio.num : 1;
+     frameinfo->AspectRatioH   = link->sample_aspect_ratio.den ? link->sample_aspect_ratio.den : 1;
+ 
+     return 0;
+ }
+ 
+ /* Recycle pool nodes whose surface is no longer locked by libmfx:
+  * free the AVFrame and mark the node reusable (surface = NULL). */
+ static void clear_unused_frames(QSVFrame *list)
+ {
+     while (list) {
+         if (list->surface && !list->surface->Data.Locked) {
+             list->surface = NULL;
+             av_frame_free(&list->frame);
+         }
+         list = list->next;
+     }
+ }
+ 
+ /* Destroy an entire frame pool: free every node and its AVFrame, leave *list NULL. */
+ static void clear_frame_list(QSVFrame **list)
+ {
+     while (*list) {
+         QSVFrame *frame;
+ 
+         frame = *list;
+         *list = (*list)->next;
+         av_frame_free(&frame->frame);
+         av_freep(&frame);
+     }
+ }
+ 
+ /* Return a reusable pool node (surface == NULL), growing the list with a new
+  * zeroed node if none is free. Returns NULL only on allocation failure. */
+ static QSVFrame *get_free_frame(QSVFrame **list)
+ {
+     QSVFrame *out = *list;
+ 
+     for (; out; out = out->next) {
+         if (!out->surface)
+             break;
+     }
+ 
+     if (!out) {
+         out = av_mallocz(sizeof(*out));
+         if (!out) {
+             av_log(NULL, AV_LOG_ERROR, "Can't alloc new output frame.\n");
+             return NULL;
+         }
+         /* prepend the new node so it is found first next time */
+         out->next  = *list;
+         *list      = out;
+     }
+ 
+     return out;
+ }
+ 
+ /* Wrap an incoming AVFrame as the mfxFrameSurface1 VPP input.
+  * Takes ownership of picref (it is stored in, or copied into, the pool).
+  * Returns NULL on error; NOTE(review): picref ownership on the error paths
+  * is left with the caller here — see ff_qsvvpp_filter_frame(). */
+ static QSVFrame *submit_frame(QSVVPPContext *s, AVFilterLink *inlink, AVFrame *picref)
+ {
+     QSVFrame        *qsv_frame;
+     AVFilterContext *ctx = inlink->dst;
+ 
+     clear_unused_frames(s->in_frame_list);
+ 
+     qsv_frame = get_free_frame(&s->in_frame_list);
+     if (!qsv_frame)
+         return NULL;
+ 
+     /* Turn AVFrame into mfxFrameSurface1.
+      * For video/opaque memory mode, pix_fmt is AV_PIX_FMT_QSV, and
+      * mfxFrameSurface1 is stored in AVFrame->data[3];
+      * for system memory mode, raw video data is stored in
+      * AVFrame, we should map it into mfxFrameSurface1.
+      */
+     if (!IS_SYSTEM_MEMORY(s->in_mem_mode)) {
+         if (picref->format != AV_PIX_FMT_QSV) {
+             av_log(ctx, AV_LOG_ERROR, "QSVVPP gets a wrong frame.\n");
+             return NULL;
+         }
+         qsv_frame->frame   = picref;
+         qsv_frame->surface = (mfxFrameSurface1 *)qsv_frame->frame->data[3];
+     } else {
+         /* make a copy if the input is not padded as libmfx requires */
+         if (picref->height & 31 || picref->linesize[0] & 31) {
+             qsv_frame->frame = ff_get_video_buffer(inlink,
+                                                    FFALIGN(inlink->w, 32),
+                                                    FFALIGN(inlink->h, 32));
+             if (!qsv_frame->frame)
+                 return NULL;
+ 
+             qsv_frame->frame->width   = picref->width;
+             qsv_frame->frame->height  = picref->height;
+ 
+             if (av_frame_copy(qsv_frame->frame, picref) < 0) {
+                 av_frame_free(&qsv_frame->frame);
+                 return NULL;
+             }
+ 
+             av_frame_copy_props(qsv_frame->frame, picref);
+             av_frame_free(&picref);
+         } else
+             qsv_frame->frame = picref;
+ 
+         if (map_frame_to_surface(qsv_frame->frame,
+                                 &qsv_frame->surface_internal) < 0) {
+             av_log(ctx, AV_LOG_ERROR, "Unsupported frame.\n");
+             return NULL;
+         }
+         qsv_frame->surface = &qsv_frame->surface_internal;
+     }
+ 
+     /* per-input static info, then per-frame timestamp in the 90 kHz timebase */
+     qsv_frame->surface->Info           = s->frame_infos[FF_INLINK_IDX(inlink)];
+     qsv_frame->surface->Data.TimeStamp = av_rescale_q(qsv_frame->frame->pts,
+                                                       inlink->time_base, default_tb);
+ 
+     /* translate AVFrame interlacing/repeat flags to mfx PicStruct */
+     qsv_frame->surface->Info.PicStruct =
+             !qsv_frame->frame->interlaced_frame ? MFX_PICSTRUCT_PROGRESSIVE :
+             (qsv_frame->frame->top_field_first ? MFX_PICSTRUCT_FIELD_TFF :
+                                                  MFX_PICSTRUCT_FIELD_BFF);
+     if (qsv_frame->frame->repeat_pict == 1)
+         qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FIELD_REPEATED;
+     else if (qsv_frame->frame->repeat_pict == 2)
+         qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING;
+     else if (qsv_frame->frame->repeat_pict == 4)
+         qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FRAME_TRIPLING;
+ 
+     return qsv_frame;
+ }
+ 
+ /* Obtain an output surface for VPP: a hw frame from outlink's frames context
+  * for video/opaque memory, or a padded sw frame mapped into a surface for
+  * system memory. Returns NULL on allocation failure. */
+ static QSVFrame *query_frame(QSVVPPContext *s, AVFilterLink *outlink)
+ {
+     AVFilterContext *ctx = outlink->src;
+     QSVFrame        *out_frame;
+     int              ret;
+ 
+     clear_unused_frames(s->out_frame_list);
+ 
+     out_frame = get_free_frame(&s->out_frame_list);
+     if (!out_frame)
+         return NULL;
+ 
+     /* For video memory, get a hw frame;
+      * For system memory, get a sw frame and map it into a mfx_surface. */
+     if (!IS_SYSTEM_MEMORY(s->out_mem_mode)) {
+         out_frame->frame = av_frame_alloc();
+         if (!out_frame->frame)
+             return NULL;
+ 
+         ret = av_hwframe_get_buffer(outlink->hw_frames_ctx, out_frame->frame, 0);
+         if (ret < 0) {
+             av_log(ctx, AV_LOG_ERROR, "Can't allocate a surface.\n");
+             /* NOTE(review): out_frame->frame stays in the list with surface == NULL;
+              * a later get_free_frame() reuses this node and overwrites ->frame,
+              * leaking this allocation — it should be freed here. */
+             return NULL;
+         }
+ 
+         out_frame->surface = (mfxFrameSurface1 *)out_frame->frame->data[3];
+     } else {
+         /* Get a frame with aligned dimensions.
+          * Libmfx need system memory being 128x64 aligned */
+         out_frame->frame = ff_get_video_buffer(outlink,
+                                                FFALIGN(outlink->w, 128),
+                                                FFALIGN(outlink->h, 64));
+         if (!out_frame->frame)
+             return NULL;
+ 
+         out_frame->frame->width  = outlink->w;
+         out_frame->frame->height = outlink->h;
+ 
+         ret = map_frame_to_surface(out_frame->frame,
+                                   &out_frame->surface_internal);
+         if (ret < 0)
+             return NULL;
+ 
+         out_frame->surface = &out_frame->surface_internal;
+     }
+ 
+     out_frame->surface->Info = s->vpp_param.vpp.Out;
+ 
+     return out_frame;
+ }
+ 
+ /* Create the "slave" MFX session used for VPP: determine in/out memory modes
+  * (opaque / video / system), build the output frames context when the output
+  * is AV_PIX_FMT_QSV, and wire up the opaque-allocation ext buffer or the
+  * custom frame allocator as appropriate. */
+ static int init_vpp_session(AVFilterContext *avctx, QSVVPPContext *s)
+ {
+     AVFilterLink                 *inlink = avctx->inputs[0];
+     AVFilterLink                *outlink = avctx->outputs[0];
+     AVQSVFramesContext  *in_frames_hwctx = NULL;
+     AVQSVFramesContext *out_frames_hwctx = NULL;
+ 
+     AVBufferRef *device_ref;
+     AVHWDeviceContext *device_ctx;
+     AVQSVDeviceContext *device_hwctx;
+     mfxHDL handle;        /* NOTE(review): uninitialized; 'if (handle)' below reads an indeterminate value when every GetHandle call fails — initialize to NULL */
+     mfxHandleType handle_type;
+     mfxVersion ver;
+     mfxIMPL impl;
+     int ret, i;
+ 
+     /* input memory mode: hw frames ctx -> video/opaque, plain device ctx -> system */
+     if (inlink->hw_frames_ctx) {
+         AVHWFramesContext *frames_ctx = (AVHWFramesContext *)inlink->hw_frames_ctx->data;
+ 
+         device_ref      = frames_ctx->device_ref;
+         in_frames_hwctx = frames_ctx->hwctx;
+ 
+         s->in_mem_mode = in_frames_hwctx->frame_type;
+ 
+         s->surface_ptrs_in = av_mallocz_array(in_frames_hwctx->nb_surfaces,
+                                               sizeof(*s->surface_ptrs_in));
+         if (!s->surface_ptrs_in)
+             return AVERROR(ENOMEM);
+ 
+         for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
+             s->surface_ptrs_in[i] = in_frames_hwctx->surfaces + i;
+ 
+         s->nb_surface_ptrs_in = in_frames_hwctx->nb_surfaces;
+     } else if (avctx->hw_device_ctx) {
+         device_ref     = avctx->hw_device_ctx;
+         s->in_mem_mode = MFX_MEMTYPE_SYSTEM_MEMORY;
+     } else {
+         av_log(avctx, AV_LOG_ERROR, "No hw context provided.\n");
+         return AVERROR(EINVAL);
+     }
+ 
+     device_ctx   = (AVHWDeviceContext *)device_ref->data;
+     device_hwctx = device_ctx->hwctx;
+ 
+     if (outlink->format == AV_PIX_FMT_QSV) {
+         AVHWFramesContext *out_frames_ctx;
+         AVBufferRef *out_frames_ref = av_hwframe_ctx_alloc(device_ref);
+         if (!out_frames_ref)
+             return AVERROR(ENOMEM);
+ 
+         /* keep the output in the same memory class as an opaque input */
+         s->out_mem_mode = IS_OPAQUE_MEMORY(s->in_mem_mode) ?
+                           MFX_MEMTYPE_OPAQUE_FRAME :
+                           MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
+ 
+         out_frames_ctx   = (AVHWFramesContext *)out_frames_ref->data;
+         out_frames_hwctx = out_frames_ctx->hwctx;
+ 
+         out_frames_ctx->format            = AV_PIX_FMT_QSV;
+         out_frames_ctx->width             = FFALIGN(outlink->w, 32);
+         out_frames_ctx->height            = FFALIGN(outlink->h, 32);
+         out_frames_ctx->sw_format         = s->out_sw_format;
+         out_frames_ctx->initial_pool_size = 64;
+         out_frames_hwctx->frame_type      = s->out_mem_mode;
+ 
+         ret = av_hwframe_ctx_init(out_frames_ref);
+         if (ret < 0) {
+             av_buffer_unref(&out_frames_ref);
+             av_log(avctx, AV_LOG_ERROR, "Error creating frames_ctx for output pad.\n");
+             return ret;
+         }
+ 
+         s->surface_ptrs_out = av_mallocz_array(out_frames_hwctx->nb_surfaces,
+                                                sizeof(*s->surface_ptrs_out));
+         if (!s->surface_ptrs_out) {
+             av_buffer_unref(&out_frames_ref);
+             return AVERROR(ENOMEM);
+         }
+ 
+         for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
+             s->surface_ptrs_out[i] = out_frames_hwctx->surfaces + i;
+         s->nb_surface_ptrs_out = out_frames_hwctx->nb_surfaces;
+ 
+         av_buffer_unref(&outlink->hw_frames_ctx);
+         outlink->hw_frames_ctx = out_frames_ref;
+     } else
+         s->out_mem_mode = MFX_MEMTYPE_SYSTEM_MEMORY;
+ 
+     /* extract the properties of the "master" session given to us */
+     ret = MFXQueryIMPL(device_hwctx->session, &impl);
+     if (ret == MFX_ERR_NONE)
+         ret = MFXQueryVersion(device_hwctx->session, &ver);
+     if (ret != MFX_ERR_NONE) {
+         av_log(avctx, AV_LOG_ERROR, "Error querying the session attributes\n");
+         return AVERROR_UNKNOWN;
+     }
+ 
+     /* probe for a device handle; handle/handle_type stay unset if none matches */
+     for (i = 0; i < FF_ARRAY_ELEMS(handle_types); i++) {
+         ret = MFXVideoCORE_GetHandle(device_hwctx->session, handle_types[i], &handle);
+         if (ret == MFX_ERR_NONE) {
+             handle_type = handle_types[i];
+             break;
+         }
+     }
+ 
+     /* create a "slave" session with those same properties, to be used for vpp */
+     ret = MFXInit(impl, &ver, &s->session);
+     if (ret != MFX_ERR_NONE) {
+         av_log(avctx, AV_LOG_ERROR, "Error initializing a session for scaling\n");
+         return AVERROR_UNKNOWN;
+     }
+ 
+     if (handle) {
+         ret = MFXVideoCORE_SetHandle(s->session, handle_type, handle);
+         if (ret != MFX_ERR_NONE)
+             return AVERROR_UNKNOWN;
+     }
+ 
+     if (IS_OPAQUE_MEMORY(s->in_mem_mode) || IS_OPAQUE_MEMORY(s->out_mem_mode)) {
+         /* opaque mode: describe both surface pools in the ext buffer instead
+          * of installing an allocator */
+         s->opaque_alloc.In.Surfaces   = s->surface_ptrs_in;
+         s->opaque_alloc.In.NumSurface = s->nb_surface_ptrs_in;
+         s->opaque_alloc.In.Type       = s->in_mem_mode;
+ 
+         s->opaque_alloc.Out.Surfaces   = s->surface_ptrs_out;
+         s->opaque_alloc.Out.NumSurface = s->nb_surface_ptrs_out;
+         s->opaque_alloc.Out.Type       = s->out_mem_mode;
+ 
+         s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
+         s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc);
+     } else if (IS_VIDEO_MEMORY(s->in_mem_mode) || IS_VIDEO_MEMORY(s->out_mem_mode)) {
+         mfxFrameAllocator frame_allocator = {
+             .pthis  = s,
+             .Alloc  = frame_alloc,
+             .Lock   = frame_lock,
+             .Unlock = frame_unlock,
+             .GetHDL = frame_get_hdl,
+             .Free   = frame_free,
+         };
+ 
+         ret = MFXVideoCORE_SetFrameAllocator(s->session, &frame_allocator);
+         if (ret != MFX_ERR_NONE)
+             return AVERROR_UNKNOWN;
+     }
+ 
+     return 0;
+ }
+ 
+ /* Allocate and initialize a QSVVPPContext from user parameters: create the
+  * session, collect per-input frame info (with optional crops), assemble the
+  * ext-buffer list and IOPattern, then call MFXVideoVPP_Init.
+  * On success *vpp owns the context; on failure everything is freed. */
+ int ff_qsvvpp_create(AVFilterContext *avctx, QSVVPPContext **vpp, QSVVPPParam *param)
+ {
+     int i;
+     int ret;
+     QSVVPPContext *s;
+ 
+     s = av_mallocz(sizeof(*s));
+     if (!s)
+         return AVERROR(ENOMEM);
+ 
+     s->filter_frame  = param->filter_frame;
+     if (!s->filter_frame)
+         s->filter_frame = ff_filter_frame;
+     s->out_sw_format = param->out_sw_format;
+ 
+     /* create the vpp session */
+     ret = init_vpp_session(avctx, s);
+     if (ret < 0)
+         goto failed;
+ 
+     s->frame_infos = av_mallocz_array(avctx->nb_inputs, sizeof(*s->frame_infos));
+     if (!s->frame_infos) {
+         ret = AVERROR(ENOMEM);
+         goto failed;
+     }
+ 
+     /* Init each input's information */
+     for (i = 0; i < avctx->nb_inputs; i++) {
+         ret = fill_frameinfo_by_link(&s->frame_infos[i], avctx->inputs[i]);
+         if (ret < 0)
+             goto failed;
+     }
+ 
+     /* Update input's frame info according to crop */
+     for (i = 0; i < param->num_crop; i++) {
+         QSVVPPCrop *crop = param->crop + i;
+         /* NOTE(review): off-by-one — frame_infos has nb_inputs entries, so the
+          * check must be >= ; in_idx == nb_inputs writes one past the array. */
+         if (crop->in_idx > avctx->nb_inputs) {
+             ret = AVERROR(EINVAL);
+             goto failed;
+         }
+         s->frame_infos[crop->in_idx].CropX = crop->x;
+         s->frame_infos[crop->in_idx].CropY = crop->y;
+         s->frame_infos[crop->in_idx].CropW = crop->w;
+         s->frame_infos[crop->in_idx].CropH = crop->h;
+     }
+ 
+     s->vpp_param.vpp.In = s->frame_infos[0];
+ 
+     ret = fill_frameinfo_by_link(&s->vpp_param.vpp.Out, avctx->outputs[0]);
+     if (ret < 0) {
+         av_log(avctx, AV_LOG_ERROR, "Fail to get frame info from link.\n");
+         goto failed;
+     }
+ 
+     if (IS_OPAQUE_MEMORY(s->in_mem_mode) || IS_OPAQUE_MEMORY(s->out_mem_mode)) {
+         /* slot 0 carries the opaque-surface ext buffer, user buffers follow */
+         s->nb_ext_buffers = param->num_ext_buf + 1;
+         s->ext_buffers = av_mallocz_array(s->nb_ext_buffers, sizeof(*s->ext_buffers));
+         if (!s->ext_buffers) {
+             ret = AVERROR(ENOMEM);
+             goto failed;
+         }
+ 
+         s->ext_buffers[0] = (mfxExtBuffer *)&s->opaque_alloc;
+         /* NOTE(review): off-by-one — this copies only ext_buf[0..num_ext_buf-2];
+          * the last user buffer is dropped and ext_buffers[nb_ext_buffers-1]
+          * stays NULL even though NumExtParam counts it. The bound should be
+          * i < param->num_ext_buf + 1. */
+         for (i = 1; i < param->num_ext_buf; i++)
+             s->ext_buffers[i]    = param->ext_buf[i - 1];
+         s->vpp_param.ExtParam    = s->ext_buffers;
+         s->vpp_param.NumExtParam = s->nb_ext_buffers;
+     } else {
+         s->vpp_param.NumExtParam = param->num_ext_buf;
+         s->vpp_param.ExtParam    = param->ext_buf;
+     }
+ 
+     s->vpp_param.AsyncDepth = 1;
+ 
+     /* translate the memory modes into the mfx IOPattern bitmask */
+     if (IS_SYSTEM_MEMORY(s->in_mem_mode))
+         s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_SYSTEM_MEMORY;
+     else if (IS_VIDEO_MEMORY(s->in_mem_mode))
+         s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_VIDEO_MEMORY;
+     else if (IS_OPAQUE_MEMORY(s->in_mem_mode))
+         s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_OPAQUE_MEMORY;
+ 
+     if (IS_SYSTEM_MEMORY(s->out_mem_mode))
+         s->vpp_param.IOPattern |= MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
+     else if (IS_VIDEO_MEMORY(s->out_mem_mode))
+         s->vpp_param.IOPattern |= MFX_IOPATTERN_OUT_VIDEO_MEMORY;
+     else if (IS_OPAQUE_MEMORY(s->out_mem_mode))
+         s->vpp_param.IOPattern |= MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
+ 
+     ret = MFXVideoVPP_Init(s->session, &s->vpp_param);
+     if (ret < 0) {
+         av_log(avctx, AV_LOG_ERROR, "Failed to create a qsvvpp, ret = %d.\n", ret);
+         goto failed;
+     }
+ 
+     *vpp = s;
+     return 0;
+ 
+ failed:
+     ff_qsvvpp_free(&s);
+ 
+     return ret;
+ }
+ 
+ /* Tear down a QSVVPPContext: close the MFX session, free both frame pools
+  * and all owned arrays, and NULL out *vpp. Safe to call with *vpp == NULL. */
+ int ff_qsvvpp_free(QSVVPPContext **vpp)
+ {
+     QSVVPPContext *s = *vpp;
+ 
+     if (!s)
+         return 0;
+ 
+     if (s->session) {
+         MFXVideoVPP_Close(s->session);
+         MFXClose(s->session);
+     }
+ 
+     /* release all the resources */
+     clear_frame_list(&s->in_frame_list);
+     clear_frame_list(&s->out_frame_list);
+     av_freep(&s->surface_ptrs_in);
+     av_freep(&s->surface_ptrs_out);
+     av_freep(&s->ext_buffers);
+     av_freep(&s->frame_infos);
+     av_freep(vpp);
+ 
+     return 0;
+ }
+ 
+ /* Run one input frame through VPP, pushing every produced output frame to the
+  * filter_frame callback; loops while libmfx reports MFX_ERR_MORE_SURFACE
+  * (e.g. deinterlacing producing two outputs per input). */
+ int ff_qsvvpp_filter_frame(QSVVPPContext *s, AVFilterLink *inlink, AVFrame *picref)
+ {
+     AVFilterContext  *ctx     = inlink->dst;
+     AVFilterLink     *outlink = ctx->outputs[0];
+     mfxSyncPoint      sync;
+     QSVFrame         *in_frame, *out_frame;
+     int               ret, filter_ret;
+ 
+     in_frame = submit_frame(s, inlink, picref);
+     if (!in_frame) {
+         av_log(ctx, AV_LOG_ERROR, "Failed to submit frame on input[%d]\n",
+                FF_INLINK_IDX(inlink));
+         /* NOTE(review): picref appears to leak on this path — nothing frees it
+          * when submit_frame() fails; confirm ownership convention. */
+         return AVERROR(ENOMEM);
+     }
+ 
+     do {
+         out_frame = query_frame(s, outlink);
+         if (!out_frame) {
+             av_log(ctx, AV_LOG_ERROR, "Failed to query an output frame.\n");
+             return AVERROR(ENOMEM);
+         }
+ 
+         /* retry while the device is busy, backing off 500us per attempt */
+         do {
+             ret = MFXVideoVPP_RunFrameVPPAsync(s->session, in_frame->surface,
+                                                out_frame->surface, NULL, &sync);
+             if (ret == MFX_WRN_DEVICE_BUSY)
+                 av_usleep(500);
+         } while (ret == MFX_WRN_DEVICE_BUSY);
+ 
+         if (ret < 0 && ret != MFX_ERR_MORE_SURFACE) {
+             /* Ignore more_data error */
+             if (ret == MFX_ERR_MORE_DATA)
+                 ret = AVERROR(EAGAIN);
+             break;
+         }
+ 
+         /* wait (up to 1s) for the async operation before handing the frame on */
+         if (MFXVideoCORE_SyncOperation(s->session, sync, 1000) < 0)
+             av_log(ctx, AV_LOG_WARNING, "Sync failed.\n");
+ 
+         out_frame->frame->pts = av_rescale_q(out_frame->surface->Data.TimeStamp,
+                                              default_tb, outlink->time_base);
+ 
+         filter_ret = s->filter_frame(outlink, out_frame->frame);
+         if (filter_ret < 0) {
+             av_frame_free(&out_frame->frame);
+             ret = filter_ret;
+             break;
+         }
+         /* ownership passed to filter_frame; drop our reference */
+         out_frame->frame = NULL;
+     } while(ret == MFX_ERR_MORE_SURFACE);
+ 
+     return ret;
+ }
diff --cc libavfilter/qsvvpp.h
index 0000000000,082c0a8994..d720c9ba42
mode 000000,100644..100644
--- a/libavfilter/qsvvpp.h
+++ b/libavfilter/qsvvpp.h
@@@ -1,0 -1,66 +1,66 @@@
+ /*
 - * This file is part of Libav.
++ * This file is part of FFmpeg.
+  *
 - * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU Lesser General Public
+  * License as published by the Free Software Foundation; either
+  * version 2.1 of the License, or (at your option) any later version.
+  *
 - * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  * Lesser General Public License for more details.
+  *
+  * You should have received a copy of the GNU Lesser General Public
 - * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+  */
+ 
+ /**
+  * @file
+  * Intel Quick Sync Video VPP base function
+  */
+ 
+ #ifndef AVFILTER_QSVVPP_H
+ #define AVFILTER_QSVVPP_H
+ 
+ #include <mfx/mfxvideo.h>
+ 
+ #include "avfilter.h"
+ 
+ #define FF_INLINK_IDX(link)  ((int)((link)->dstpad - (link)->dst->input_pads))
+ #define FF_OUTLINK_IDX(link) ((int)((link)->srcpad - (link)->src->output_pads))
+ 
+ typedef struct QSVVPPContext QSVVPPContext;
+ 
+ typedef struct QSVVPPCrop {
+     int in_idx;        ///< Input index
+     int x, y, w, h;    ///< Crop rectangle
+ } QSVVPPCrop;
+ 
+ typedef struct QSVVPPParam {
+     /* default is ff_filter_frame */
+     int (*filter_frame)(AVFilterLink *outlink, AVFrame *frame);
+ 
+     /* To fill with MFX enhanced filter configurations */
+     int num_ext_buf;
+     mfxExtBuffer **ext_buf;
+ 
+     /* Real output format */
+     enum AVPixelFormat out_sw_format;
+ 
+     /* Crop information for each input, if needed */
+     int num_crop;
+     QSVVPPCrop *crop;
+ } QSVVPPParam;
+ 
+ /* create and initialize the QSV session */
+ int ff_qsvvpp_create(AVFilterContext *avctx, QSVVPPContext **vpp, QSVVPPParam *param);
+ 
+ /* release the resources (eg.surfaces) */
+ int ff_qsvvpp_free(QSVVPPContext **vpp);
+ 
+ /* vpp filter frame and call the cb if needed */
+ int ff_qsvvpp_filter_frame(QSVVPPContext *vpp, AVFilterLink *inlink, AVFrame *frame);
+ 
+ #endif /* AVFILTER_QSVVPP_H */
diff --cc libavfilter/vf_overlay_qsv.c
index 0000000000,89f2fb134d..471576e35a
mode 000000,100644..100644
--- a/libavfilter/vf_overlay_qsv.c
+++ b/libavfilter/vf_overlay_qsv.c
@@@ -1,0 -1,487 +1,487 @@@
+ /*
 - * This file is part of Libav.
++ * This file is part of FFmpeg.
+  *
 - * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU Lesser General Public
+  * License as published by the Free Software Foundation; either
+  * version 2.1 of the License, or (at your option) any later version.
+  *
 - * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  * Lesser General Public License for more details.
+  *
+  * You should have received a copy of the GNU Lesser General Public
 - * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+  */
+ 
+ /**
+  * @file
+  * A hardware accelerated overlay filter based on Intel Quick Sync Video VPP
+  */
+ 
+ #include "libavutil/opt.h"
+ #include "libavutil/common.h"
+ #include "libavutil/pixdesc.h"
+ #include "libavutil/eval.h"
+ #include "libavutil/hwcontext.h"
+ #include "libavutil/avstring.h"
+ #include "libavutil/avassert.h"
+ #include "libavutil/imgutils.h"
+ #include "libavutil/mathematics.h"
+ 
+ #include "internal.h"
+ #include "avfilter.h"
+ #include "formats.h"
+ #include "video.h"
+ 
+ #include "qsvvpp.h"
+ 
+ #define MAIN    0
+ #define OVERLAY 1
+ 
+ #define OFFSET(x) offsetof(QSVOverlayContext, x)
+ #define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+ 
+ enum var_name {
+     VAR_MAIN_iW,     VAR_MW,
+     VAR_MAIN_iH,     VAR_MH,
+     VAR_OVERLAY_iW,
+     VAR_OVERLAY_iH,
+     VAR_OVERLAY_X,  VAR_OX,
+     VAR_OVERLAY_Y,  VAR_OY,
+     VAR_OVERLAY_W,  VAR_OW,
+     VAR_OVERLAY_H,  VAR_OH,
+     VAR_VARS_NB
+ };
+ 
+ enum EOFAction {
+     EOF_ACTION_REPEAT,
+     EOF_ACTION_ENDALL
+ };
+ 
+ typedef struct QSVOverlayContext {
+     const AVClass      *class;
+ 
+     QSVVPPContext      *qsv;
+     QSVVPPParam        qsv_param;
+     mfxExtVPPComposite comp_conf;
+     double             var_values[VAR_VARS_NB];
+ 
+     char     *overlay_ox, *overlay_oy, *overlay_ow, *overlay_oh;
+     uint16_t  overlay_alpha, overlay_pixel_alpha;
+ 
+     enum EOFAction eof_action;  /* action to take on EOF from source */
+ 
+     AVFrame *main;
+     AVFrame *over_prev, *over_next;
+ } QSVOverlayContext;
+ 
+ static const char *const var_names[] = {
+     "main_w",     "W",   /* input width of the main layer */
+     "main_h",     "H",   /* input height of the main layer */
+     "overlay_iw",        /* input width of the overlay layer */
+     "overlay_ih",        /* input height of the overlay layer */
+     "overlay_x",  "x",   /* x position of the overlay layer inside of main */
+     "overlay_y",  "y",   /* y position of the overlay layer inside of main */
+     "overlay_w",  "w",   /* output width of overlay layer */
+     "overlay_h",  "h",   /* output height of overlay layer */
+     NULL
+ };
+ 
+ static const AVOption options[] = {
+     { "x", "Overlay x position", OFFSET(overlay_ox), AV_OPT_TYPE_STRING, { .str="0"}, 0, 255, .flags = FLAGS},
+     { "y", "Overlay y position", OFFSET(overlay_oy), AV_OPT_TYPE_STRING, { .str="0"}, 0, 255, .flags = FLAGS},
+     { "w", "Overlay width",      OFFSET(overlay_ow), AV_OPT_TYPE_STRING, { .str="overlay_iw"}, 0, 255, .flags = FLAGS},
+     { "h", "Overlay height",     OFFSET(overlay_oh), AV_OPT_TYPE_STRING, { .str="overlay_ih*w/overlay_iw"}, 0, 255, .flags = FLAGS},
+     { "alpha", "Overlay global alpha", OFFSET(overlay_alpha), AV_OPT_TYPE_INT, { .i64 = 255}, 0, 255, .flags = FLAGS},
+     { "eof_action", "Action to take when encountering EOF from secondary input ",
+         OFFSET(eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
+         EOF_ACTION_REPEAT, EOF_ACTION_ENDALL, .flags = FLAGS, "eof_action" },
+         { "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
+         { "endall", "End both streams.",          0, AV_OPT_TYPE_CONST, { .i64 = EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
+     { NULL }
+ };
+ 
+ static int eval_expr(AVFilterContext *ctx)
+ {
+     QSVOverlayContext *vpp = ctx->priv;
+     double     *var_values = vpp->var_values;
+     int                ret = 0;
+     AVExpr *ox_expr = NULL, *oy_expr = NULL;
+     AVExpr *ow_expr = NULL, *oh_expr = NULL;
+ 
+ #define PASS_EXPR(e, s) {\
+     ret = av_expr_parse(&e, s, var_names, NULL, NULL, NULL, NULL, 0, ctx); \
+     if (ret < 0) {\
+         av_log(ctx, AV_LOG_ERROR, "Error when passing '%s'.\n", s);\
+         goto release;\
+     }\
+ }
+     PASS_EXPR(ox_expr, vpp->overlay_ox);
+     PASS_EXPR(oy_expr, vpp->overlay_oy);
+     PASS_EXPR(ow_expr, vpp->overlay_ow);
+     PASS_EXPR(oh_expr, vpp->overlay_oh);
+ #undef PASS_EXPR
+ 
+     var_values[VAR_OVERLAY_W] =
+     var_values[VAR_OW]        = av_expr_eval(ow_expr, var_values, NULL);
+     var_values[VAR_OVERLAY_H] =
+     var_values[VAR_OH]        = av_expr_eval(oh_expr, var_values, NULL);
+ 
+     /* calc again in case ow is relative to oh */
+     var_values[VAR_OVERLAY_W] =
+     var_values[VAR_OW]        = av_expr_eval(ow_expr, var_values, NULL);
+ 
+     var_values[VAR_OVERLAY_X] =
+     var_values[VAR_OX]        = av_expr_eval(ox_expr, var_values, NULL);
+     var_values[VAR_OVERLAY_Y] =
+     var_values[VAR_OY]        = av_expr_eval(oy_expr, var_values, NULL);
+ 
+     /* calc again in case ox is relative to oy */
+     var_values[VAR_OVERLAY_X] =
+     var_values[VAR_OX]        = av_expr_eval(ox_expr, var_values, NULL);
+ 
+     /* calc overlay_w and overlay_h again incase relative to ox,oy */
+     var_values[VAR_OVERLAY_W] =
+     var_values[VAR_OW]        = av_expr_eval(ow_expr, var_values, NULL);
+     var_values[VAR_OVERLAY_H] =
+     var_values[VAR_OH]        = av_expr_eval(oh_expr, var_values, NULL);
+     var_values[VAR_OVERLAY_W] =
+     var_values[VAR_OW]        = av_expr_eval(ow_expr, var_values, NULL);
+ 
+ release:
+     av_expr_free(ox_expr);
+     av_expr_free(oy_expr);
+     av_expr_free(ow_expr);
+     av_expr_free(oh_expr);
+ 
+     return ret;
+ }
+ 
+ static int have_alpha_planar(AVFilterLink *link)
+ {
+     enum AVPixelFormat pix_fmt;
+     const AVPixFmtDescriptor *desc;
+     AVHWFramesContext *fctx;
+ 
+     if (link->format == AV_PIX_FMT_QSV) {
+         fctx    = (AVHWFramesContext *)link->hw_frames_ctx->data;
+         pix_fmt = fctx->sw_format;
+     }
+ 
+     desc = av_pix_fmt_desc_get(pix_fmt);
+     if (!desc)
+         return 0;
+ 
+     return !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
+ }
+ 
+ static int config_main_input(AVFilterLink *inlink)
+ {
+     AVFilterContext      *ctx = inlink->dst;
+     QSVOverlayContext    *vpp = ctx->priv;
+     mfxVPPCompInputStream *st = &vpp->comp_conf.InputStream[0];
+ 
+     av_log(ctx, AV_LOG_DEBUG, "Input[%d] is of %s.\n", FF_INLINK_IDX(inlink),
+            av_get_pix_fmt_name(inlink->format));
+ 
+     vpp->var_values[VAR_MAIN_iW] =
+     vpp->var_values[VAR_MW]      = inlink->w;
+     vpp->var_values[VAR_MAIN_iH] =
+     vpp->var_values[VAR_MH]      = inlink->h;
+ 
+     st->DstX              = 0;
+     st->DstY              = 0;
+     st->DstW              = inlink->w;
+     st->DstH              = inlink->h;
+     st->GlobalAlphaEnable = 0;
+     st->PixelAlphaEnable  = 0;
+ 
+     return 0;
+ }
+ 
+ static int config_overlay_input(AVFilterLink *inlink)
+ {
+     AVFilterContext       *ctx = inlink->dst;
+     QSVOverlayContext     *vpp = ctx->priv;
+     mfxVPPCompInputStream *st  = &vpp->comp_conf.InputStream[1];
+     int                    ret = 0;
+ 
+     av_log(ctx, AV_LOG_DEBUG, "Input[%d] is of %s.\n", FF_INLINK_IDX(inlink),
+            av_get_pix_fmt_name(inlink->format));
+ 
+     vpp->var_values[VAR_OVERLAY_iW] = inlink->w;
+     vpp->var_values[VAR_OVERLAY_iH] = inlink->h;
+ 
+     ret = eval_expr(ctx);
+     if (ret < 0)
+         return ret;
+ 
+     st->DstX              = vpp->var_values[VAR_OX];
+     st->DstY              = vpp->var_values[VAR_OY];
+     st->DstW              = vpp->var_values[VAR_OW];
+     st->DstH              = vpp->var_values[VAR_OH];
+     st->GlobalAlpha       = vpp->overlay_alpha;
+     st->GlobalAlphaEnable = (st->GlobalAlpha < 255);
+     st->PixelAlphaEnable  = have_alpha_planar(inlink);
+ 
+     return 0;
+ }
+ 
+ static int config_output(AVFilterLink *outlink)
+ {
+     AVFilterContext   *ctx = outlink->src;
+     QSVOverlayContext *vpp = ctx->priv;
+     AVFilterLink      *in0 = ctx->inputs[0];
+     AVFilterLink      *in1 = ctx->inputs[1];
+ 
+     av_log(ctx, AV_LOG_DEBUG, "Output is of %s.\n", av_get_pix_fmt_name(outlink->format));
+     if ((in0->format == AV_PIX_FMT_QSV && in1->format != AV_PIX_FMT_QSV) ||
+         (in0->format != AV_PIX_FMT_QSV && in1->format == AV_PIX_FMT_QSV)) {
+         av_log(ctx, AV_LOG_ERROR, "Mixing hardware and software pixel formats is not supported.\n");
+         return AVERROR(EINVAL);
+     } else if (in0->format == AV_PIX_FMT_QSV) {
+         AVHWFramesContext *hw_frame0 = (AVHWFramesContext *)in0->hw_frames_ctx->data;
+         AVHWFramesContext *hw_frame1 = (AVHWFramesContext *)in1->hw_frames_ctx->data;
+ 
+         if (hw_frame0->device_ctx != hw_frame1->device_ctx) {
+             av_log(ctx, AV_LOG_ERROR, "Inputs with different underlying QSV devices are forbidden.\n");
+             return AVERROR(EINVAL);
+         }
+     }
+ 
+     outlink->w          = vpp->var_values[VAR_MW];
+     outlink->h          = vpp->var_values[VAR_MH];
+     outlink->frame_rate = in0->frame_rate;
+     outlink->time_base  = av_inv_q(outlink->frame_rate);
+ 
+     return ff_qsvvpp_create(ctx, &vpp->qsv, &vpp->qsv_param);
+ }
+ 
+ static int blend_frame(AVFilterContext *ctx, AVFrame *mpic, AVFrame *opic)
+ {
+     int                ret = 0;
+     QSVOverlayContext *vpp = ctx->priv;
+     AVFrame     *opic_copy = NULL;
+ 
+     ret = ff_qsvvpp_filter_frame(vpp->qsv, ctx->inputs[0], mpic);
+     if (ret == 0 || ret == AVERROR(EAGAIN)) {
+         /* Reference the overlay frame. Because:
+          * 1. ff_qsvvpp_filter_frame will take control of the given frame
+          * 2. We need to repeat the overlay frame when 2nd input goes into EOF
+          */
+         opic_copy = av_frame_clone(opic);
+         if (!opic_copy)
+             return AVERROR(ENOMEM);
+ 
+         ret = ff_qsvvpp_filter_frame(vpp->qsv, ctx->inputs[1], opic_copy);
+     }
+ 
+     return ret;
+ }
+ 
+ static int handle_overlay_eof(AVFilterContext *ctx)
+ {
+     int              ret = 0;
+     QSVOverlayContext *s = ctx->priv;
+     /* Repeat previous frame on secondary input */
+     if (s->over_prev && s->eof_action == EOF_ACTION_REPEAT)
+         ret = blend_frame(ctx, s->main, s->over_prev);
+     /* End both streams */
+     else if (s->eof_action == EOF_ACTION_ENDALL)
+         return AVERROR_EOF;
+ 
+     s->main = NULL;
+ 
+     return ret;
+ }
+ 
+ static int request_frame(AVFilterLink *outlink)
+ {
+     AVFilterContext *ctx = outlink->src;
+     QSVOverlayContext *s = ctx->priv;
+     AVRational   tb_main = ctx->inputs[MAIN]->time_base;
+     AVRational   tb_over = ctx->inputs[OVERLAY]->time_base;
+     int              ret = 0;
+ 
+     /* get a frame on the main input */
+     if (!s->main) {
+         ret = ff_request_frame(ctx->inputs[MAIN]);
+         if (ret < 0)
+             return ret;
+     }
+ 
+     /* get a new frame on the overlay input, on EOF check setting 'eof_action' */
+     if (!s->over_next) {
+         ret = ff_request_frame(ctx->inputs[OVERLAY]);
+         if (ret == AVERROR_EOF)
+             return handle_overlay_eof(ctx);
+         else if (ret < 0)
+             return ret;
+     }
+ 
+     while (s->main->pts != AV_NOPTS_VALUE &&
+            s->over_next->pts != AV_NOPTS_VALUE &&
+            av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main) < 0) {
+         av_frame_free(&s->over_prev);
+         FFSWAP(AVFrame*, s->over_prev, s->over_next);
+ 
+         ret = ff_request_frame(ctx->inputs[OVERLAY]);
+         if (ret == AVERROR_EOF)
+             return handle_overlay_eof(ctx);
+         else if (ret < 0)
+             return ret;
+     }
+ 
+     if (s->main->pts == AV_NOPTS_VALUE ||
+         s->over_next->pts == AV_NOPTS_VALUE ||
+         !av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main)) {
+         ret = blend_frame(ctx, s->main, s->over_next);
+         av_frame_free(&s->over_prev);
+         FFSWAP(AVFrame*, s->over_prev, s->over_next);
+     } else if (s->over_prev) {
+         ret = blend_frame(ctx, s->main, s->over_prev);
+     } else {
+         av_frame_free(&s->main);
+         ret = AVERROR(EAGAIN);
+     }
+ 
+     s->main = NULL;
+ 
+     return ret;
+ }
+ 
+ static int filter_frame_main(AVFilterLink *inlink, AVFrame *frame)
+ {
+     QSVOverlayContext *s = inlink->dst->priv;
+ 
+     av_assert0(!s->main);
+     s->main = frame;
+ 
+     return 0;
+ }
+ 
+ static int filter_frame_overlay(AVFilterLink *inlink, AVFrame *frame)
+ {
+     QSVOverlayContext *s = inlink->dst->priv;
+ 
+     av_assert0(!s->over_next);
+     s->over_next = frame;
+ 
+     return 0;
+ }
+ 
+ static int overlay_qsv_init(AVFilterContext *ctx)
+ {
+     QSVOverlayContext *vpp = ctx->priv;
+ 
+     /* fill composite config */
+     vpp->comp_conf.Header.BufferId = MFX_EXTBUFF_VPP_COMPOSITE;
+     vpp->comp_conf.Header.BufferSz = sizeof(vpp->comp_conf);
+     vpp->comp_conf.NumInputStream  = ctx->nb_inputs;
+     vpp->comp_conf.InputStream     = av_mallocz_array(ctx->nb_inputs,
+                                                       sizeof(*vpp->comp_conf.InputStream));
+     if (!vpp->comp_conf.InputStream)
+         return AVERROR(ENOMEM);
+ 
+     /* initialize QSVVPP params */
+     vpp->qsv_param.filter_frame = NULL;
+     vpp->qsv_param.ext_buf      = av_mallocz(sizeof(*vpp->qsv_param.ext_buf));
+     if (!vpp->qsv_param.ext_buf)
+         return AVERROR(ENOMEM);
+ 
+     vpp->qsv_param.ext_buf[0]    = (mfxExtBuffer *)&vpp->comp_conf;
+     vpp->qsv_param.num_ext_buf   = 1;
+     vpp->qsv_param.out_sw_format = AV_PIX_FMT_NV12;
+     vpp->qsv_param.num_crop      = 0;
+ 
+     return 0;
+ }
+ 
+ static void overlay_qsv_uninit(AVFilterContext *ctx)
+ {
+     QSVOverlayContext *vpp = ctx->priv;
+ 
+     av_frame_free(&vpp->main);
+     av_frame_free(&vpp->over_prev);
+     av_frame_free(&vpp->over_next);
+     ff_qsvvpp_free(&vpp->qsv);
+     av_freep(&vpp->comp_conf.InputStream);
+     av_freep(&vpp->qsv_param.ext_buf);
+ }
+ 
+ static int overlay_qsv_query_formats(AVFilterContext *ctx)
+ {
+     int i;
+ 
+     static const enum AVPixelFormat main_in_fmts[] = {
+         AV_PIX_FMT_YUV420P,
+         AV_PIX_FMT_NV12,
+         AV_PIX_FMT_YUYV422,
+         AV_PIX_FMT_RGB32,
+         AV_PIX_FMT_QSV,
+         AV_PIX_FMT_NONE
+     };
+     static const enum AVPixelFormat out_pix_fmts[] = {
+         AV_PIX_FMT_NV12,
+         AV_PIX_FMT_QSV,
+         AV_PIX_FMT_NONE
+     };
+ 
+     for (i = 0; i < ctx->nb_inputs; i++)
+         ff_formats_ref(ff_make_format_list(main_in_fmts), &ctx->inputs[i]->out_formats);
+ 
+     ff_formats_ref(ff_make_format_list(out_pix_fmts), &ctx->outputs[0]->in_formats);
+ 
+     return 0;
+ }
+ 
+ static const AVClass overlay_qsv_class = {
+     .class_name = "overlay_qsv",
+     .item_name  = av_default_item_name,
+     .option     = options,
+     .version    = LIBAVUTIL_VERSION_INT,
+ };
+ 
+ static const AVFilterPad overlay_qsv_inputs[] = {
+     {
+         .name          = "main",
+         .type          = AVMEDIA_TYPE_VIDEO,
+         .filter_frame  = filter_frame_main,
+         .config_props  = config_main_input,
+         .needs_fifo    = 1,
+     },
+     {
+         .name          = "overlay",
+         .type          = AVMEDIA_TYPE_VIDEO,
+         .filter_frame  = filter_frame_overlay,
+         .config_props  = config_overlay_input,
+         .needs_fifo    = 1,
+     },
+     { NULL }
+ };
+ 
+ static const AVFilterPad overlay_qsv_outputs[] = {
+     {
+         .name          = "default",
+         .type          = AVMEDIA_TYPE_VIDEO,
+         .config_props  = config_output,
+         .request_frame = request_frame,
+     },
+     { NULL }
+ };
+ 
+ AVFilter ff_vf_overlay_qsv = {
+     .name           = "overlay_qsv",
+     .description    = NULL_IF_CONFIG_SMALL("Quick Sync Video overlay."),
+     .priv_size      = sizeof(QSVOverlayContext),
+     .query_formats  = overlay_qsv_query_formats,
+     .init           = overlay_qsv_init,
+     .uninit         = overlay_qsv_uninit,
+     .inputs         = overlay_qsv_inputs,
+     .outputs        = overlay_qsv_outputs,
+     .priv_class     = &overlay_qsv_class,
+     .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+ };
diff --cc libavfilter/vf_vpp_qsv.c
index 0000000000,610e821c1a..86af017d2e
mode 000000,100644..100644
--- a/libavfilter/vf_vpp_qsv.c
+++ b/libavfilter/vf_vpp_qsv.c
@@@ -1,0 -1,401 +1,401 @@@
+ /*
 - * This file is part of Libav.
++ * This file is part of FFmpeg.
+  *
 - * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU Lesser General Public
+  * License as published by the Free Software Foundation; either
+  * version 2.1 of the License, or (at your option) any later version.
+  *
 - * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  * Lesser General Public License for more details.
+  *
+  * You should have received a copy of the GNU Lesser General Public
 - * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+  */
+ 
+ /**
+  ** @file
+  ** Hardware accelerated common filters based on Intel Quick Sync Video VPP
+  **/
+ 
+ #include <float.h>
+ 
+ #include "libavutil/opt.h"
+ #include "libavutil/eval.h"
+ #include "libavutil/avassert.h"
+ #include "libavutil/pixdesc.h"
+ 
+ #include "formats.h"
+ #include "internal.h"
+ #include "avfilter.h"
+ #include "libavcodec/avcodec.h"
+ #include "libavformat/avformat.h"
+ 
+ #include "qsvvpp.h"
+ 
+ #define OFFSET(x) offsetof(VPPContext, x)
+ #define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+ 
+ /* number of video enhancement filters */
+ #define ENH_FILTERS_COUNT (5)
+ 
+ typedef struct VPPContext{
+     const AVClass *class;
+ 
+     QSVVPPContext *qsv;
+ 
+     /* Video Enhancement Algorithms */
+     mfxExtVPPDeinterlacing  deinterlace_conf;
+     mfxExtVPPFrameRateConversion frc_conf;
+     mfxExtVPPDenoise denoise_conf;
+     mfxExtVPPDetail detail_conf;
+     mfxExtVPPProcAmp procamp_conf;
+ 
+     int out_width;
+     int out_height;
+ 
+     AVRational framerate;       /* target framerate */
+     int use_frc;                /* use framerate conversion */
+     int deinterlace;            /* deinterlace mode : 0=off, 1=bob, 2=advanced */
+     int denoise;                /* Enable Denoise algorithm. Value [0, 100] */
+     int detail;                 /* Enable Detail Enhancement algorithm. */
+                                 /* Level is the optional, value [0, 100] */
+     int use_crop;               /* 1 = use crop; 0=none */
+     int crop_w;
+     int crop_h;
+     int crop_x;
+     int crop_y;
+ 
+     /* param for the procamp */
+     int    procamp;            /* enable procamp */
+     float  hue;
+     float  saturation;
+     float  contrast;
+     float  brightness;
+ 
+     char *cx, *cy, *cw, *ch;
+     char *ow, *oh;
+ } VPPContext;
+ 
+ static const AVOption options[] = {
+     { "deinterlace", "deinterlace mode: 0=off, 1=bob, 2=advanced", OFFSET(deinterlace), AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, MFX_DEINTERLACING_ADVANCED, .flags = FLAGS, "deinterlace" },
+     { "bob",         "Bob deinterlace mode.",                      0,                   AV_OPT_TYPE_CONST,    { .i64 = MFX_DEINTERLACING_BOB },            .flags = FLAGS, "deinterlace" },
+     { "advanced",    "Advanced deinterlace mode. ",                0,                   AV_OPT_TYPE_CONST,    { .i64 = MFX_DEINTERLACING_ADVANCED },       .flags = FLAGS, "deinterlace" },
+ 
+     { "denoise",     "denoise level [0, 100]",       OFFSET(denoise),     AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 100, .flags = FLAGS },
+     { "detail",      "enhancement level [0, 100]",   OFFSET(detail),      AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 100, .flags = FLAGS },
+     { "framerate",   "output framerate",             OFFSET(framerate),   AV_OPT_TYPE_RATIONAL, { .dbl = 0.0 },0, DBL_MAX, .flags = FLAGS },
+     { "procamp",     "Enable ProcAmp",               OFFSET(procamp),     AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 1, .flags = FLAGS},
+     { "hue",         "ProcAmp hue",                  OFFSET(hue),         AV_OPT_TYPE_FLOAT,    { .dbl = 0.0 }, -180.0, 180.0, .flags = FLAGS},
+     { "saturation",  "ProcAmp saturation",           OFFSET(saturation),  AV_OPT_TYPE_FLOAT,    { .dbl = 1.0 }, 0.0, 10.0, .flags = FLAGS},
+     { "contrast",    "ProcAmp contrast",             OFFSET(contrast),    AV_OPT_TYPE_FLOAT,    { .dbl = 1.0 }, 0.0, 10.0, .flags = FLAGS},
+     { "brightness",  "ProcAmp brightness",           OFFSET(brightness),  AV_OPT_TYPE_FLOAT,    { .dbl = 0.0 }, -100.0, 100.0, .flags = FLAGS},
+ 
+     { "cw",   "set the width crop area expression",   OFFSET(cw), AV_OPT_TYPE_STRING, { .str = "iw" }, CHAR_MIN, CHAR_MAX, FLAGS },
+     { "ch",   "set the height crop area expression",  OFFSET(ch), AV_OPT_TYPE_STRING, { .str = "ih" }, CHAR_MIN, CHAR_MAX, FLAGS },
+     { "cx",   "set the x crop area expression",       OFFSET(cx), AV_OPT_TYPE_STRING, { .str = "(in_w-out_w)/2" }, CHAR_MIN, CHAR_MAX, FLAGS },
+     { "cy",   "set the y crop area expression",       OFFSET(cy), AV_OPT_TYPE_STRING, { .str = "(in_h-out_h)/2" }, CHAR_MIN, CHAR_MAX, FLAGS },
+ 
+     { "w",      "Output video width",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str="cw" }, 0, 255, .flags = FLAGS },
+     { "width",  "Output video width",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str="cw" }, 0, 255, .flags = FLAGS },
+     { "h",      "Output video height", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
+     { "height", "Output video height", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
+     { NULL }
+ };
+ 
+ static const char *const var_names[] = {
+     "iw", "in_w",
+     "ih", "in_h",
+     "ow", "out_w", "w",
+     "oh", "out_h", "h",
+     "cw",
+     "ch",
+     "cx",
+     "cy",
+     NULL
+ };
+ 
+ enum var_name {
+     VAR_iW, VAR_IN_W,
+     VAR_iH, VAR_IN_H,
+     VAR_oW, VAR_OUT_W, VAR_W,
+     VAR_oH, VAR_OUT_H, VAR_H,
+     CW,
+     CH,
+     CX,
+     CY,
+     VAR_VARS_NB
+ };
+ 
+ static int eval_expr(AVFilterContext *ctx)
+ {
+ #define PASS_EXPR(e, s) {\
+     ret = av_expr_parse(&e, s, var_names, NULL, NULL, NULL, NULL, 0, ctx); \
+     if (ret < 0) {\
+         av_log(ctx, AV_LOG_ERROR, "Error when passing '%s'.\n", s);\
+         goto release;\
+     }\
+ }
+ #define CALC_EXPR(e, v, i) {\
+     i = v = av_expr_eval(e, var_values, NULL); \
+ }
+     VPPContext *vpp = ctx->priv;
+     double  var_values[VAR_VARS_NB] = { NAN };
+     AVExpr *w_expr  = NULL, *h_expr  = NULL;
+     AVExpr *cw_expr = NULL, *ch_expr = NULL;
+     AVExpr *cx_expr = NULL, *cy_expr = NULL;
+     int     ret = 0;
+ 
+     PASS_EXPR(cw_expr, vpp->cw);
+     PASS_EXPR(ch_expr, vpp->ch);
+ 
+     PASS_EXPR(w_expr, vpp->ow);
+     PASS_EXPR(h_expr, vpp->oh);
+ 
+     PASS_EXPR(cx_expr, vpp->cx);
+     PASS_EXPR(cy_expr, vpp->cy);
+ 
+     var_values[VAR_iW] =
+     var_values[VAR_IN_W] = ctx->inputs[0]->w;
+ 
+     var_values[VAR_iH] =
+     var_values[VAR_IN_H] = ctx->inputs[0]->h;
+ 
+     /* crop params */
+     CALC_EXPR(cw_expr, var_values[CW], vpp->crop_w);
+     CALC_EXPR(ch_expr, var_values[CH], vpp->crop_h);
+ 
+     /* calc again in case cw is relative to ch */
+     CALC_EXPR(cw_expr, var_values[CW], vpp->crop_w);
+ 
+     CALC_EXPR(w_expr,
+             var_values[VAR_OUT_W] = var_values[VAR_oW] = var_values[VAR_W],
+             vpp->out_width);
+     CALC_EXPR(h_expr,
+             var_values[VAR_OUT_H] = var_values[VAR_oH] = var_values[VAR_H],
+             vpp->out_height);
+ 
+     /* calc again in case ow is relative to oh */
+     CALC_EXPR(w_expr,
+             var_values[VAR_OUT_W] = var_values[VAR_oW] = var_values[VAR_W],
+             vpp->out_width);
+ 
+ 
+     CALC_EXPR(cx_expr, var_values[CX], vpp->crop_x);
+     CALC_EXPR(cy_expr, var_values[CY], vpp->crop_y);
+ 
+     /* calc again in case cx is relative to cy */
+     CALC_EXPR(cx_expr, var_values[CX], vpp->crop_x);
+ 
+     if ((vpp->crop_w != var_values[VAR_iW]) || (vpp->crop_h != var_values[VAR_iH]))
+         vpp->use_crop = 1;
+ 
+ release:
+     av_expr_free(w_expr);
+     av_expr_free(h_expr);
+     av_expr_free(cw_expr);
+     av_expr_free(ch_expr);
+     av_expr_free(cx_expr);
+     av_expr_free(cy_expr);
+ #undef PASS_EXPR
+ #undef CALC_EXPR
+ 
+     return ret;
+ }
+ 
+ static int config_input(AVFilterLink *inlink)
+ {
+     AVFilterContext *ctx = inlink->dst;
+     VPPContext      *vpp = ctx->priv;
+     int              ret;
+ 
+     if (vpp->framerate.den == 0 || vpp->framerate.num == 0)
+         vpp->framerate = inlink->frame_rate;
+ 
+     if (av_cmp_q(vpp->framerate, inlink->frame_rate))
+         vpp->use_frc = 1;
+ 
+     ret = eval_expr(ctx);
+     if (ret != 0) {
+         av_log(ctx, AV_LOG_ERROR, "Fail to eval expr.\n");
+         return ret;
+     }
+ 
+     if (vpp->out_height == 0 || vpp->out_width == 0) {
+         vpp->out_width  = inlink->w;
+         vpp->out_height = inlink->h;
+     }
+ 
+     if (vpp->use_crop) {
+         vpp->crop_x = FFMAX(vpp->crop_x, 0);
+         vpp->crop_y = FFMAX(vpp->crop_y, 0);
+ 
+         if(vpp->crop_w + vpp->crop_x > inlink->w)
+            vpp->crop_x = inlink->w - vpp->crop_w;
+         if(vpp->crop_h + vpp->crop_y > inlink->h)
+            vpp->crop_y = inlink->h - vpp->crop_h;
+     }
+ 
+     return 0;
+ }
+ 
+ static int config_output(AVFilterLink *outlink)
+ {
+     AVFilterContext *ctx = outlink->src;
+     VPPContext      *vpp = ctx->priv;
+     QSVVPPParam     param = { NULL };
+     QSVVPPCrop      crop  = { 0 };
+     mfxExtBuffer    *ext_buf[ENH_FILTERS_COUNT];
+ 
+     outlink->w          = vpp->out_width;
+     outlink->h          = vpp->out_height;
+     outlink->frame_rate = vpp->framerate;
+     outlink->time_base  = av_inv_q(vpp->framerate);
+ 
+     param.filter_frame  = NULL;
+     param.out_sw_format = AV_PIX_FMT_NV12;
+     param.num_ext_buf   = 0;
+     param.ext_buf       = ext_buf;
+ 
+     if (vpp->use_crop) {
+         crop.in_idx = 0;
+         crop.x = vpp->crop_x;
+         crop.y = vpp->crop_y;
+         crop.w = vpp->crop_w;
+         crop.h = vpp->crop_h;
+ 
+         param.num_crop = 1;
+         param.crop     = &crop;
+     }
+ 
+     if (vpp->deinterlace) {
+         memset(&vpp->deinterlace_conf, 0, sizeof(mfxExtVPPDeinterlacing));
+         vpp->deinterlace_conf.Header.BufferId = MFX_EXTBUFF_VPP_DEINTERLACING;
+         vpp->deinterlace_conf.Header.BufferSz = sizeof(mfxExtVPPDeinterlacing);
+         vpp->deinterlace_conf.Mode = vpp->deinterlace == 1 ?
+                                      MFX_DEINTERLACING_BOB : MFX_DEINTERLACING_ADVANCED;
+ 
+         param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->deinterlace_conf;
+     }
+ 
+     if (vpp->use_frc) {
+         memset(&vpp->frc_conf, 0, sizeof(mfxExtVPPFrameRateConversion));
+         vpp->frc_conf.Header.BufferId = MFX_EXTBUFF_VPP_FRAME_RATE_CONVERSION;
+         vpp->frc_conf.Header.BufferSz = sizeof(mfxExtVPPFrameRateConversion);
+         vpp->frc_conf.Algorithm = MFX_FRCALGM_DISTRIBUTED_TIMESTAMP;
+ 
+         param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->frc_conf;
+     }
+ 
+     if (vpp->denoise) {
+         memset(&vpp->denoise_conf, 0, sizeof(mfxExtVPPDenoise));
+         vpp->denoise_conf.Header.BufferId = MFX_EXTBUFF_VPP_DENOISE;
+         vpp->denoise_conf.Header.BufferSz = sizeof(mfxExtVPPDenoise);
+         vpp->denoise_conf.DenoiseFactor   = vpp->denoise;
+ 
+         param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->denoise_conf;
+     }
+ 
+     if (vpp->detail) {
+         memset(&vpp->detail_conf, 0, sizeof(mfxExtVPPDetail));
+         vpp->detail_conf.Header.BufferId  = MFX_EXTBUFF_VPP_DETAIL;
+         vpp->detail_conf.Header.BufferSz  = sizeof(mfxExtVPPDetail);
+         vpp->detail_conf.DetailFactor = vpp->detail;
+ 
+         param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->detail_conf;
+     }
+ 
+     if (vpp->procamp) {
+         memset(&vpp->procamp_conf, 0, sizeof(mfxExtVPPProcAmp));
+         vpp->procamp_conf.Header.BufferId  = MFX_EXTBUFF_VPP_PROCAMP;
+         vpp->procamp_conf.Header.BufferSz  = sizeof(mfxExtVPPProcAmp);
+         vpp->procamp_conf.Hue              = vpp->hue;
+         vpp->procamp_conf.Saturation       = vpp->saturation;
+         vpp->procamp_conf.Contrast         = vpp->contrast;
+         vpp->procamp_conf.Brightness       = vpp->brightness;
+ 
+         param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&vpp->procamp_conf;
+     }
+ 
+     return ff_qsvvpp_create(ctx, &vpp->qsv, &param);
+ }
+ 
+ /* Input-pad callback: hand each incoming frame over to the shared QSV VPP
+  * session; ownership of picref passes to ff_qsvvpp_filter_frame(). */
+ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+ {
+     AVFilterContext *ctx = inlink->dst;
+     VPPContext     *s   = ctx->priv;
+ 
+     return ff_qsvvpp_filter_frame(s->qsv, inlink, picref);
+ }
+ 
+ /* Negotiate the pixel formats supported on the filter's input and output.
+  * The input accepts common software formats plus AV_PIX_FMT_QSV hardware
+  * frames; the output is restricted to NV12 (the native VPP surface layout)
+  * and AV_PIX_FMT_QSV. */
+ static int query_formats(AVFilterContext *ctx)
+ {
+     int ret;
+     static const enum AVPixelFormat in_pix_fmts[] = {
+         AV_PIX_FMT_YUV420P,
+         AV_PIX_FMT_NV12,
+         AV_PIX_FMT_YUYV422,
+         AV_PIX_FMT_RGB32,
+         AV_PIX_FMT_QSV,
+         AV_PIX_FMT_NONE
+     };
+     static const enum AVPixelFormat out_pix_fmts[] = {
+         AV_PIX_FMT_NV12,
+         AV_PIX_FMT_QSV,
+         AV_PIX_FMT_NONE
+     };
+ 
+     /* ff_make_format_list() returns NULL on OOM; ff_formats_ref() reports
+      * that (and its own failures) through its return value, which must be
+      * propagated instead of being silently dropped. */
+     ret = ff_formats_ref(ff_make_format_list(in_pix_fmts),
+                          &ctx->inputs[0]->out_formats);
+     if (ret < 0)
+         return ret;
+ 
+     return ff_formats_ref(ff_make_format_list(out_pix_fmts),
+                           &ctx->outputs[0]->in_formats);
+ }
+ 
+ /* Filter uninit callback: tear down the QSV VPP session (frees it and
+  * resets the pointer via the double-pointer argument). */
+ static av_cold void vpp_uninit(AVFilterContext *ctx)
+ {
+     VPPContext *vpp = ctx->priv;
+ 
+     ff_qsvvpp_free(&vpp->qsv);
+ }
+ 
+ /* AVClass descriptor binding this filter's AVOptions table ("options",
+  * defined earlier in this file) to VPPContext for option parsing and
+  * logging. */
+ static const AVClass vpp_class = {
+     .class_name = "vpp_qsv",
+     .item_name  = av_default_item_name,
+     .option     = options,
+     .version    = LIBAVUTIL_VERSION_INT,
+ };
+ 
+ /* Single video input pad: config_input validates/configures the incoming
+  * link, filter_frame forwards frames to the VPP session.  NULL-terminated
+  * as required by the AVFilterPad array convention. */
+ static const AVFilterPad vpp_inputs[] = {
+     {
+         .name          = "default",
+         .type          = AVMEDIA_TYPE_VIDEO,
+         .config_props  = config_input,
+         .filter_frame  = filter_frame,
+     },
+     { NULL }
+ };
+ 
+ /* Single video output pad: config_output (defined earlier in this file)
+  * sets up the outgoing link properties.  NULL-terminated. */
+ static const AVFilterPad vpp_outputs[] = {
+     {
+         .name          = "default",
+         .type          = AVMEDIA_TYPE_VIDEO,
+         .config_props  = config_output,
+     },
+     { NULL }
+ };
+ 
+ /* Public filter definition (registered from allfilters.c).  No .init is
+  * set; the VPP session appears to be created during link configuration
+  * via ff_qsvvpp_create() — confirm against the config callbacks above.
+  * FF_FILTER_FLAG_HWFRAME_AWARE marks the filter as handling hardware
+  * frames contexts on its links itself. */
+ AVFilter ff_vf_vpp_qsv = {
+     .name          = "vpp_qsv",
+     .description   = NULL_IF_CONFIG_SMALL("Quick Sync Video VPP."),
+     .priv_size     = sizeof(VPPContext),
+     .query_formats = query_formats,
+     .uninit        = vpp_uninit,
+     .inputs        = vpp_inputs,
+     .outputs       = vpp_outputs,
+     .priv_class    = &vpp_class,
+     .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+ };



More information about the ffmpeg-cvslog mailing list