[FFmpeg-devel] [PATCH 3/5] avcodec/nvdec: Explicitly mark codecs that support 444 output formats
Timo Rothenpieler
timo at rothenpieler.org
Sun Oct 21 00:01:49 EEST 2018
On 20.10.2018 22:46, Philip Langdale wrote:
> With the introduction of HEVC 444 support, we now have two codecs
> that can handle 444: HEVC and MJPEG. MJPEG can decode 444 content,
> but can only output one of the semi-planar formats.
>
> That means we need additional logic to decide whether to use a
> 444 output format or not.
>
> Signed-off-by: Philip Langdale <philipl at overt.org>
> ---
> libavcodec/nvdec.c | 7 ++++---
> libavcodec/nvdec.h | 5 ++++-
> libavcodec/nvdec_h264.c | 2 +-
> libavcodec/nvdec_hevc.c | 10 ++++++++--
> libavcodec/nvdec_mjpeg.c | 2 +-
> libavcodec/nvdec_mpeg12.c | 2 +-
> libavcodec/nvdec_mpeg4.c | 2 +-
> libavcodec/nvdec_vc1.c | 2 +-
> libavcodec/nvdec_vp8.c | 2 +-
> libavcodec/nvdec_vp9.c | 2 +-
> 10 files changed, 23 insertions(+), 13 deletions(-)
>
> diff --git a/libavcodec/nvdec.c b/libavcodec/nvdec.c
> index 43cc38485a..76e8b7c7bc 100644
> --- a/libavcodec/nvdec.c
> +++ b/libavcodec/nvdec.c
> @@ -295,7 +295,7 @@ int ff_nvdec_decode_init(AVCodecContext *avctx)
> av_log(avctx, AV_LOG_ERROR, "Unsupported chroma format\n");
> return AVERROR(ENOSYS);
> }
> - chroma_444 = cuvid_chroma_format == cudaVideoChromaFormat_444;
> + chroma_444 = ctx->supports_444 && cuvid_chroma_format == cudaVideoChromaFormat_444;
>
> if (!avctx->hw_frames_ctx) {
> ret = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_CUDA);
> @@ -595,7 +595,8 @@ static AVBufferRef *nvdec_alloc_dummy(int size)
>
> int ff_nvdec_frame_params(AVCodecContext *avctx,
> AVBufferRef *hw_frames_ctx,
> - int dpb_size)
> + int dpb_size,
> + int supports_444)
> {
> AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
> const AVPixFmtDescriptor *sw_desc;
> @@ -616,7 +617,7 @@ int ff_nvdec_frame_params(AVCodecContext *avctx,
> av_log(avctx, AV_LOG_VERBOSE, "Unsupported chroma format\n");
> return AVERROR(EINVAL);
> }
> - chroma_444 = cuvid_chroma_format == cudaVideoChromaFormat_444;
> + chroma_444 = supports_444 && cuvid_chroma_format == cudaVideoChromaFormat_444;
>
> frames_ctx->format = AV_PIX_FMT_CUDA;
> frames_ctx->width = (avctx->coded_width + 1) & ~1;
> diff --git a/libavcodec/nvdec.h b/libavcodec/nvdec.h
> index 85a0fcf725..09ae8c37e6 100644
> --- a/libavcodec/nvdec.h
> +++ b/libavcodec/nvdec.h
> @@ -61,6 +61,8 @@ typedef struct NVDECContext {
> unsigned *slice_offsets;
> int nb_slices;
> unsigned int slice_offsets_allocated;
> +
> + int supports_444;
> } NVDECContext;
>
> int ff_nvdec_decode_init(AVCodecContext *avctx);
> @@ -72,7 +74,8 @@ int ff_nvdec_simple_decode_slice(AVCodecContext *avctx, const uint8_t *buffer,
> uint32_t size);
> int ff_nvdec_frame_params(AVCodecContext *avctx,
> AVBufferRef *hw_frames_ctx,
> - int dpb_size);
> + int dpb_size,
> + int supports_444);
> int ff_nvdec_get_ref_idx(AVFrame *frame);
>
> #endif /* AVCODEC_NVDEC_H */
> diff --git a/libavcodec/nvdec_h264.c b/libavcodec/nvdec_h264.c
> index 25b30329d0..116bd4fb5d 100644
> --- a/libavcodec/nvdec_h264.c
> +++ b/libavcodec/nvdec_h264.c
> @@ -166,7 +166,7 @@ static int nvdec_h264_frame_params(AVCodecContext *avctx,
> {
> const H264Context *h = avctx->priv_data;
> const SPS *sps = h->ps.sps;
> - return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->ref_frame_count + sps->num_reorder_frames);
> + return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->ref_frame_count + sps->num_reorder_frames, 0);
> }
>
> const AVHWAccel ff_h264_nvdec_hwaccel = {
> diff --git a/libavcodec/nvdec_hevc.c b/libavcodec/nvdec_hevc.c
> index e04a701f3a..9e726f708e 100644
> --- a/libavcodec/nvdec_hevc.c
> +++ b/libavcodec/nvdec_hevc.c
> @@ -269,7 +269,13 @@ static int nvdec_hevc_frame_params(AVCodecContext *avctx,
> {
> const HEVCContext *s = avctx->priv_data;
> const HEVCSPS *sps = s->ps.sps;
> - return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering + 1);
> + return ff_nvdec_frame_params(avctx, hw_frames_ctx, sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering + 1, 1);
> +}
> +
> +static int nvdec_hevc_decode_init(AVCodecContext *avctx) {
> + NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
> + ctx->supports_444 = 1;
> + return ff_nvdec_decode_init(avctx);
> }
>
> const AVHWAccel ff_hevc_nvdec_hwaccel = {
> @@ -281,7 +287,7 @@ const AVHWAccel ff_hevc_nvdec_hwaccel = {
> .end_frame = ff_nvdec_end_frame,
> .decode_slice = nvdec_hevc_decode_slice,
> .frame_params = nvdec_hevc_frame_params,
> - .init = ff_nvdec_decode_init,
> + .init = nvdec_hevc_decode_init,
> .uninit = ff_nvdec_decode_uninit,
> .priv_data_size = sizeof(NVDECContext),
> };
> diff --git a/libavcodec/nvdec_mjpeg.c b/libavcodec/nvdec_mjpeg.c
> index 7e404246ce..be39d23bae 100644
> --- a/libavcodec/nvdec_mjpeg.c
> +++ b/libavcodec/nvdec_mjpeg.c
> @@ -66,7 +66,7 @@ static int nvdec_mjpeg_frame_params(AVCodecContext *avctx,
> AVBufferRef *hw_frames_ctx)
> {
> // Only need storage for the current frame
> - return ff_nvdec_frame_params(avctx, hw_frames_ctx, 1);
> + return ff_nvdec_frame_params(avctx, hw_frames_ctx, 1, 0);
> }
>
> #if CONFIG_MJPEG_NVDEC_HWACCEL
> diff --git a/libavcodec/nvdec_mpeg12.c b/libavcodec/nvdec_mpeg12.c
> index 7293d50555..300e1d3d88 100644
> --- a/libavcodec/nvdec_mpeg12.c
> +++ b/libavcodec/nvdec_mpeg12.c
> @@ -87,7 +87,7 @@ static int nvdec_mpeg12_frame_params(AVCodecContext *avctx,
> AVBufferRef *hw_frames_ctx)
> {
> // Each frame can at most have one P and one B reference
> - return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2);
> + return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2, 0);
> }
>
> #if CONFIG_MPEG2_NVDEC_HWACCEL
> diff --git a/libavcodec/nvdec_mpeg4.c b/libavcodec/nvdec_mpeg4.c
> index 907af1391a..739b049933 100644
> --- a/libavcodec/nvdec_mpeg4.c
> +++ b/libavcodec/nvdec_mpeg4.c
> @@ -103,7 +103,7 @@ static int nvdec_mpeg4_frame_params(AVCodecContext *avctx,
> AVBufferRef *hw_frames_ctx)
> {
> // Each frame can at most have one P and one B reference
> - return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2);
> + return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2, 0);
> }
>
> const AVHWAccel ff_mpeg4_nvdec_hwaccel = {
> diff --git a/libavcodec/nvdec_vc1.c b/libavcodec/nvdec_vc1.c
> index 7257692d66..10e7b5ab0d 100644
> --- a/libavcodec/nvdec_vc1.c
> +++ b/libavcodec/nvdec_vc1.c
> @@ -107,7 +107,7 @@ static int nvdec_vc1_frame_params(AVCodecContext *avctx,
> AVBufferRef *hw_frames_ctx)
> {
> // Each frame can at most have one P and one B reference
> - return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2);
> + return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2, 0);
> }
>
> const AVHWAccel ff_vc1_nvdec_hwaccel = {
> diff --git a/libavcodec/nvdec_vp8.c b/libavcodec/nvdec_vp8.c
> index 7b37445613..9c4608d8cf 100644
> --- a/libavcodec/nvdec_vp8.c
> +++ b/libavcodec/nvdec_vp8.c
> @@ -87,7 +87,7 @@ static int nvdec_vp8_frame_params(AVCodecContext *avctx,
> AVBufferRef *hw_frames_ctx)
> {
> // VP8 uses a fixed size pool of 3 possible reference frames
> - return ff_nvdec_frame_params(avctx, hw_frames_ctx, 3);
> + return ff_nvdec_frame_params(avctx, hw_frames_ctx, 3, 0);
> }
>
> AVHWAccel ff_vp8_nvdec_hwaccel = {
> diff --git a/libavcodec/nvdec_vp9.c b/libavcodec/nvdec_vp9.c
> index 3b665a9bc7..a76bcf9943 100644
> --- a/libavcodec/nvdec_vp9.c
> +++ b/libavcodec/nvdec_vp9.c
> @@ -166,7 +166,7 @@ static int nvdec_vp9_frame_params(AVCodecContext *avctx,
> AVBufferRef *hw_frames_ctx)
> {
> // VP9 uses a fixed size pool of 8 possible reference frames
> - return ff_nvdec_frame_params(avctx, hw_frames_ctx, 8);
> + return ff_nvdec_frame_params(avctx, hw_frames_ctx, 8, 0);
> }
>
> const AVHWAccel ff_vp9_nvdec_hwaccel = {
>
LGTM
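
For anyone wiring up another 444-capable codec later, the pattern this
patch establishes for HEVC is just: set supports_444 in a small init
wrapper around ff_nvdec_decode_init(), and pass 1 as the new last
argument of ff_nvdec_frame_params(). A minimal sketch (the codec name
"foo" and the dpb_size of 2 are placeholders, not part of this patch;
the usual libavcodec includes are assumed):

    #include "avcodec.h"
    #include "internal.h"
    #include "nvdec.h"

    /* Hypothetical hwaccel for a codec "foo" that can decode and
     * output 4:4:4, mirroring the HEVC changes in this patch. */
    static int nvdec_foo_decode_init(AVCodecContext *avctx)
    {
        NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
        ctx->supports_444 = 1;               /* opt in to 444 output formats */
        return ff_nvdec_decode_init(avctx);
    }

    static int nvdec_foo_frame_params(AVCodecContext *avctx,
                                      AVBufferRef *hw_frames_ctx)
    {
        /* dpb_size of 2 is a placeholder; use whatever the codec needs.
         * The final 1 tells nvdec the codec may emit 444 formats. */
        return ff_nvdec_frame_params(avctx, hw_frames_ctx, 2, 1);
    }

Codecs that cannot output 444 simply keep ff_nvdec_decode_init as their
.init callback and pass 0, as the rest of the patch does.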