[FFmpeg-devel] [PATCH] CrystalHD decoder support v6

Michael Niedermayer michaelni
Sun Mar 6 04:40:36 CET 2011


[...]
> +/*
> + * The CrystalHD doesn't report interlaced H.264 content in a way that allows
> + * us to distinguish between specific cases that require different handling.
> + * So, for now, we have to hard-code the behaviour we want.
> + *
> + * The default behaviour is to assume MBAFF with input and output fieldpairs.
> + *
> + * Define ASSUME_PAFF_OVER_MBAFF to treat input as PAFF with separate input
> + * and output fields.
> + *
> + * Define ASSUME_TWO_INPUTS_ONE_OUTPUT to treat input as separate fields but
> + * output as a single fieldpair.
> + *
> + * Define both to mess up your playback.
> + */
> +#define ASSUME_PAFF_OVER_MBAFF 0
> +#define ASSUME_TWO_INPUTS_ONE_OUTPUT 0
> +static inline CopyRet copy_frame(AVCodecContext *avctx,
> +                                 BC_DTS_PROC_OUT *output,
> +                                 void *data, int *data_size,
> +                                 uint8_t second_field)
> +{
> +    BC_STATUS ret;
> +    BC_DTS_STATUS decoder_status;
> +    uint8_t is_paff;
> +    uint8_t next_frame_same;
> +    uint8_t interlaced;
> +    uint8_t need_second_field;
> +
> +    CHDContext *priv = avctx->priv_data;
> +
> +    uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
> +                           VDEC_FLAG_BOTTOMFIELD;
> +    uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);
> +
> +    int width    = output->PicInfo.width;
> +    int height   = output->PicInfo.height;
> +    int bwidth;
> +    uint8_t *src = output->Ybuff;
> +    int sStride;
> +    uint8_t *dst;
> +    int dStride;
> +
> +    ret = DtsGetDriverStatus(priv->dev, &decoder_status);
> +    if (ret != BC_STS_SUCCESS) {
> +        av_log(avctx, AV_LOG_ERROR,
> +               "CrystalHD: GetDriverStatus failed: %u\n", ret);
> +       return RET_ERROR;
> +    }
> +
> +    is_paff           = ASSUME_PAFF_OVER_MBAFF ||
> +                        !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC);
> +    next_frame_same   = output->PicInfo.picture_number ==
> +                        (decoder_status.picNumFlags & ~0x40000000);
> +    interlaced        = ((output->PicInfo.flags &
> +                          VDEC_FLAG_INTERLACED_SRC) && is_paff) ||
> +                         next_frame_same || bottom_field || second_field;


see libavcodec/h264_parser.c:
            if(h->sps.frame_mbs_only_flag){
                h->s.picture_structure= PICT_FRAME;
            }else{
                if(get_bits1(&h->s.gb)) { //field_pic_flag
                    h->s.picture_structure= PICT_TOP_FIELD + get_bits1(&h->s.gb); //bottom_field_flag
                } else {
                    h->s.picture_structure= PICT_FRAME;
                }
            }

The above will tell you whether it is a top field, a bottom field, or a frame.
I am not completely sure how to cleanly hook that up, though, but it is likely
better than hardcoding guesses.
mpeg2 field pictures can also be detected through the corresponding parser



> +    /*
> +     * XXX: Is the first field always the first one encountered?
> +     */

This question is not well phrased — as written it is a tautology ("is the X always the X").


> +    need_second_field = interlaced && !bottom_field;
> +
> +    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: next_frame_same: %u | %u | %u\n",
> +           next_frame_same, output->PicInfo.picture_number,
> +           decoder_status.picNumFlags & ~0x40000000);
> +
> +    priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
> +                             FF_BUFFER_HINTS_REUSABLE;
> +    if(avctx->reget_buffer(avctx, &priv->pic) < 0) {
> +        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
> +        return RET_ERROR;
> +    }

reget_buffer() doesn't feel correct here; why do you need it instead of
get_buffer()?

You might also be able to return the frames directly from the decoder without
using get_buffer() at all.


> +
> +    bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
> +    if (priv->is_70012) {
> +        int pStride;
> +
> +        if (width <= 720)
> +            pStride = 720;
> +        else if (width <= 1280)
> +            pStride = 1280;
> +        else if (width <= 1080)
> +            pStride = 1080;
> +        sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
> +    } else {
> +        sStride = bwidth;
> +    }
> +
> +    dStride = priv->pic.linesize[0];
> +    dst     = priv->pic.data[0];
> +
> +    av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");
> +
> +    if (interlaced) {
> +        int dY = 0;
> +        int sY = 0;
> +
> +        height /= 2;
> +        if (bottom_field) {
> +            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
> +            dY = 1;
> +        } else {
> +            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
> +            dY = 0;
> +        }

dY= bottom_field


> +
> +        for (sY = 0; sY < height; dY++, sY++) {
> +            memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
> +            if (interlaced)
> +                dY++;

interlaced is always true here



> +        }
> +    } else {
> +        av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
> +    }




> +
> +    priv->pic.interlaced_frame = interlaced;
> +    if (interlaced)
> +        priv->pic.top_field_first = !bottom_first;
> +
> +    if (output->PicInfo.timeStamp != 0) {
> +        priv->pic.pkt_pts = opaque_list_pop(priv, output->PicInfo.timeStamp);
> +        av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
> +               priv->pic.pkt_pts);
> +    }

Make sure pkt_dts is set to AV_NOPTS_VALUE (see avcodec_decode_video2),
or try to set it correctly — but that's harder.


[...]
> +    /*
> +     * No frames ready. Don't try to extract.
> +     *
> +     * Empirical testing shows that ReadyListCount can be a damn lie,

Maybe it's not fields/frames but slices that are ready?
Or maybe it's something to do with reordering, so that the count doesn't match
the expectation.
(This could be tested with streams with a lot of slices and 1 slice = 1 frame,
 and streams with various reordering structures.)


> +     * and ProcOut still fails when count > 0. The same testing showed
> +     * that two more iterations were needed before ProcOutput would
> +     * succeed.
> +     */
> +    if (priv->output_ready < 2) {
> +        if (decoder_status.ReadyListCount != 0)
> +            priv->output_ready++;
> +        usleep(BASE_WAIT);
> +        av_log(avctx, AV_LOG_INFO, "CrystalHD: Filling pipeline.\n");
> +        return len;
> +    } else if (decoder_status.ReadyListCount == 0) {
> +        /*
> +         * After the pipeline is established, if we encounter a lack of frames
> +         * that probably means we're not giving the hardware enough time to
> +         * decode them, so start increasing the wait time at the end of a
> +         * decode call.
> +         */
> +        usleep(BASE_WAIT);
> +        priv->decode_wait += WAIT_UNIT;
> +        av_log(avctx, AV_LOG_INFO, "CrystalHD: No frames ready. Returning\n");
> +        return len;
> +    }
> +
> +    do {
> +        rec_ret = receive_frame(avctx, data, data_size, 0);
> +        if (rec_ret == 0 && *data_size == 0) {
> +            if (avctx->codec->id == CODEC_ID_H264) {
> +                /*
> +                 * This case is for when the encoded fields are stored
> +                 * separately and we get a separate avpkt for each one. To keep
> +                 * the pipeline stable, we should return nothing and wait for
> +                 * the next time round to grab the second field.
> +                 * H.264 PAFF is an example of this.
> +                 */
> +                av_log(avctx, AV_LOG_VERBOSE, "Returning after first field.\n");
> +                avctx->has_b_frames--;
> +            } else {
> +                /*
> +                 * This case is for when the encoded fields are stored in a
> +                 * single avpkt but the hardware returns then separately. Unless
> +                 * we grab the second field before returning, we'll slip another
> +                 * frame in the pipeline and if that happens a lot, we're sunk.
> +                 * So we have to get that second field now.
> +                 * Interlaced mpeg2 and vc1 are examples of this.
> +                 */
> +                av_log(avctx, AV_LOG_VERBOSE, "Trying to get second field.\n");
> +                while (1) {
> +                    usleep(priv->decode_wait);
> +                    ret = DtsGetDriverStatus(dev, &decoder_status);
> +                    if (ret == BC_STS_SUCCESS &&
> +                        decoder_status.ReadyListCount > 0) {
> +                        rec_ret = receive_frame(avctx, data, data_size, 1);
> +                        if ((rec_ret == 0 && *data_size > 0) ||
> +                            rec_ret == RET_ERROR)
> +                            break;
> +                    }
> +                }

Would it make sense to move all this to a separate thread to avoid blocking
the externally visible decoder with usleep() calls? Or is the waiting
time negligible?

[...]
-- 
Michael     GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

DNS cache poisoning attacks, popular search engine, Google internet authority
dont be evil, please
-------------- next part --------------
A non-text attachment was scrubbed...
Name: not available
Type: application/pgp-signature
Size: 198 bytes
Desc: Digital signature
URL: <http://lists.mplayerhq.hu/pipermail/ffmpeg-devel/attachments/20110306/fd922034/attachment.pgp>



More information about the ffmpeg-devel mailing list