Go to the documentation of this file.
    /* av_parser_init(): reset timestamp bookkeeping to "unknown" */
#if FF_API_CONVERGENCE_DURATION
FF_DISABLE_DEPRECATION_WARNINGS
    s->convergence_duration = 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    s->dts_sync_point    = INT_MIN;
    s->dts_ref_dts_delta = INT_MIN;
    s->pts_dts_delta     = INT_MIN;
    /* inside ff_fetch_timestamp(): for each entry of the AV_PARSER_PTS_NB ring
     * of recorded packets, pick the one containing byte cur_offset + off */
    if (s->cur_offset + off >= s->cur_frame_offset[i] &&
        (s->frame_offset < s->cur_frame_offset[i] ||
         (!s->frame_offset && !s->next_frame_offset)) && // first field/frame
        // check disabled since MPEG-TS does not send complete PES packets
        /*s->next_frame_offset + off <*/ s->cur_frame_end[i]) {
        s->dts    = s->cur_frame_dts[i];
        s->pts    = s->cur_frame_pts[i];
        s->pos    = s->cur_frame_pos[i];
        s->offset = s->next_frame_offset - s->cur_frame_offset[i];
        if (remove)
            s->cur_frame_offset[i] = INT64_MAX;
        if (s->cur_offset + off < s->cur_frame_end[i])
            break;
    }
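The condition above is easier to follow in isolation. Below is a minimal stand-alone sketch of the same idea, with a hypothetical PacketRecord struct standing in for the cur_frame_* arrays of AVCodecParserContext: every incoming packet records its byte range and timestamps in a small ring, and the parser later asks which recorded packet covers a given byte of the assembled frame.

#include <stdint.h>

#define RING_SIZE 4   /* mirrors AV_PARSER_PTS_NB; illustrative stand-in only */

typedef struct PacketRecord {      /* hypothetical, not an FFmpeg type */
    int64_t offset, end;           /* byte range [offset, end) in the stream */
    int64_t pts, dts, pos;         /* values recorded when the packet arrived */
} PacketRecord;

/* Return the ring index whose byte range contains 'byte_offset', or -1. */
static int find_record(const PacketRecord ring[RING_SIZE], int64_t byte_offset)
{
    for (int i = 0; i < RING_SIZE; i++)
        if (byte_offset >= ring[i].offset && byte_offset < ring[i].end)
            return i;
    return -1;
}

ff_fetch_timestamp() is essentially this lookup plus the extra conditions visible above, because an assembled frame can start in one packet and end in another, and because consumed entries are invalidated by setting their offset to INT64_MAX.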
int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx,
                     uint8_t **poutbuf, int *poutbuf_size,
                     const uint8_t *buf, int buf_size,
                     int64_t pts, int64_t dts, int64_t pos)
{
    int index, i;
    uint8_t dummy_buf[AV_INPUT_BUFFER_PADDING_SIZE];

    /* Parsers only work for the specified codec ids. */
    av_assert1(avctx->codec_id == s->parser->codec_ids[0] ||
               avctx->codec_id == s->parser->codec_ids[1] ||
               avctx->codec_id == s->parser->codec_ids[2] ||
               avctx->codec_id == s->parser->codec_ids[3] ||
               avctx->codec_id == s->parser->codec_ids[4]);

    /* on the first call, seed the parser offsets from the packet position */
    if (!(s->flags & PARSER_FLAG_FETCHED_OFFSET)) {
        s->next_frame_offset =
        s->cur_offset        = pos;
        s->flags            |= PARSER_FLAG_FETCHED_OFFSET;
    }

    if (buf_size == 0) {
        /* padding is always necessary, even at EOF */
        memset(dummy_buf, 0, sizeof(dummy_buf));
        buf = dummy_buf;
    } else if (s->cur_offset + buf_size != s->cur_frame_end[s->cur_frame_start_index]) {
        /* add a new packet descriptor to the timestamp ring */
        i = (s->cur_frame_start_index + 1) & (AV_PARSER_PTS_NB - 1);
        s->cur_frame_start_index = i;
        s->cur_frame_offset[i]   = s->cur_offset;
        s->cur_frame_end[i]      = s->cur_offset + buf_size;
        s->cur_frame_pts[i]      = pts;
        s->cur_frame_dts[i]      = dts;
        s->cur_frame_pos[i]      = pos;
    }

    if (s->fetch_timestamp) {
        s->fetch_timestamp = 0;
        s->last_pts        = s->pts;
        s->last_dts        = s->dts;
        s->last_pos        = s->pos;
        ff_fetch_timestamp(s, 0, 0, 0);
    }

    index = s->parser->parser_parse(s, avctx, (const uint8_t **) poutbuf,
                                    poutbuf_size, buf, buf_size);

    /* FILL() copies a parsed value into the codec context if it is still unset there */
#define FILL(name) if(s->name > 0 && avctx->name <= 0) avctx->name = s->name

    if (*poutbuf_size) {
        /* fill the data for the current frame */
        s->frame_offset = s->next_frame_offset;

        /* offset of the next frame */
        s->next_frame_offset = s->cur_offset + index;
        s->fetch_timestamp   = 1;
    }
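For reference, the calling side of the function above follows a fixed pattern. This is a minimal sketch, not FFmpeg code: it assumes a raw elementary stream readable through a plain FILE*, and reduces error handling to the essentials.

#include <libavcodec/avcodec.h>
#include <stdio.h>

/* Sketch: feed raw bytes through a parser and count the access units found. */
static int count_access_units(FILE *f, enum AVCodecID codec_id)
{
    AVCodecParserContext *parser = av_parser_init(codec_id);
    AVCodecContext *avctx = avcodec_alloc_context3(avcodec_find_decoder(codec_id));
    uint8_t inbuf[4096];
    int frames = 0;

    if (!parser || !avctx)
        return AVERROR(ENOMEM);

    for (;;) {
        size_t len    = fread(inbuf, 1, sizeof(inbuf), f);
        uint8_t *data = inbuf;
        int size      = (int)len;

        do {
            uint8_t *out;
            int out_size;
            int used = av_parser_parse2(parser, avctx, &out, &out_size,
                                        data, size,
                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            data += used;
            size -= used;
            if (out_size)        /* a complete access unit was assembled */
                frames++;
        } while (size > 0);

        if (!len)                /* EOF: the zero-size call above flushed the parser */
            break;
    }

    av_parser_close(parser);
    avcodec_free_context(&avctx);
    return frames;
}

In a real player each (out, out_size) pair would be wrapped in an AVPacket and handed to avcodec_send_packet(); the parser only splits and timestamps the bytestream, it does not decode.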
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx,
                     uint8_t **poutbuf, int *poutbuf_size,
                     const uint8_t *buf, int buf_size, int keyframe)
{
    if (s && s->parser->split) {
        if (avctx->flags  & AV_CODEC_FLAG_GLOBAL_HEADER ||
            avctx->flags2 & AV_CODEC_FLAG2_LOCAL_HEADER) {
            /* the split() callback locates the in-band global header; drop it */
            int i = s->parser->split(avctx, buf, buf_size);
            buf      += i;
            buf_size -= i;
        }
    }

    /* by default, pass the input through unchanged */
    *poutbuf      = (uint8_t *) buf;
    *poutbuf_size = buf_size;

    /* for local headers, prepend the extradata to every keyframe */
    if (avctx->extradata && keyframe && (avctx->flags2 & AV_CODEC_FLAG2_LOCAL_HEADER)) {
        int size = buf_size + avctx->extradata_size;

        *poutbuf_size = size;
        /* ... allocate size + padding, copy extradata then the payload ... */
    }
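One detail of the function above that is easy to get wrong on the calling side is buffer ownership: a return value greater than zero means *poutbuf was freshly allocated and belongs to the caller. A small sketch, with write_cb standing in for whatever consumes the packet (hypothetical, not an FFmpeg callback):

#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

/* Sketch: push one packet through av_parser_change(), honouring ownership. */
static int forward_packet(AVCodecParserContext *parser, AVCodecContext *avctx,
                          const AVPacket *pkt,
                          void (*write_cb)(const uint8_t *data, int size))
{
    uint8_t *out;
    int out_size;
    int ret = av_parser_change(parser, avctx, &out, &out_size,
                               pkt->data, pkt->size,
                               !!(pkt->flags & AV_PKT_FLAG_KEY));
    if (ret < 0)
        return ret;

    write_cb(out, out_size);

    if (ret > 0)        /* av_parser_change() allocated a new buffer: free it */
        av_freep(&out);
    return 0;
}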
    /* av_parser_close(): give the parser a chance to free its private state */
    if (s->parser->parser_close)
        s->parser->parser_close(s);
int ff_combine_frame(ParseContext *pc, int next,
                     const uint8_t **buf, int *buf_size)
{
    if (pc->overread) {
        ff_dlog(NULL, "overread %d, state:%"PRIX32" next:%d index:%d o_index:%d\n",
                pc->overread, pc->state, next, pc->index, pc->overread_index);
        ff_dlog(NULL, "%X %X %X %X\n",
                (*buf)[0], (*buf)[1], (*buf)[2], (*buf)[3]);
    }

    if (next > *buf_size)
        return AVERROR(EINVAL);

    /* frame end not found: grow the buffer, append the input, ask for more data */
    if (next == END_NOT_FOUND) {
        void *new_buffer = av_fast_realloc(pc->buffer, &pc->buffer_size,
                                           *buf_size + pc->index +
                                           AV_INPUT_BUFFER_PADDING_SIZE);
        /* ... */
        pc->index += *buf_size;
        return -1;
    }

    /* record bytes read past the frame end so they reach the next call */
    for (; next < 0; next++) {
        pc->state   = pc->state   << 8 | pc->buffer[pc->last_index + next];
        pc->state64 = pc->state64 << 8 | pc->buffer[pc->last_index + next];
        pc->overread++;
    }

    if (pc->overread) {
        ff_dlog(NULL, "overread %d, state:%"PRIX32" next:%d index:%d o_index:%d\n",
                pc->overread, pc->state, next, pc->index, pc->overread_index);
        ff_dlog(NULL, "%X %X %X %X\n",
                (*buf)[0], (*buf)[1], (*buf)[2], (*buf)[3]);
    }
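ff_combine_frame() is meant to be called from a codec parser's parser_parse() callback. The sketch below shows that canonical shape; END_NOT_FOUND, ParseContext and PARSER_FLAG_COMPLETE_FRAMES come from the internal parser.h, while find_frame_end() is a hypothetical stand-in for a codec-specific start-code scanner.

#include "parser.h"   /* internal header: ParseContext, END_NOT_FOUND, ... */

/* hypothetical scanner: returns the frame end offset, or END_NOT_FOUND */
static int find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size);

/* Schematic parser_parse() callback built around ff_combine_frame(). */
static int toy_parse(AVCodecParserContext *s, AVCodecContext *avctx,
                     const uint8_t **poutbuf, int *poutbuf_size,
                     const uint8_t *buf, int buf_size)
{
    ParseContext *pc = s->priv_data;
    int next;

    if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
        next = buf_size;                            /* caller sends whole frames */
    } else {
        next = find_frame_end(pc, buf, buf_size);
        if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
            *poutbuf      = NULL;                   /* frame not complete yet */
            *poutbuf_size = 0;
            return buf_size;                        /* consume all input */
        }
    }

    *poutbuf      = buf;                            /* one complete frame */
    *poutbuf_size = buf_size;
    return next;                                    /* bytes of input consumed */
}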
    uint32_t state = -1;
    const uint8_t *ptr = buf, *end = buf + buf_size;
    while (ptr < end) {
        ptr = avpriv_find_start_code(ptr, end, &state);
        if (state == 0x1B3 || state == 0x1B6)   /* GOP or VOP start code */
            return ptr - 4 - buf;
    }
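The offset returned above marks where the configuration headers end and picture data begins, which is exactly what the split() parser callback and the global-header logic need. A minimal sketch (internal-API usage, assuming avctx->extradata is still unset) of stashing those header bytes as extradata:

#include <string.h>
#include "avcodec.h"
#include "libavutil/mem.h"

/* Prototype as shown in the listing above (declared in an internal header). */
int ff_mpeg4video_split(AVCodecContext *avctx, const uint8_t *buf, int buf_size);

/* Sketch: copy the in-band MPEG-4 headers of one packet into extradata. */
static int stash_mpeg4_headers(AVCodecContext *avctx,
                               const uint8_t *pkt_data, int pkt_size)
{
    int split = ff_mpeg4video_split(avctx, pkt_data, pkt_size);
    if (split <= 0)
        return 0;                     /* no header bytes before the first GOP/VOP */

    avctx->extradata = av_mallocz(split + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);
    memcpy(avctx->extradata, pkt_data, split);
    avctx->extradata_size = split;
    return split;
}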
#define FF_ENABLE_DEPRECATION_WARNINGS
int ff_mpeg4video_split(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
const AVCodecParser * av_parser_iterate(void **opaque)
Iterate over all registered codec parsers (a short usage sketch follows at the end of this list).
int overread_index
the index into ParseContext.buffer of the overread bytes
static av_cold int end(AVCodecContext *avctx)
void ff_parse_close(AVCodecParserContext *s)
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
uint32_t state
contains the last few bytes in MSB order
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
void ff_fetch_timestamp(AVCodecParserContext *s, int off, int remove, int fuzzy)
Fetch timestamps for a specific byte within the current access unit.
int flags
AV_CODEC_FLAG_*.
AVCodecParserContext * av_parser_init(int codec_id)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
int overread
the number of bytes that were irreversibly read from the next frame
#define av_assert0(cond)
assert() equivalent, that is always enabled.
@ AV_PICTURE_TYPE_I
Intra.
int flags2
AV_CODEC_FLAG2_*.
#define AV_NOPTS_VALUE
Undefined timestamp value.
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
Combine the (truncated) bitstream to a complete frame.
#define i(width, name, range_min, range_max)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
int(* parser_init)(AVCodecParserContext *s)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
#define AV_INPUT_BUFFER_PADDING_SIZE
AVCodecContext
main external API structure.
#define AV_CODEC_FLAG2_LOCAL_HEADER
Place global headers at every keyframe instead of in extradata.
#define PARSER_FLAG_FETCHED_OFFSET
Set if the parser has a valid file offset.
uint64_t state64
contains the last 8 bytes in MSB order
#define FF_DISABLE_DEPRECATION_WARNINGS
enum AVMediaType codec_type
int av_parser_parse2(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int64_t pts, int64_t dts, int64_t pos)
Parse a packet.
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
void av_parser_close(AVCodecParserContext *s)
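To round off the symbol list: av_parser_iterate(), referenced above, supersedes the old linked-list registration walk. A minimal sketch that enumerates every registered parser and prints the codec IDs it declares (the same five-slot codec_ids array that av_parser_parse2() asserts against in this version):

#include <libavcodec/avcodec.h>
#include <stdio.h>

/* Sketch: enumerate every registered parser and the codec ids it claims. */
int main(void)
{
    void *iter = NULL;
    const AVCodecParser *p;

    while ((p = av_parser_iterate(&iter))) {
        for (int i = 0; i < 5 && p->codec_ids[i] != AV_CODEC_ID_NONE; i++)
            printf("parser handles codec id %d\n", p->codec_ids[i]);
    }
    return 0;
}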