31 #define MAX_HUFF_CODES 16
73     int w4 = (avctx->width  + 3) & ~3;
74     int h4 = (avctx->height + 3) & ~3;
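       /* w4/h4: width and height rounded up to the next multiple of 4,
          the 4-pixel granularity used by the block predictors below */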
87     if (!mp->changes_map || !mp->vpt || !mp->hpt)
95 #if !CONFIG_HARDCODED_TABLES
115         if (y >= mp->avctx->height)
119         pixels = (uint16_t *)&mp->frame->data[0][y * mp->frame->linesize[0] + x * 2];
123         for (i = 0; i < w; ++i)
126         pixels += mp->frame->linesize[0] / 2;
135     if (size > mp->max_codes_bits) {
142     if (mp->current_codes_count >= mp->codes_count) {
147     mp->codes[mp->current_codes_count++].size = size;
153     if (mp->codes_count == 1) {
160     for (i = 0; i < mp->codes_count; ++i)
162     mp->current_codes_count = 0;
165     if (mp->current_codes_count < mp->codes_count) {
177     delta = (v - 7) * mp->gradient_scale[component];
178     mp->gradient_scale[component] = (v == 0 || v == 14) ? 2 : 1;
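       /* the 4-bit code v yields a delta centred on 7; an extreme code (0 or 14)
          doubles the scale applied to the next delta of this component,
          any other code resets it to 1 */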
186     color = *(uint16_t *)&mp->frame->data[0][y * mp->frame->linesize[0] + x * 2];
195     *(uint16_t *)&mp->frame->data[0][y * mp->frame->linesize[0] + x * 2] = color;
200     return mp->vlc.table ? get_vlc2(gb, mp->vlc.table, mp->max_codes_bits, 1)
201                          : mp->codes[0].delta;
207     const int y0 = y * mp->avctx->width;
211     if (mp->changes_map[y0 + x] == 0) {
212         memset(mp->gradient_scale, 1, sizeof(mp->gradient_scale));
215     while (x < mp->avctx->width) {
216         w = mp->changes_map[y0 + x];
219         if (mp->changes_map[y0 + x + mp->avctx->width]     < w ||
220             mp->changes_map[y0 + x + mp->avctx->width * 2] < w ||
221             mp->changes_map[y0 + x + mp->avctx->width * 3] < w) {
222             for (i = (x + 3) & ~3; i < x + w; i += 4) {
228         memset(mp->gradient_scale, 1, sizeof(mp->gradient_scale));
239         mp->hpt[((y / 4) * mp->avctx->width + x) / 4] = p;
241         p.v = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].v;
242         p.u = mp->hpt[((y / 4) * mp->avctx->width + x) / 4].u;
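            /* chroma is not re-decoded here: u and v are taken from the predictor
               stored for this pixel's 4x4 block in hpt (written at line 239) */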
258     for (y = 0; y < mp->avctx->height; ++y) {
259         if (mp->changes_map[y * mp->avctx->width] != 0) {
260             memset(mp->gradient_scale, 1, sizeof(mp->gradient_scale));
275     for (y0 = 0; y0 < 2; ++y0)
276         for (y = y0; y < mp->avctx->height; y += 2)
281                            void *data, int *got_frame,
284     const uint8_t *buf = avpkt->data;
285     int buf_size = avpkt->size;
288     int i, count1, count2, sz, ret;
297     mp->bdsp.bswap_buf((uint32_t *)mp->bswapbuf, (const uint32_t *) buf,
300     memcpy(mp->bswapbuf + (buf_size & ~3), buf + (buf_size & ~3), buf_size & 3);
312     if (mp->codes_count == 0)
315     if (mp->changes_map[0] == 0) {
316         *(uint16_t *)mp->frame->data[0] = get_bits(&gb, 15);
317         mp->changes_map[0] = 1;
328     if (mp->codes_count > 1) {
332                                    &mp->codes[mp->codes_count - 1].size,  -(int)sizeof(HuffCode),
333                                    &mp->codes[mp->codes_count - 1].delta, -(int)sizeof(HuffCode), 1,
349     .name           = "motionpixels",
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
uint8_t gradient_scale[3]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static double mp(int i, double w0, double r)
This structure describes decoded (raw) audio or video data.
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
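Reading one symbol from this table is a one-liner; in a fragment using the context fields from the listing, the bits argument must match the nb_bits the table was built with, and max_depth = 1 suffices because mp_read_codes_table() rejects any code longer than max_codes_bits (listing line 135):

    /* a single table lookup resolves a complete code when max_depth == 1 */
    int delta = get_vlc2(gb, mp->vlc.table, mp->max_codes_bits, 1);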
static int mp_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
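A minimal sketch of the bit-reader pattern used at listing line 316, assuming the internal libavcodec header get_bits.h; the helper name is illustrative:

    static int read_first_pixel(const uint8_t *buf, int buf_size, uint16_t *out)
    {
        GetBitContext gb;
        int ret = init_get_bits(&gb, buf, buf_size * 8); /* bit_size is given in bits */
        if (ret < 0)
            return ret;
        *out = get_bits(&gb, 15); /* one packed RGB555 value, as at line 316 */
        return 0;
    }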
const AVCodec ff_motionpixels_decoder
static av_cold int mp_decode_init(AVCodecContext *avctx)
static void mp_read_changes_map(MotionPixelsContext *mp, GetBitContext *gb, int count, int bits_len, int read_color)
int ff_init_vlc_from_lengths(VLC *vlc_arg, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
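Listing lines 332-333 pass negative wrap values, which make ff_init_vlc_from_lengths() walk the HuffCode array from its last entry backwards. A sketch of the full call under that reading; the vlc, nb_bits and nb_codes arguments come from the context fields above, while offset = 0, flags = 0 and the log context are assumptions filled in from the signature, not taken from the listing:

    static int build_delta_vlc(MotionPixelsContext *mp)
    {
        return ff_init_vlc_from_lengths(&mp->vlc, mp->max_codes_bits, mp->codes_count,
                                        &mp->codes[mp->codes_count - 1].size,  -(int)sizeof(HuffCode),
                                        &mp->codes[mp->codes_count - 1].delta, -(int)sizeof(HuffCode), 1,
                                        0, 0, mp->avctx);
    }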
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static int mp_yuv_to_rgb(int y, int v, int u, int clip_rgb)
static int ff_thread_once(char *control, void(*routine)(void))
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static YuvPixel mp_get_yuv_from_rgb(MotionPixelsContext *mp, int x, int y)
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
void ff_free_vlc(VLC *vlc)
static av_cold void motionpixels_tableinit(void)
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb)
@ AV_CODEC_ID_MOTIONPIXELS
static av_always_inline int mp_gradient(MotionPixelsContext *mp, int component, int v)
static unsigned int get_bits1(GetBitContext *s)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
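In this style of decoder the decode call usually ends by handing the caller a new reference to the persistent internal frame. A hedged fragment of that epilogue, reusing the data/got_frame parameters and the ret/buf_size locals from the listing; the exact return value of this file is not shown above:

    if ((ret = av_frame_ref(data, mp->frame)) < 0)
        return ret;
    *got_frame = 1;
    return buf_size;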
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
static av_always_inline int mp_get_vlc(MotionPixelsContext *mp, GetBitContext *gb)
#define i(width, name, range_min, range_max)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
static YuvPixel mp_rgb_yuv_table[1 << 15]
static int mp_get_code(MotionPixelsContext *mp, GetBitContext *gb, int size)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
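A sketch of how such a padded scratch buffer is typically grown before the word-wise byte swap at listing line 297; the helper name and parameters are illustrative, not the file's call site:

    static int grow_swap_buffer(uint8_t **buf, unsigned int *size, size_t min_size)
    {
        /* av_fast_padded_malloc() keeps the old buffer when it is already large
         * enough and appends AV_INPUT_BUFFER_PADDING_SIZE zeroed bytes, so a
         * 32-bit reader may safely overread the final partial word */
        av_fast_padded_malloc(buf, size, min_size);
        return *buf ? 0 : AVERROR(ENOMEM);
    }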
#define AV_PIX_FMT_RGB555
static void mp_set_rgb_from_yuv(MotionPixelsContext *mp, int x, int y, const YuvPixel *p)
HuffCode codes[MAX_HUFF_CODES]
static int mp_read_codes_table(MotionPixelsContext *mp, GetBitContext *gb)
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
void * av_calloc(size_t nmemb, size_t size)
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available.
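Since the codec is marked AV_CODEC_CAP_DR1 and only rewrites the areas flagged in changes_map, each decode call typically begins by making the persistent frame writable again. A one-line sketch (flags = 0 is an assumption):

    if ((ret = ff_reget_buffer(avctx, mp->frame, 0)) < 0)
        return ret;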
main external API structure.
static void mp_decode_line(MotionPixelsContext *mp, GetBitContext *gb, int y)
This structure stores compressed data.
int width
picture width / height.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static av_cold int mp_decode_end(AVCodecContext *avctx)