/*
 * Excerpts from FFmpeg's libavfilter/vf_paletteuse.c (the "paletteuse" video
 * filter, which maps an RGB32 input stream onto a PAL8 output using a given
 * 256-color palette).
 */
#define CACHE_SIZE (1<<(3*NBITS))

#define OFFSET(x) offsetof(PaletteUseContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
/* From the paletteuse_options[] table: */
    { "alpha_threshold", "set the alpha threshold for transparency",
        OFFSET(trans_thresh), AV_OPT_TYPE_INT, {.i64=128}, 0, 255, FLAGS },
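/*
 * Editor's note (not part of the original source): pixels (and palette
 * entries) whose alpha byte is below this threshold are treated as
 * transparent: they compare equal to each other in diff() below and are
 * mapped straight to the palette's transparency index when one exists.
 * The default of 128 splits the 8-bit alpha range in half.
 */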
/* query_formats(): the main input and the palette input take RGB32, the
 * output is PAL8; a format list is attached to each link. */
    if ((ret = ff_formats_ref(ff_make_format_list(in_fmts),
                              &ctx->inputs[0]->out_formats)) < 0 ||
        (ret = ff_formats_ref(ff_make_format_list(inpal_fmts),
                              &ctx->inputs[1]->out_formats)) < 0 ||
        (ret = ff_formats_ref(ff_make_format_list(out_fmts),
                              &ctx->outputs[0]->in_formats)) < 0)
        return ret;
/* Add a scaled error term to each RGB channel of an RGB32 pixel, clipping the
 * result to 8 bits; the alpha channel is passed through unchanged. */
static av_always_inline int dither_color(uint32_t px, int er, int eg, int eb,
                                         int scale, int shift)
{
    return av_clip_uint8( px >> 24                                      ) << 24
         | av_clip_uint8((px >> 16 & 0xff) + ((er * scale) / (1<<shift))) << 16
         | av_clip_uint8((px >>  8 & 0xff) + ((eg * scale) / (1<<shift))) <<  8
         | av_clip_uint8((px       & 0xff) + ((eb * scale) / (1<<shift)));
}
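/*
 * Editor's note (not part of the original source): each neighbor receives
 * err * scale / 2^shift of the quantization error. For example, with the
 * Floyd-Steinberg weight 7/16 (scale=7, shift=4) and a red error of er=+20,
 * the neighbor's red channel is raised by 20*7/16 = 8 (integer division),
 * then clipped to [0,255].
 */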
/* Squared RGB distance between two ARGB colors, honouring the alpha
 * threshold: two "transparent" colors compare equal, while a transparent and
 * an opaque color are maximally distant. */
static av_always_inline int diff(const uint8_t *c1, const uint8_t *c2, const int trans_thresh)
{
    const int dr = c1[1] - c2[1];
    const int dg = c1[2] - c2[2];
    const int db = c1[3] - c2[3];

    if (c1[0] < trans_thresh && c2[0] < trans_thresh) {
        return 0;
    } else if (c1[0] >= trans_thresh && c2[0] >= trans_thresh) {
        return dr*dr + dg*dg + db*db;
    } else {
        return 255*255 + 255*255 + 255*255;
    }
}
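/*
 * Editor's note (not part of the original source): the colors are laid out as
 * {A,R,G,B} bytes, so c1[0] is alpha. For two opaque colors such as
 * {0xff,10,20,30} and {0xff,13,24,30} the distance is 3*3 + 4*4 + 0*0 = 25;
 * mixing one transparent and one opaque color always yields the maximum
 * 3*255*255 = 195075, which keeps transparent palette entries from being
 * picked for opaque pixels.
 */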
/* Exhaustive nearest-color lookup over the whole palette. */
static av_always_inline uint8_t colormap_nearest_bruteforce(const uint32_t *palette, const uint8_t *argb, const int trans_thresh)
{
    int i, pal_id = -1, min_dist = INT_MAX;

    for (i = 0; i < AVPALETTE_COUNT; i++) {
        const uint32_t c = palette[i];

        if (c >> 24 >= trans_thresh) { /* ignore transparent entries */
            const uint8_t palargb[] = {
                palette[i]>>24 & 0xff,
                palette[i]>>16 & 0xff,
                palette[i]>> 8 & 0xff,
                palette[i]     & 0xff,
            };
            const int d = diff(palargb, argb, trans_thresh);
            if (d < min_dist) {
                pal_id = i;
                min_dist = d;
            }
        }
    }
    return pal_id;
}
/* colormap_nearest_node(): recursive helper behind colormap_nearest_recursive().
 * Partial excerpt; elided parts are marked. */
static void colormap_nearest_node(const struct color_node *map, const int node_pos,
                                  const uint8_t *target, const int trans_thresh,
                                  struct nearest_color *nearest)
{
    /* ... */
    int dx, nearer_kd_id, further_kd_id;
    const int current_to_target = diff(target, current, trans_thresh);

    if (current_to_target < nearest->dist_sqd) {
        /* ... */
        nearest->dist_sqd = current_to_target;
    }

    /* ... if the node has children: */
        dx = target[s] - current[s];
        /* ... choose the nearer/farther child from the sign of dx ... */

        if (nearer_kd_id != -1)
            colormap_nearest_node(map, nearer_kd_id, target, trans_thresh, nearest);

        if (further_kd_id != -1 && dx*dx < nearest->dist_sqd)
            colormap_nearest_node(map, further_kd_id, target, trans_thresh, nearest);
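/*
 * Editor's note (not part of the original source): the pruning test
 * dx*dx < nearest->dist_sqd is the classic k-d tree bound. dx is the signed
 * distance from the target to the splitting plane along the node's split
 * component, so the far subtree can only contain a closer color when the
 * plane itself is closer than the best squared distance found so far.
 */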
/* colormap_nearest_iterative(): same k-d tree search as the recursive
 * variant, but with an explicit stack of the branches left to revisit.
 * Partial excerpt; elided parts are marked. */
    int pos = 0, best_node_id = -1, best_dist = INT_MAX, cur_color_id = 0;

    for (;;) {
        const struct color_node *kd = &root[cur_color_id];
        /* ... */
        const int current_to_target = diff(target, current, trans_thresh);

        /* keep the closest node seen so far */
        if (current_to_target < best_dist) {
            best_node_id = cur_color_id;
            if (!current_to_target)
                goto end; /* exact match, stop searching */
            best_dist = current_to_target;
        }

        /* ... if the node is not a leaf: */
            const int dx = target[split] - current[split];
            int nearer_kd_id, further_kd_id;
            /* ... pick the nearer/farther child from the sign of dx ... */

            if (nearer_kd_id != -1) {
                if (further_kd_id != -1) {
                    /* ... push the farther branch on the stack ... */
                }
                cur_color_id = nearer_kd_id;
                continue;
            } else if (dx*dx < best_dist) {
                cur_color_id = further_kd_id;
                continue;
            }

        /* unstack until a pending branch can still beat best_dist */
        /* ... */
        } while (node->dx2 >= best_dist);
#define COLORMAP_NEAREST(search, palette, root, target, trans_thresh)                               \
    search == COLOR_SEARCH_NNS_ITERATIVE ? colormap_nearest_iterative(root, target, trans_thresh) : \
    search == COLOR_SEARCH_NNS_RECURSIVE ? colormap_nearest_recursive(root, target, trans_thresh) : \
                                           colormap_nearest_bruteforce(palette, target, trans_thresh)
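/*
 * Editor's note (not part of the original source): `search` is a compile-time
 * constant wherever this macro is expanded (each set_frame_* wrapper defined
 * further down fixes one color_search_method), so the compiler can typically
 * fold the ternary chain and keep a single lookup call per instantiation.
 */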
/* color_get(): transparent input pixels map straight to the palette's
 * transparency index when the palette has one. */
    if (a < s->trans_thresh && s->transparency_index >= 0) {
        return s->transparency_index;
    }
/* Look up the palette index for color c and return, through er/eg/eb, the
 * per-channel quantization error between c and the chosen palette entry. */
static av_always_inline int get_dst_color_err(PaletteUseContext *s,
                                              uint32_t c, int *er, int *eg, int *eb,
                                              const enum color_search_method search_method)
{
    /* ... look up the palette index dstx with color_get(), then: */
    dstc = s->palette[dstx];
    *er = r - (dstc >> 16 & 0xff);
    *eg = g - (dstc >>  8 & 0xff);
    *eb = b - (dstc       & 0xff);
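/*
 * Editor's note (not part of the original source): these signed errors are
 * what the error-diffusion branches of set_frame() below feed into
 * dither_color(). If the source pixel is (r,g,b) = (100, 50, 10) and the
 * nearest palette entry is (96, 52, 10), the propagated errors are er=+4,
 * eg=-2, eb=0.
 */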
/* set_frame(): convert (part of) an RGB32 frame to palette indices, applying
 * the selected dithering mode. */
static av_always_inline int set_frame(PaletteUseContext *s, AVFrame *out, AVFrame *in,
                                      int x_start, int y_start, int w, int h,
                                      enum dithering_mode dither,
                                      const enum color_search_method search_method)
{
    /* ... */
    const int src_linesize = in ->linesize[0] >> 2;
    const int dst_linesize = out->linesize[0];
    uint32_t *src = ((uint32_t *)in ->data[0]) + y_start*src_linesize;
    uint8_t  *dst =              out->data[0]  + y_start*dst_linesize;
    for (y = y_start; y < h; y++) {
        for (x = x_start; x < w; x++) {

            if (dither == DITHERING_BAYER) {
                const int d = s->ordered_dither[(y & 7)<<3 | (x & 7)];
                /* ... split src[x] into a8/r8/g8/b8, then offset each channel: */
                const uint8_t r = av_clip_uint8(r8 + d);
                const uint8_t g = av_clip_uint8(g8 + d);
                const uint8_t b = av_clip_uint8(b8 + d);
            } else if (dither == DITHERING_HECKBERT) {
                const int right = x < w - 1, down = y < h - 1;
                /* ... */
                if (         down) src[src_linesize + x    ] = dither_color(src[src_linesize + x    ], er, eg, eb, 3, 3);
                if (right && down) src[src_linesize + x + 1] = dither_color(src[src_linesize + x + 1], er, eg, eb, 2, 3);
            } else if (dither == DITHERING_FLOYD_STEINBERG) {
                const int right = x < w - 1, down = y < h - 1, left = x > x_start;
                /* ... */
                if (         down) src[src_linesize + x    ] = dither_color(src[src_linesize + x    ], er, eg, eb, 5, 4);
                if (right && down) src[src_linesize + x + 1] = dither_color(src[src_linesize + x + 1], er, eg, eb, 1, 4);
            } else if (dither == DITHERING_SIERRA2) {
                const int right  = x < w - 1, down = y < h - 1, left  = x > x_start;
                const int right2 = x < w - 2,                   left2 = x > x_start + 1;
                /* ... */
                if (down) {
                    if (left2)  src[ src_linesize + x - 2] = dither_color(src[ src_linesize + x - 2], er, eg, eb, 1, 4);
                    if (left)   src[ src_linesize + x - 1] = dither_color(src[ src_linesize + x - 1], er, eg, eb, 2, 4);
                    if (1)      src[ src_linesize + x    ] = dither_color(src[ src_linesize + x    ], er, eg, eb, 3, 4);
                    if (right)  src[ src_linesize + x + 1] = dither_color(src[ src_linesize + x + 1], er, eg, eb, 2, 4);
                    if (right2) src[ src_linesize + x + 2] = dither_color(src[ src_linesize + x + 2], er, eg, eb, 1, 4);
                }
            } else if (dither == DITHERING_SIERRA2_4A) {
                const int right = x < w - 1, down = y < h - 1, left = x > x_start;
                /* ... */
                if (down) src[src_linesize + x] = dither_color(src[src_linesize + x], er, eg, eb, 1, 2);
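/*
 * Editor's note (not part of the original source): the (scale, shift) pairs
 * above encode error-diffusion weights of scale/2^shift. The surviving pairs
 * are consistent with the textbook kernels; the neighbor updates that did not
 * survive extraction would carry the remaining standard weights:
 *
 *   Heckbert:        right 3/8,  down 3/8, down-right 2/8
 *   Floyd-Steinberg: right 7/16, down-left 3/16, down 5/16, down-right 1/16
 *   Sierra-2:        right 4/16, right+2 3/16; next row 1/16 2/16 3/16 2/16 1/16
 *   Sierra-2-4A:     right 2/4,  down-left 1/4, down 1/4
 */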
/* disp_node(): append one k-d tree node to a Graphviz DOT dump of the color
 * tree (used by disp_tree() when debugging). */
static void disp_node(AVBPrint *buf,
                      const struct color_node *map,
                      int parent_id, int node_id,
                      int depth)
{
    const struct color_node *node = &map[node_id];
    const uint32_t fontcolor = node->val[1] > 0x50 &&
                               node->val[2] > 0x50 &&
                               node->val[3] > 0x50 ? 0 : 0xffffff;
    const int rgb_comp = node->split - 1;
    av_bprintf(buf, /* ... */
               "label=\"%c%02X%c%02X%c%02X%c\" "
               "fillcolor=\"#%02x%02x%02x\" "
               "fontcolor=\"#%06"PRIX32"\"]\n",
               /* ... */
               "[  "[rgb_comp], node->val[1],
               "][ "[rgb_comp], node->val[2],
               " ]["[rgb_comp], node->val[3],
               /* ... */
               node->val[1], node->val[2], node->val[3],
               fontcolor);
/* disp_tree(): write the whole DOT graph to a file. */
    av_bprintf(&buf, " node [style=filled fontsize=10 shape=box]\n");
    /* ... */
    fwrite(buf.str, 1, buf.len, f);
/* debug_accuracy(): verify, for every 24-bit RGB value, that the selected
 * search method picks a palette entry at the same distance as the brute
 * force reference. Partial excerpt. */
    for (r = 0; r < 256; r++) {
        for (g = 0; g < 256; g++) {
            for (b = 0; b < 256; b++) {
                const uint8_t argb[] = {0xff, r, g, b};
                const int r1 = COLORMAP_NEAREST(search_method, palette, node, argb, trans_thresh);
                const int r2 = colormap_nearest_bruteforce(palette, argb, trans_thresh);
                if (r1 != r2) {
                    const uint32_t c1 = palette[r1];
                    const uint32_t c2 = palette[r2];
                    const uint8_t palargb1[] = { 0xff, c1>>16 & 0xff, c1>> 8 & 0xff, c1 & 0xff };
                    const uint8_t palargb2[] = { 0xff, c2>>16 & 0xff, c2>> 8 & 0xff, c2 & 0xff };
                    const int d1 = diff(palargb1, argb, trans_thresh);
                    const int d2 = diff(palargb2, argb, trans_thresh);
                    if (d1 != d2) {
                        av_log(NULL, AV_LOG_ERROR,
                               "/!\\ %02X%02X%02X: %d ! %d (%06"PRIX32" ! %06"PRIX32") / dist: %d ! %d\n",
                               r, g, b, r1, r2, c1 & 0xffffff, c2 & 0xffffff, d1, d2);
                        /* ... */
                    }
                }
            }
        }
    }
/* Comparators used to sort palette colors on one byte of their ARGB value. */
#define DECLARE_CMP_FUNC(name, pos)                     \
static int cmp_##name(const void *pa, const void *pb)  \
{                                                       \
    const struct color *a = pa;                         \
    const struct color *b = pb;                         \
    return   (a->value >> (8 * (3 - (pos))) & 0xff)     \
           - (b->value >> (8 * (3 - (pos))) & 0xff);    \
}
/* get_next_color(): pick the next color to insert into the k-d tree: among
 * the still unused palette entries inside the given box, find the box's
 * longest color axis, sort along it and return the median entry. */
static int get_next_color(const uint8_t *color_used, const uint32_t *palette,
                          const int trans_thresh,
                          int *component, const struct color_rect *box)
{
    /* ... */
    unsigned nb_color = 0;
    struct color_rect ranges;
    struct color tmp_pal[256];
    /* ... */
    ranges.min[0] = ranges.min[1] = ranges.min[2] = 0xff;
    ranges.max[0] = ranges.max[1] = ranges.max[2] = 0x00;
    for (i = 0; i < AVPALETTE_COUNT; i++) {
        const uint32_t c = palette[i];
        /* ... extract a/r/g/b from c ... */
        if (a < trans_thresh) {
            continue;
        }

        if (color_used[i] || (a != 0xff) ||
            r < box->min[0] || g < box->min[1] || b < box->min[2] ||
            r > box->max[0] || g > box->max[1] || b > box->max[2])
            continue;

        if (r < ranges.min[0]) ranges.min[0] = r;
        if (g < ranges.min[1]) ranges.min[1] = g;
        if (b < ranges.min[2]) ranges.min[2] = b;

        if (r > ranges.max[0]) ranges.max[0] = r;
        if (g > ranges.max[1]) ranges.max[1] = g;
        if (b > ranges.max[2]) ranges.max[2] = b;
        tmp_pal[nb_color].value = c;
        /* ... */
    }

    /* pick the longest axis as the split component (1=R, 2=G, 3=B) */
    wr = ranges.max[0] - ranges.min[0];
    wg = ranges.max[1] - ranges.min[1];
    wb = ranges.max[2] - ranges.min[2];
    if (wr >= wg && wr >= wb) longest = 1;
    if (wg >= wr && wg >= wb) longest = 2;
    if (wb >= wr && wb >= wg) longest = 3;
    /* ... */
    *component = longest;

    /* sort along this axis and return the median entry */
    /* ... */
    return tmp_pal[nb_color >> 1].pal_id;
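/*
 * Editor's note (not part of the original source): this is a median-cut style
 * split. Suppose the remaining box spans R:10..200, G:40..60, B:90..100: the
 * R axis (width 190) wins, the candidates are sorted by red and the middle
 * entry becomes the new tree node, so both halves of the box end up with
 * about the same number of palette colors.
 */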
/* colormap_insert(): recursively build the k-d tree: take the median color
 * returned by get_next_color(), store it as a node, split the box along the
 * chosen component and recurse into both halves. */
static int colormap_insert(struct color_node *map,
                           uint8_t *color_used, int *nb_used,
                           const uint32_t *palette,
                           const int trans_thresh,
                           const struct color_rect *box)
{
    uint32_t c;
    int component, cur_id;
    int node_left_id = -1, node_right_id = -1;
    struct color_node *node;
    struct color_rect box1, box2;
    const int pal_id = get_next_color(color_used, palette, trans_thresh, &component, box);

    if (pal_id < 0)
        return -1;

    /* create a new node with that color */
    cur_id = (*nb_used)++;
    c = palette[pal_id];
    node = &map[cur_id];
    node->split = component;
    node->val[0] = c>>24 & 0xff;
    node->val[1] = c>>16 & 0xff;
    node->val[2] = c>> 8 & 0xff;
    node->val[3] = c     & 0xff;

    color_used[pal_id] = 1;

    /* split the box into the two halves this node creates */
    box1 = box2 = *box;
    box1.max[component-1] = node->val[component];
    box2.min[component-1] = node->val[component] + 1;

    node_left_id = colormap_insert(map, color_used, nb_used, palette, trans_thresh, &box1);

    if (box2.min[component-1] <= box2.max[component-1])
        node_right_id = colormap_insert(map, color_used, nb_used, palette, trans_thresh, &box2);
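/*
 * Editor's note (not part of the original source): node->val[] holds the node
 * color as {A,R,G,B} and node->split is 1, 2 or 3, so val[component] is the
 * channel the box is split on. box1 keeps the colors at or below the node's
 * value on that channel and box2 the ones above it, which is why box2 can be
 * empty and is only descended into when its min is still <= its max.
 */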
/* Sort palette entries on their 24-bit RGB value (alpha ignored). */
static int cmp_pal_entry(const void *a, const void *b)
{
    const int c1 = *(const uint32_t *)a & 0xffffff;
    const int c2 = *(const uint32_t *)b & 0xffffff;
    return c1 - c2;
}
/* load_colormap(): prepare the palette (sort it, mark duplicates and
 * transparent entries as used) and build the k-d tree over the full RGB box. */
static void load_colormap(PaletteUseContext *s)
{
    int i, nb_used = 0;
    uint8_t color_used[AVPALETTE_COUNT] = {0};
    uint32_t last_color = 0;
    struct color_rect box;

    /* ... sort the palette, then re-locate the transparency index: */
    if (s->transparency_index >= 0) {
        for (i = 0; i < AVPALETTE_COUNT; i++) {
            if ((s->palette[i]>>24 & 0xff) == 0) {
                s->transparency_index = i;
                break;
            }
        }
    }

    for (i = 0; i < AVPALETTE_COUNT; i++) {
        const uint32_t c = s->palette[i];
        if (i != 0 && c == last_color) {
            color_used[i] = 1; /* skip duplicates */
            continue;
        }
        last_color = c;
        if (c >> 24 < s->trans_thresh) {
            color_used[i] = 1; /* ignore transparent color(s) */
            continue;
        }
    }

    box.min[0] = box.min[1] = box.min[2] = 0x00;
    box.max[0] = box.max[1] = box.max[2] = 0xff;

    colormap_insert(s->map, color_used, &nb_used, s->palette, s->trans_thresh, &box);

    if (s->debug_accuracy) {
        /* ... run debug_accuracy() and report the result ... */
    }
/* debug_mean_error(): compute the mean error per pixel between the original
 * RGB32 frame and its paletted version, plus a running average over all
 * frames so far. */
static void debug_mean_error(PaletteUseContext *s, const AVFrame *in1,
                             const AVFrame *in2, int frame_count)
{
    int x, y;
    const uint32_t *palette = s->palette;
    uint32_t *src1 = (uint32_t *)in1->data[0];
    uint8_t  *src2 =             in2->data[0];
    const int src1_linesize = in1->linesize[0] >> 2;
    const int src2_linesize = in2->linesize[0];
    const double div = in1->width * in1->height * 3;
    unsigned mean_err = 0;

    for (y = 0; y < in1->height; y++) {
        for (x = 0; x < in1->width; x++) {
            const uint32_t c1 = src1[x];
            const uint32_t c2 = palette[src2[x]];
            const uint8_t argb1[] = {0xff, c1 >> 16 & 0xff, c1 >> 8 & 0xff, c1 & 0xff};
            const uint8_t argb2[] = {0xff, c2 >> 16 & 0xff, c2 >> 8 & 0xff, c2 & 0xff};
            mean_err += diff(argb1, argb2, s->trans_thresh);
        }
        src1 += src1_linesize;
        src2 += src2_linesize;
    }

    s->total_mean_err += mean_err;

    av_log(NULL, AV_LOG_INFO, "MEP:%.3f TotalMEP:%.3f\n",
           mean_err / div, s->total_mean_err / (div * frame_count));
}
/* set_processing_window(): when diff_mode is "rectangle", compare the current
 * and previous source frames and shrink the area to re-quantize down to the
 * bounding rectangle of the pixels that actually changed; the untouched
 * borders are copied from the previous output frame. Partial excerpt. */
static void set_processing_window(enum diff_mode diff_mode,
                                  const AVFrame *prv_src, const AVFrame *cur_src,
                                  const AVFrame *prv_dst,       AVFrame *cur_dst,
                                  int *xp, int *yp, int *wp, int *hp)
{
    int x_start = 0, y_start = 0;
    /* ... when rectangle diffing applies: */
        int x_end = cur_src->width  - 1,
            y_end = cur_src->height - 1;
        const uint32_t *prv_srcp = (const uint32_t *)prv_src->data[0];
        const uint32_t *cur_srcp = (const uint32_t *)cur_src->data[0];
        /* ... */
        const int prv_src_linesize = prv_src->linesize[0] >> 2;
        const int cur_src_linesize = cur_src->linesize[0] >> 2;
        const int prv_dst_linesize = prv_dst->linesize[0];
        const int cur_dst_linesize = cur_dst->linesize[0];
        /* skip unchanged lines at the top and bottom, reusing the previous
         * output for them */
        while (y_start < y_end && !memcmp(prv_srcp + y_start*prv_src_linesize,
                                          cur_srcp + y_start*cur_src_linesize,
                                          cur_src->width * 4)) {
            memcpy(cur_dstp + y_start*cur_dst_linesize,
                   prv_dstp + y_start*prv_dst_linesize,
                   cur_dst->width);
            y_start++;
        }
        while (y_end > y_start && !memcmp(prv_srcp + y_end*prv_src_linesize,
                                          cur_srcp + y_end*cur_src_linesize,
                                          cur_src->width * 4)) {
            memcpy(cur_dstp + y_end*cur_dst_linesize,
                   prv_dstp + y_end*prv_dst_linesize,
                   cur_dst->width);
            y_end--;
        }

        height = y_end + 1 - y_start;
        /* skip unchanged columns on the left and right */
        while (x_start < x_end) {
            for (y = y_start; y <= y_end; y++) {
                if (prv_srcp[y*prv_src_linesize + x_start] != cur_srcp[y*cur_src_linesize + x_start]) {
                    /* ... stop: this column differs ... */
                }
            }
            /* ... otherwise advance x_start ... */
        }
        while (x_end > x_start) {
            for (y = y_start; y <= y_end; y++) {
                if (prv_srcp[y*prv_src_linesize + x_end] != cur_srcp[y*cur_src_linesize + x_end]) {
                    /* ... stop: this column differs ... */
                }
            }
            /* ... otherwise decrease x_end ... */
        }
        width = x_end + 1 - x_start;

        /* copy the unchanged left and right borders from the previous output */
        for (y = y_start; y <= y_end; y++)
            memcpy(cur_dstp + y*cur_dst_linesize,
                   prv_dstp + y*prv_dst_linesize, x_start);

        if (x_end != cur_src->width - 1) {
            const int copy_len = cur_src->width - 1 - x_end;
            for (y = y_start; y <= y_end; y++)
                memcpy(cur_dstp + y*cur_dst_linesize + x_end + 1,
                       prv_dstp + y*prv_dst_linesize + x_end + 1,
                       copy_len);
        }
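/*
 * Editor's note (not part of the original source): this is the optimization
 * behind the filter's "rectangle" diff mode. For largely static content (a
 * screen capture where only a small region changes, say) the expensive
 * per-pixel palette lookups and dithering run only inside the changed
 * rectangle, while the rest of the PAL8 output is reused byte for byte from
 * the previous frame.
 */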
/* apply_palette(): quantize only the window that changed since the previous
 * frame, then log the rectangle and (optionally) the mean error. */
    set_processing_window(s->diff_mode, s->last_in, in,
                          s->last_out, out, &x, &y, &w, &h);
    /* ... */
    ff_dlog(ctx, "%dx%d rect: (%d;%d) -> (%d,%d) [area:%dx%d]\n",
            w, h, x, y, x+w, y+h, in->width, in->height);
    /* ... */
    if (s->calc_mean_err)
        debug_mean_error(s, in, out, inlink->frame_count_out);
/* config_output(): configure the dual-input framesync (the palette input is
 * repeated) and give the output the size of the main input. */
    s->fs.opt_repeatlast = 1;
    /* ... */
    outlink->w = ctx->inputs[0]->w;
    outlink->h = ctx->inputs[0]->h;
/* config_input_palette(): the palette input must carry exactly
 * AVPALETTE_COUNT (256) pixels. */
        av_log(ctx, AV_LOG_ERROR,
               "Palette input must contain exactly %d pixels. "
               "Specified input has %dx%d=%d pixels\n",
               AVPALETTE_COUNT, inlink->w, inlink->h,
               inlink->w * inlink->h);
/* load_palette(): read the 256 palette entries from the palette input frame,
 * remember the transparency index (if any) and (re)build the color map. */
static void load_palette(PaletteUseContext *s, const AVFrame *palette_frame)
{
    int i, x, y;
    const uint32_t *p = (const uint32_t *)palette_frame->data[0];
    const int p_linesize = palette_frame->linesize[0] >> 2;

    s->transparency_index = -1;

    /* ... when the palette is reloaded, reset the previous state: */
    memset(s->palette, 0, sizeof(s->palette));
    memset(s->map, 0, sizeof(s->map));
    /* ... free the per-color cache entries, then */
    memset(s->cache, 0, sizeof(s->cache));

    i = 0;
    for (y = 0; y < palette_frame->height; y++) {
        for (x = 0; x < palette_frame->width; x++) {
            s->palette[i] = p[x];
            if (p[x]>>24 < s->trans_thresh) {
                s->transparency_index = i;
            }
            i++;
        }
        p += p_linesize;
    }

    load_colormap(s);

    s->palette_loaded = 1;
}
1001 s->palette_loaded = 1;
1016 if (!
master || !second) {
1020 if (!
s->palette_loaded) {
/* Generate one set_frame_* wrapper per (color search, dithering) pair, and a
 * table to index them. */
#define DEFINE_SET_FRAME(color_search, name, value)                            \
static int set_frame_##name(PaletteUseContext *s, AVFrame *out, AVFrame *in,   \
                            int x_start, int y_start, int w, int h)            \
{                                                                              \
    return set_frame(s, out, in, x_start, y_start, w, h, value, color_search); \
}

#define DEFINE_SET_FRAME_COLOR_SEARCH(color_search, color_search_macro)                               \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##none,            DITHERING_NONE)            \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##bayer,           DITHERING_BAYER)           \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##heckbert,        DITHERING_HECKBERT)        \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##floyd_steinberg, DITHERING_FLOYD_STEINBERG) \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##sierra2,         DITHERING_SIERRA2)         \
    DEFINE_SET_FRAME(color_search_macro, color_search##_##sierra2_4a,      DITHERING_SIERRA2_4A)      \

#define DITHERING_ENTRIES(color_search) {       \
    set_frame_##color_search##_none,            \
    set_frame_##color_search##_bayer,           \
    set_frame_##color_search##_heckbert,        \
    set_frame_##color_search##_floyd_steinberg, \
    set_frame_##color_search##_sierra2,         \
    set_frame_##color_search##_sierra2_4a,      \
}
/* Generate one entry of the 8x8 ordered-dither (Bayer) matrix: p encodes the
 * position as (y<<3 | x) and the result is the matrix value in [0,63]. */
static int dither_value(int p)
{
    const int q = p ^ (p >> 3);
    return   (p & 4) >> 2 | (q & 4) >> 1 \
           | (p & 2) << 1 | (q & 2) << 2 \
           | (p & 1) << 4 | (q & 1) << 5;
}
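/*
 * Editor's note (not part of the original source): a minimal sketch showing
 * the matrix this produces; the helper below is illustrative only and not
 * part of the filter. The first row comes out as 0 48 12 60 3 51 15 63, i.e.
 * the standard 8x8 Bayer ordering, built by interleaving the bits of x and
 * x^y in reverse order.
 */
static void print_bayer_matrix_sketch(void)
{
    int x, y;
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++)
            av_log(NULL, AV_LOG_INFO, "%2d ", dither_value(y<<3 | x));
        av_log(NULL, AV_LOG_INFO, "\n");
    }
}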
/* init(): allocate the last_in/last_out frames used by the rectangle diff
 * mode and precompute the ordered-dither offsets for Bayer dithering. */
    if (!s->last_in || !s->last_out) {
        return AVERROR(ENOMEM);
    }
    /* ... */
    if (s->dither == DITHERING_BAYER) {
        const int delta = 1 << (5 - s->bayer_scale);
        /* ... presumably fills s->ordered_dither[] from dither_value() ... */
    }
AVFilter ff_vf_paletteuse = {
    .name          = "paletteuse",
    /* ... */
    .priv_class    = &paletteuse_class,
};
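/*
 * Editor's note (not part of the original source): typical usage pairs this
 * filter with palettegen; file names below are placeholders:
 *
 *   ffmpeg -i input.mkv -vf palettegen palette.png
 *   ffmpeg -i input.mkv -i palette.png -lavfi "paletteuse=dither=bayer:bayer_scale=3:diff_mode=rectangle" output.gif
 *
 * The dither option selects one of the set_frame_* variants generated above,
 * and diff_mode=rectangle enables the changed-rectangle optimization from
 * set_processing_window().
 */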