Go to the documentation of this file.
55 #include "libavutil/ffversion.h"
58 #if HAVE_SYS_RESOURCE_H
60 #include <sys/resource.h>
98 vfprintf(stdout, fmt, vl);
105 static int print_prefix = 1;
119 #if HAVE_SETDLLDIRECTORY && defined(_WIN32)
148 error =
"Expected number for %s but found: %s\n";
149 else if (d < min || d >
max)
150 error =
"The value for %s was %s which is not within %f - %f\n";
152 error =
"Expected int64 for %s but found %s\n";
154 error =
"Expected int for %s but found %s\n";
168 is_duration ?
"duration" :
"date",
context, timestr);
175 int rej_flags,
int alt_flags)
184 if (((po->
flags & req_flags) != req_flags) ||
185 (alt_flags && !(po->
flags & alt_flags)) ||
186 (po->
flags & rej_flags))
217 const char *p = strchr(
name,
':');
231 #if HAVE_COMMANDLINETOARGVW && defined(_WIN32)
232 #include <shellapi.h>
234 static char** win32_argv_utf8 =
NULL;
235 static int win32_argc = 0;
248 int i, buffsize = 0,
offset = 0;
250 if (win32_argv_utf8) {
251 *argc_ptr = win32_argc;
252 *argv_ptr = win32_argv_utf8;
257 argv_w = CommandLineToArgvW(GetCommandLineW(), &win32_argc);
258 if (win32_argc <= 0 || !argv_w)
262 for (
i = 0;
i < win32_argc;
i++)
263 buffsize += WideCharToMultiByte(CP_UTF8, 0, argv_w[
i], -1,
266 win32_argv_utf8 =
av_mallocz(
sizeof(
char *) * (win32_argc + 1) + buffsize);
267 argstr_flat = (
char *)win32_argv_utf8 +
sizeof(
char *) * (win32_argc + 1);
268 if (!win32_argv_utf8) {
273 for (
i = 0;
i < win32_argc;
i++) {
274 win32_argv_utf8[
i] = &argstr_flat[
offset];
275 offset += WideCharToMultiByte(CP_UTF8, 0, argv_w[
i], -1,
279 win32_argv_utf8[
i] =
NULL;
282 *argc_ptr = win32_argc;
283 *argv_ptr = win32_argv_utf8;
303 char *p = strchr(opt,
':');
306 dstcount = (
int *)(so + 1);
307 *so =
grow_array(*so,
sizeof(**so), dstcount, *dstcount + 1);
311 (*so)[*dstcount - 1].specifier =
str;
312 dst = &(*so)[*dstcount - 1].u;
336 "Failed to set value '%s' for option '%s': %s\n",
354 if (!po->
name && opt[0] ==
'n' && opt[1] ==
'o') {
381 void (*parse_arg_function)(
void *,
const char*))
384 int optindex, handleoptions = 1,
ret;
391 while (optindex < argc) {
392 opt = argv[optindex++];
394 if (handleoptions && opt[0] ==
'-' && opt[1] !=
'\0') {
395 if (opt[1] ==
'-' && opt[2] ==
'\0') {
405 if (parse_arg_function)
406 parse_arg_function(optctx, opt);
416 g->group_def->name,
g->arg);
418 for (
i = 0;
i <
g->nb_opts;
i++) {
421 if (
g->group_def->flags &&
422 !(
g->group_def->flags & o->
opt->
flags)) {
424 "%s %s -- you are trying to apply an input option to an "
425 "output file or vice versa. Move this option before the "
426 "file it belongs to.\n", o->
key, o->
opt->
help,
427 g->group_def->name,
g->arg);
450 for (
i = 1;
i < argc;
i++) {
451 const char *cur_opt = argv[
i];
453 if (*cur_opt++ !=
'-')
457 if (!po->
name && cur_opt[0] ==
'n' && cur_opt[1] ==
'o')
460 if ((!po->
name && !strcmp(cur_opt, optname)) ||
461 (po->
name && !strcmp(optname, po->
name)))
472 const unsigned char *p;
475 if (!((*p >=
'+' && *p <=
':') || (*p >=
'@' && *p <=
'Z') ||
476 *p ==
'_' || (*p >=
'a' && *p <=
'z')))
483 for (p =
a; *p; p++) {
484 if (*p ==
'\\' || *p ==
'"' || *p ==
'$' || *p ==
'`')
486 else if (*p < ' ' || *p >
'~')
512 if (idx && argv[idx + 1])
515 if ((env = getenv(
"FFREPORT")) || idx) {
520 for (
i = 0;
i < argc;
i++) {
533 int opt_flags,
int search_flags)
541 #define FLAGS (o->type == AV_OPT_TYPE_FLAGS && (arg[0]=='-' || arg[0]=='+')) ? AV_DICT_APPEND : 0
546 char opt_stripped[128];
549 #if CONFIG_AVRESAMPLE
555 #if CONFIG_SWRESAMPLE
559 if (!strcmp(opt,
"debug") || !strcmp(opt,
"fdebug"))
562 if (!(p = strchr(opt,
':')))
563 p = opt + strlen(opt);
564 av_strlcpy(opt_stripped, opt,
FFMIN(
sizeof(opt_stripped), p - opt + 1));
568 ((opt[0] ==
'v' || opt[0] ==
'a' || opt[0] ==
's') &&
586 if (!strcmp(opt,
"srcw") || !strcmp(opt,
"srch") ||
587 !strcmp(opt,
"dstw") || !strcmp(opt,
"dsth") ||
588 !strcmp(opt,
"src_format") || !strcmp(opt,
"dst_format")) {
589 av_log(
NULL,
AV_LOG_ERROR,
"Directly using swscale dimensions/format options is not supported, please use the -s or -pix_fmt options\n");
602 if (!consumed && !strcmp(opt,
"sws_flags")) {
607 #if CONFIG_SWRESAMPLE
621 #if CONFIG_AVRESAMPLE
644 for (
i = 0;
i < nb_groups;
i++) {
646 if (p->
sep && !strcmp(p->
sep, opt))
691 const char *
key,
const char *
val)
697 g->opts[
g->nb_opts - 1].opt = opt;
698 g->opts[
g->nb_opts - 1].key =
key;
699 g->opts[
g->nb_opts - 1].val =
val;
708 memset(octx, 0,
sizeof(*octx));
763 while (optindex < argc) {
764 const char *opt = argv[optindex++], *
arg;
770 if (opt[0] ==
'-' && opt[1] ==
'-' && !opt[2]) {
775 if (opt[0] !=
'-' || !opt[1] || dashdash+1 == optindex) {
782 #define GET_ARG(arg) \
784 arg = argv[optindex++]; \
786 av_log(NULL, AV_LOG_ERROR, "Missing argument for option '%s'.\n", opt);\
787 return AVERROR(EINVAL); \
805 arg = argv[optindex++];
819 if (argv[optindex]) {
823 "argument '%s'.\n", opt, argv[optindex]);
828 "with argument '%s'.\n", opt, argv[optindex]);
834 if (opt[0] ==
'n' && opt[1] ==
'o' &&
839 "argument 0.\n", po->
name, po->
help);
849 "command: may be ignored.\n");
870 const struct {
const char *
name;
int level; } log_levels[] = {
890 if (*token ==
'+' || *token ==
'-') {
898 if (!strncmp(token,
"repeat", 6)) {
905 }
else if (!strncmp(token,
"level", 5)) {
919 }
else if (*
arg ==
'+') {
926 if (!strcmp(log_levels[
i].
name,
arg)) {
927 level = log_levels[
i].level;
935 "Possible levels are numbers or:\n",
arg);
952 while ((
c = *(
template++))) {
954 if (!(
c = *(
template++)))
962 tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
963 tm->tm_hour, tm->tm_min, tm->tm_sec);
977 char *filename_template =
NULL;
980 int prog_loglevel, envlevel = 0;
988 tm = localtime(&now);
990 while (env && *env) {
994 "Failed to parse FFREPORT environment variable: %s\n",
1001 if (!strcmp(
key,
"file")) {
1003 filename_template =
val;
1005 }
else if (!strcmp(
key,
"level")) {
1037 filename.str, strerror(errno));
1042 "%s started on %04d-%02d-%02d at %02d:%02d:%02d\n"
1043 "Report written to \"%s\"\n"
1046 tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
1047 tm->tm_hour, tm->tm_min, tm->tm_sec,
1063 max = strtol(
arg, &tail, 10);
1076 struct rlimit rl = { lim, lim + 1 };
1077 if (setrlimit(RLIMIT_CPU, &rl))
1078 perror(
"setrlimit");
1088 const char *errbuf_ptr = errbuf;
1098 #define SHOW_VERSION 2
1099 #define SHOW_CONFIG 4
1100 #define SHOW_COPYRIGHT 8
1102 #define PRINT_LIB_INFO(libname, LIBNAME, flags, level) \
1103 if (CONFIG_##LIBNAME) { \
1104 const char *indent = flags & INDENT? " " : ""; \
1105 if (flags & SHOW_VERSION) { \
1106 unsigned int version = libname##_version(); \
1107 av_log(NULL, level, \
1108 "%slib%-11s %2d.%3d.%3d / %2d.%3d.%3d\n", \
1110 LIB##LIBNAME##_VERSION_MAJOR, \
1111 LIB##LIBNAME##_VERSION_MINOR, \
1112 LIB##LIBNAME##_VERSION_MICRO, \
1113 AV_VERSION_MAJOR(version), AV_VERSION_MINOR(version),\
1114 AV_VERSION_MICRO(version)); \
1116 if (flags & SHOW_CONFIG) { \
1117 const char *cfg = libname##_configuration(); \
1118 if (strcmp(FFMPEG_CONFIGURATION, cfg)) { \
1119 if (!warned_cfg) { \
1120 av_log(NULL, level, \
1121 "%sWARNING: library configuration mismatch\n", \
1125 av_log(NULL, level, "%s%-11s configuration: %s\n", \
1126 indent, #libname, cfg); \
1155 av_log(
NULL,
level,
"%sconfiguration: " FFMPEG_CONFIGURATION
"\n", indent);
1161 char str[] = { FFMPEG_CONFIGURATION };
1162 char *conflist, *remove_tilde, *splitconf;
1166 while ((conflist = strstr(
str,
" --")) !=
NULL) {
1167 strncpy(conflist,
"~--", 3);
1172 while ((remove_tilde = strstr(
str,
"pkg-config~")) !=
NULL) {
1173 strncpy(remove_tilde,
"pkg-config ", 11);
1176 splitconf = strtok(
str,
"~");
1178 while (splitconf !=
NULL) {
1180 splitconf = strtok(
NULL,
"~");
1216 "This version of %s has nonfree parts compiled in.\n"
1217 "Therefore it is not legally redistributable.\n",
1221 "%s is free software; you can redistribute it and/or modify\n"
1222 "it under the terms of the GNU General Public License as published by\n"
1223 "the Free Software Foundation; either version 3 of the License, or\n"
1224 "(at your option) any later version.\n"
1226 "%s is distributed in the hope that it will be useful,\n"
1227 "but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
1228 "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n"
1229 "GNU General Public License for more details.\n"
1231 "You should have received a copy of the GNU General Public License\n"
1232 "along with %s. If not, see <http://www.gnu.org/licenses/>.\n",
1236 "%s is free software; you can redistribute it and/or modify\n"
1237 "it under the terms of the GNU General Public License as published by\n"
1238 "the Free Software Foundation; either version 2 of the License, or\n"
1239 "(at your option) any later version.\n"
1241 "%s is distributed in the hope that it will be useful,\n"
1242 "but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
1243 "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n"
1244 "GNU General Public License for more details.\n"
1246 "You should have received a copy of the GNU General Public License\n"
1247 "along with %s; if not, write to the Free Software\n"
1248 "Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n",
1252 "%s is free software; you can redistribute it and/or modify\n"
1253 "it under the terms of the GNU Lesser General Public License as published by\n"
1254 "the Free Software Foundation; either version 3 of the License, or\n"
1255 "(at your option) any later version.\n"
1257 "%s is distributed in the hope that it will be useful,\n"
1258 "but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
1259 "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n"
1260 "GNU Lesser General Public License for more details.\n"
1262 "You should have received a copy of the GNU Lesser General Public License\n"
1263 "along with %s. If not, see <http://www.gnu.org/licenses/>.\n",
1267 "%s is free software; you can redistribute it and/or\n"
1268 "modify it under the terms of the GNU Lesser General Public\n"
1269 "License as published by the Free Software Foundation; either\n"
1270 "version 2.1 of the License, or (at your option) any later version.\n"
1272 "%s is distributed in the hope that it will be useful,\n"
1273 "but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
1274 "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n"
1275 "Lesser General Public License for more details.\n"
1277 "You should have received a copy of the GNU Lesser General Public\n"
1278 "License along with %s; if not, write to the Free Software\n"
1279 "Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n",
1295 void *ifmt_opaque =
NULL;
1297 void *ofmt_opaque =
NULL;
1299 const char *last_name;
1303 " D. = Demuxing supported\n"
1304 " .E = Muxing supported\n"
1305 " --\n", device_only ?
"Devices:" :
"File formats:");
1311 const char *long_name =
NULL;
1317 if (!is_dev && device_only)
1320 strcmp(ofmt->
name, last_name) > 0) {
1331 if (!is_dev && device_only)
1334 strcmp(ifmt->
name, last_name) > 0) {
1347 printf(
" %s%s %-15s %s\n",
1351 long_name ? long_name:
" ");
1376 #define PRINT_CODEC_SUPPORTED(codec, field, type, list_name, term, get_name) \
1377 if (codec->field) { \
1378 const type *p = codec->field; \
1380 printf(" Supported " list_name ":"); \
1381 while (*p != term) { \
1383 printf(" %s", name); \
1393 printf(
"%s %s [%s]:\n", encoder ?
"Encoder" :
"Decoder",
c->name,
1394 c->long_name ?
c->long_name :
"");
1396 printf(
" General capabilities: ");
1427 if (!
c->capabilities)
1433 printf(
" Threading capabilities: ");
1442 default:
printf(
"none");
break;
1448 printf(
" Supported hardware devices: ");
1449 for (
int i = 0;;
i++) {
1458 if (
c->supported_framerates) {
1461 printf(
" Supported framerates:");
1477 if (
c->priv_class) {
1492 default:
return '?';
1513 return (*da)->type != (*db)->type ?
FFDIFFSIGN((*da)->type, (*db)->type) :
1514 strcmp((*da)->name, (*db)->name);
1521 unsigned nb_codecs = 0,
i = 0;
1543 printf(
" (%s: ", encoder ?
"encoders" :
"decoders");
1557 " D..... = Decoding supported\n"
1558 " .E.... = Encoding supported\n"
1559 " ..V... = Video codec\n"
1560 " ..A... = Audio codec\n"
1561 " ..S... = Subtitle codec\n"
1562 " ...I.. = Intra frame-only codec\n"
1563 " ....L. = Lossy compression\n"
1564 " .....S = Lossless compression\n"
1566 for (
i = 0;
i < nb_codecs;
i++) {
1571 if (strstr(
desc->name,
"_deprecated"))
1588 if (strcmp(codec->
name,
desc->name)) {
1595 if (strcmp(codec->
name,
desc->name)) {
1615 " S..... = Subtitle\n"
1616 " .F.... = Frame-level multithreading\n"
1617 " ..S... = Slice-level multithreading\n"
1618 " ...X.. = Codec is experimental\n"
1619 " ....B. = Supports draw_horiz_band\n"
1620 " .....D = Supports direct rendering method 1\n"
1622 encoder ?
"Encoders" :
"Decoders");
1623 for (
i = 0;
i < nb_codecs;
i++) {
1637 if (strcmp(codec->
name,
desc->name))
1661 void *opaque =
NULL;
1663 printf(
"Bitstream filters:\n");
1672 void *opaque =
NULL;
1675 printf(
"Supported file protocols:\n"
1689 char descr[64], *descr_cur;
1690 void *opaque =
NULL;
1695 " T.. = Timeline support\n"
1696 " .S. = Slice threading\n"
1697 " ..C = Command support\n"
1698 " A = Audio input/output\n"
1699 " V = Video input/output\n"
1700 " N = Dynamic number and/or type of input/output\n"
1701 " | = Source or sink filter\n");
1704 for (
i = 0;
i < 2;
i++) {
1706 *(descr_cur++) =
'-';
1707 *(descr_cur++) =
'>';
1711 if (descr_cur >= descr +
sizeof(descr) - 4)
1720 printf(
" %c%c%c %-17s %-10s %s\n",
1723 filter->process_command ?
'C' :
'.',
1727 printf(
"No filters available: libavfilter disabled\n");
1738 printf(
"%-32s #RRGGBB\n",
"name");
1741 printf(
"%-32s #%02x%02x%02x\n",
name, rgb[0], rgb[1], rgb[2]);
1750 printf(
"Pixel formats:\n"
1751 "I.... = Supported Input format for conversion\n"
1752 ".O... = Supported Output format for conversion\n"
1753 "..H.. = Hardware accelerated format\n"
1754 "...P. = Paletted format\n"
1755 "....B = Bitstream format\n"
1756 "FLAGS NAME NB_COMPONENTS BITS_PER_PIXEL\n"
1760 # define sws_isSupportedInput(x) 0
1761 # define sws_isSupportedOutput(x) 0
1766 printf(
"%c%c%c%c%c %-16s %d %2d\n",
1783 const char *
name, *descr;
1785 printf(
"Individual channels:\n"
1786 "NAME DESCRIPTION\n");
1787 for (
i = 0;
i < 63;
i++) {
1794 printf(
"\nStandard channel layouts:\n"
1795 "NAME DECOMPOSITION\n");
1799 for (j = 1; j; j <<= 1)
1843 "but no %s for it are available. FFmpeg might need to be "
1844 "recompiled with additional external libraries.\n",
1845 name, encoder ?
"encoders" :
"decoders");
1907 printf(
" Default video codec: %s.\n",
desc->name);
1911 printf(
" Default audio codec: %s.\n",
desc->name);
1915 printf(
" Default subtitle codec: %s.\n",
desc->name);
1923 static void show_help_filter(
const char *
name)
1937 printf(
"Filter %s\n",
f->name);
1939 printf(
" %s\n",
f->description);
1942 printf(
" slice threading supported\n");
1946 for (
i = 0;
i < count;
i++) {
1951 printf(
" dynamic (depending on the options)\n");
1953 printf(
" none (source filter)\n");
1957 for (
i = 0;
i < count;
i++) {
1962 printf(
" dynamic (depending on the options)\n");
1964 printf(
" none (sink filter)\n");
1970 printf(
"This filter has support for timeline through the 'enable' option.\n");
1973 "can not to satisfy request\n");
2005 par = strchr(topic,
'=');
2011 }
else if (!strcmp(topic,
"decoder")) {
2013 }
else if (!strcmp(topic,
"encoder")) {
2015 }
else if (!strcmp(topic,
"demuxer")) {
2017 }
else if (!strcmp(topic,
"muxer")) {
2019 }
else if (!strcmp(topic,
"protocol")) {
2022 }
else if (!strcmp(topic,
"filter")) {
2023 show_help_filter(par);
2025 }
else if (!strcmp(topic,
"bsf")) {
2040 while (
c !=
'\n' &&
c != EOF)
2047 const char *preset_name,
int is_path,
2048 const char *codec_name)
2052 const char *
base[3] = { getenv(
"FFMPEG_DATADIR"),
2057 av_strlcpy(filename, preset_name, filename_size);
2058 f = fopen(filename,
"r");
2060 #if HAVE_GETMODULEHANDLE && defined(_WIN32)
2061 char datadir[MAX_PATH], *ls;
2064 if (GetModuleFileNameA(GetModuleHandleA(
NULL), datadir,
sizeof(datadir) - 1))
2066 for (ls = datadir; ls < datadir + strlen(datadir); ls++)
2067 if (*ls ==
'\\') *ls =
'/';
2069 if (ls = strrchr(datadir,
'/'))
2072 strncat(datadir,
"/ffpresets",
sizeof(datadir) - 1 - strlen(datadir));
2077 for (
i = 0;
i < 3 && !
f;
i++) {
2080 snprintf(filename, filename_size,
"%s%s/%s.ffpreset",
base[
i],
2081 i != 1 ?
"" :
"/.ffmpeg", preset_name);
2082 f = fopen(filename,
"r");
2083 if (!
f && codec_name) {
2085 "%s%s/%s-%s.ffpreset",
2086 base[
i],
i != 1 ?
"" :
"/.ffmpeg", codec_name,
2088 f = fopen(filename,
"r");
2134 char *p = strchr(t->
key,
':');
2139 case 1: *p = 0;
break;
2150 else if (t->
key[0] == prefix &&
2172 "Could not alloc memory for stream options.\n");
2175 for (
i = 0;
i <
s->nb_streams;
i++)
2183 if (new_size >= INT_MAX / elem_size) {
2187 if (*
size < new_size) {
2193 memset(
tmp + *
size*elem_size, 0, (new_size-*
size) * elem_size);
2208 theta -= 360*floor(theta/360 + 0.9/360);
2210 if (fabs(theta - 90*
round(theta/90)) > 2)
2212 "If you want to help, upload a sample "
2213 "of this file to https://streams.videolan.org/upload/ "
2214 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)");
2228 printf(
"Auto-detected sources for %s:\n", fmt->
name);
2231 printf(
"Cannot list sources. Not implemented.\n");
2236 printf(
"Cannot list sources.\n");
2258 printf(
"Auto-detected sinks for %s:\n", fmt->
name);
2261 printf(
"Cannot list sinks. Not implemented.\n");
2266 printf(
"Cannot list sinks.\n");
2284 char *opts_str =
NULL;
2289 if ((opts_str = strchr(*dev,
','))) {
2290 *(opts_str++) =
'\0';
2297 printf(
"\nDevice name is not provided.\n"
2298 "You can pass devicename[,opt1=val1[,opt2=val2...]] as an argument.\n\n");
2302 int show_sources(
void *optctx,
const char *opt,
const char *
arg)
2312 if ((
ret = show_sinks_sources_parse_arg(
arg, &dev, &
opts)) < 0)
2318 if (!strcmp(fmt->
name,
"lavfi"))
2322 print_device_sources(fmt,
opts);
2330 print_device_sources(fmt,
opts);
2340 int show_sinks(
void *optctx,
const char *opt,
const char *
arg)
2350 if ((
ret = show_sinks_sources_parse_arg(
arg, &dev, &
opts)) < 0)
2358 print_device_sinks(fmt,
opts);
2366 print_device_sinks(fmt,
opts);
static void error(const char *err)
static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
AVFrame * ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
Request an audio samples buffer with a specific set of permissions.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it be(in the first position) for now. Options ------- Then comes the options array. This is what will define the user accessible options. For example
void av_force_cpu_flags(int arg)
Disables cpu detection and forces the specified flags.
static void print_codecs_for_id(enum AVCodecID id, int encoder)
F H1 F F H1 F F F F H1<-F-------F-------F v v v H2 H3 H2 ^ ^ ^ F-------F-------F-> H1<-F-------F-------F|||||||||F H1 F|||||||||F H1 Funavailable fullpel samples(outside the picture for example) shall be equalto the closest available fullpel sampleSmaller pel interpolation:--------------------------if diag_mc is set then points which lie on a line between 2 vertically, horizontally or diagonally adjacent halfpel points shall be interpolatedlinearly with rounding to nearest and halfway values rounded up.points which lie on 2 diagonals at the same time should only use the onediagonal not containing the fullpel point F--> O q O<--h1-> O q O<--F v \/v \/v O O O O O O O|/|\|q q q q q|/|\|O O O O O O O ^/\ ^/\ ^ h2--> O q O<--h3-> O q O<--h2 v \/v \/v O O O O O O O|\|/|q q q q q|\|/|O O O O O O O ^/\ ^/\ ^ F--> O q O<--h1-> O q O<--Fthe remaining points shall be bilinearly interpolated from theup to 4 surrounding halfpel and fullpel points, again rounding should be tonearest and halfway values rounded upcompliant Snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chromainterpolation at leastOverlapped block motion compensation:-------------------------------------FIXMELL band prediction:===================Each sample in the LL0 subband is predicted by the median of the left, top andleft+top-topleft samples, samples outside the subband shall be considered tobe 0. To reverse this prediction in the decoder apply the following.for(y=0;y< height;y++){ for(x=0;x< width;x++){ sample[y][x]+=median(sample[y-1][x], sample[y][x-1], sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]);}}sample[-1][ *]=sample[ *][-1]=0;width, height here are the width and height of the LL0 subband not of the finalvideoDequantization:===============FIXMEWavelet Transform:==================Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integertransform and an integer approximation of the symmetric biorthogonal 9/7daubechies wavelet.2D IDWT(inverse discrete wavelet transform) --------------------------------------------The 2D IDWT applies a 2D filter recursively, each time combining the4 lowest frequency subbands into a single subband until only 1 subbandremains.The 2D filter is done by first applying a 1D filter in the vertical directionand then applying it in the horizontal one. --------------- --------------- --------------- ---------------|LL0|HL0|||||||||||||---+---|HL1||L0|H0|HL1||LL1|HL1|||||LH0|HH0|||||||||||||-------+-------|-> L1 H1 LH1 HH1 LH1 HH1 LH1 HH1 L1
const OptionGroupDef * group_def
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
#define AV_LOG_WARNING
Something somehow does not look correct.
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
static void process(NormalizeContext *s, AVFrame *in, AVFrame *out)
static void draw_horiz_band(AVCodecContext *ctx, const AVFrame *fr, int offset[4], int slice_position, int type, int height)
#define sws_isSupportedOutput(x)
AVPixelFormat
Pixel format.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
F H1 F F H1 F F F F H1<-F-------F-------F v v v H2 H3 H2 ^ ^ ^ F-------F-------F-> H1<-F-------F-------F|||||||||F H1 F|||||||||F H1 Funavailable fullpel samples(outside the picture for example) shall be equalto the closest available fullpel sampleSmaller pel interpolation:--------------------------if diag_mc is set then points which lie on a line between 2 vertically, horizontally or diagonally adjacent halfpel points shall be interpolatedlinearly with rounding to nearest and halfway values rounded up.points which lie on 2 diagonals at the same time should only use the onediagonal not containing the fullpel point F--> O q O<--h1-> O q O<--F v \/v \/v O O O O O O O|/|\|q q q q q|/|\|O O O O O O O ^/\ ^/\ ^ h2--> O q O<--h3-> O q O<--h2 v \/v \/v O O O O O O O|\|/|q q q q q|\|/|O O O O O O O ^/\ ^/\ ^ F--> O q O<--h1-> O q O<--Fthe remaining points shall be bilinearly interpolated from theup to 4 surrounding halfpel and fullpel points, again rounding should be tonearest and halfway values rounded upcompliant Snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chromainterpolation at leastOverlapped block motion compensation:-------------------------------------FIXMELL band prediction:===================Each sample in the LL0 subband is predicted by the median of the left, top andleft+top-topleft samples, samples outside the subband shall be considered tobe 0. To reverse this prediction in the decoder apply the following.for(y=0;y< height;y++){ for(x=0;x< width;x++){ sample[y][x]+=median(sample[y-1][x], sample[y][x-1], sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]);}}sample[-1][ *]=sample[ *][-1]=0;width, height here are the width and height of the LL0 subband not of the finalvideoDequantization:===============FIXMEWavelet Transform:==================Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integertransform and an integer approximation of the symmetric biorthogonal 9/7daubechies wavelet.2D IDWT(inverse discrete wavelet transform) --------------------------------------------The 2D IDWT applies a 2D filter recursively, each time combining the4 lowest frequency subbands into a single subband until only 1 subbandremains.The 2D filter is done by first applying a 1D filter in the vertical directionand then applying it in the horizontal one. --------------- --------------- --------------- ---------------|LL0|HL0|||||||||||||---+---|HL1||L0|H0|HL1||LL1|HL1|||||LH0|HH0|||||||||||||-------+-------|-> L1 H1 LH1 HH1 LH1 HH1 LH1 HH1 L2
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C program
The reader does not expect b to be semantically here and if the code is changed by maybe adding a cast
int avdevice_list_input_sources(AVInputFormat *device, const char *device_name, AVDictionary *device_options, AVDeviceInfoList **device_list)
List devices.
#define sws_isSupportedInput(x)
static av_cold int init(AVCodecContext *avctx)
static int mix(int c0, int c1)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
const AVClass * avio_protocol_get_class(const char *name)
Get AVClass by names of available protocols.
#define AV_OPT_FLAG_VIDEO_PARAM
#define GET_SAMPLE_RATE_NAME(rate)
enum AVMediaType codec_type
General type of the encoded data.
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated space
#define AV_IS_INPUT_DEVICE(category)
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
status_out is the status that have been taken into it is final when it is not The typical task of an activate callback is to first check the backward status of output and if relevant forward it to the corresponding input Then
const char * long_name
Descriptive name for the codec, meant to be more human readable than name.
static FILE * report_file
int show_formats(void *optctx, const char *opt, const char *arg)
Print a listing containing all the formats supported by the program (including devices).
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
#define AV_CODEC_PROP_LOSSY
Codec supports lossy compression.
static const ElemCat * elements[ELEMENT_COUNT]
===============The purpose of these rules is to ensure that frames flow in the filter graph without getting stuck and accumulating somewhere. Simple filters that output one frame for each input frame should not have to worry about it. There are two design for filters:one using the filter_frame() and request_frame() callbacks and the other using the activate() callback. The design using filter_frame() and request_frame() is legacy, but it is suitable for filters that have a single input and process one frame at a time. New filters with several inputs, that treat several frames at a time or that require a special treatment at EOF should probably use the design using activate(). activate -------- This method is called when something must be done in a filter scheduling
int opt_report(void *optctx, const char *opt, const char *arg)
char * av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt)
Generate a string corresponding to the sample format with sample_fmt, or a header if sample_fmt is ne...
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
int show_layouts(void *optctx, const char *opt, const char *arg)
Print a listing containing all the standard channel layouts supported by the program.
static double cb(void *priv, double x, double y)
static void nothing(void *foo)
The official guide to swscale for confused that is
#define u(width, name, range_min, range_max)
#define AV_LOG_QUIET
Print no output.
const AVClass * priv_class
AVClass for the private context.
static void init_parse_context(OptionParseContext *octx, const OptionGroupDef *groups, int nb_groups)
The official guide to swscale for confused developers
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
static void log_callback_report(void *ptr, int level, const char *fmt, va_list vl)
static enum AVSampleFormat sample_fmts[]
#define AVERROR_EOF
End of file.
#define AV_CODEC_CAP_HARDWARE
Codec is backed by a hardware implementation.
static const struct PPFilter filters[]
#define AV_LOG_PANIC
Something went really wrong and we will crash now.
This document is a tutorial initiation for writing simple filters in libavfilter libavfilter is which means that it is highly recommended that you submit your filters to the FFmpeg development mailing list and make sure that they are applied your filters are likely to have a very short lifetime due to more or less regular internal API and a limited and testing changes the pixels in whatever fashion you and outputs the modified frame The most simple way of doing this is to take a similar filter We ll pick edgedetect
char * device_name
device name, format depends on device
static void show_help_codec(const char *name, int encoder)
static char get_media_type_char(enum AVMediaType type)
static const int8_t mv[256][2]
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
static struct codec_string codecs[]
The exact code depends on how similar the blocks are and how related they are to the and needs to apply these operations to the correct inlink or outlink if there are several Macros are available to factor that when no extra processing is inlink
int show_version(void *optctx, const char *opt, const char *arg)
Print the version of the program to stdout.
static int16_t basis[64][64]
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
const char program_name[]
program name, defined by the program for show_version().
int nb_devices
number of autodetected devices
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define GET_PIX_FMT_NAME(pix_fmt)
static av_cold int end(AVCodecContext *avctx)
static const OMX_CALLBACKTYPE callbacks
#define AV_CODEC_CAP_TRUNCATED
This structure describes decoded (raw) audio or video data.
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
static void print_codec(const AVCodec *c)
int capabilities
Codec capabilities.
Note except for filters that can have queued frames and sources
static const uint16_t table[]
static int linear(InterplayACMContext *s, unsigned ind, unsigned col)
const AVPixFmtDescriptor * av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev)
Iterate over all pixel format descriptors known to libavutil.
AVDictionary * format_opts
const char * avio_enum_protocols(void **opaque, int output)
Iterate through names of available protocols.
int ff_request_frame(AVFilterLink *link)
Request an input frame from the filter at the other end of the link.
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
the frame and frame reference mechanism is intended to as much as possible
void * av_mallocz_array(size_t nmemb, size_t size)
#define AV_LOG_VERBOSE
Detailed information.
#define fc(width, name, range_min, range_max)
#define allocate(name, size)
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel used by the pixel format described by pixdesc.
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
const AVBitStreamFilter * av_bsf_iterate(void **opaque)
Iterate over all registered bitstream filters.
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
status_in is a status change that must be taken into account after all frames in fifo have been processed
static int config_props(AVFilterLink *outlink)
static uint32_t reverse(uint32_t num, int bits)
static void put_pixel(uint16_t *dst, ptrdiff_t linesize, const int16_t *in, int bits_per_raw_sample)
Add bias value, clamp and output pixels of a slice.
A link between two filters.
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter Makefile
#define FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink)
Forward the status on an output link to an input link.
F H1 F F H1 F F F F H1<-F-------F-------F v v v H2 H3 H2 ^ ^ ^ F-------F-------F-> H1<-F-------F-------F|||||||||F H1 F|||||||||F H1 Funavailable fullpel samples(outside the picture for example) shall be equalto the closest available fullpel sampleSmaller pel interpolation:--------------------------if diag_mc is set then points which lie on a line between 2 vertically, horizontally or diagonally adjacent halfpel points shall be interpolatedlinearly with rounding to nearest and halfway values rounded up.points which lie on 2 diagonals at the same time should only use the onediagonal not containing the fullpel point F--> O q O<--h1-> O q O<--F v \/v \/v O O O O O O O|/|\|q q q q q|/|\|O O O O O O O ^/\ ^/\ ^ h2--> O q O<--h3-> O q O<--h2 v \/v \/v O O O O O O O|\|/|q q q q q|\|/|O O O O O O O ^/\ ^/\ ^ F--> O q O<--h1-> O q O<--Fthe remaining points shall be bilinearly interpolated from theup to 4 surrounding halfpel and fullpel points, again rounding should be tonearest and halfway values rounded upcompliant Snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chromainterpolation at leastOverlapped block motion compensation:-------------------------------------FIXMELL band prediction:===================Each sample in the LL0 subband is predicted by the median of the left, top andleft+top-topleft samples, samples outside the subband shall be considered tobe 0. To reverse this prediction in the decoder apply the following.for(y=0;y< height;y++){ for(x=0;x< width;x++){ sample[y][x]+=median(sample[y-1][x], sample[y][x-1], sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]);}}sample[-1][ *]=sample[ *][-1]=0;width, height here are the width and height of the LL0 subband not of the finalvideoDequantization:===============FIXMEWavelet Transform:==================Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integertransform and an integer approximation of the symmetric biorthogonal 9/7daubechies wavelet.2D IDWT(inverse discrete wavelet transform) --------------------------------------------The 2D IDWT applies a 2D filter recursively, each time combining the4 lowest frequency subbands into a single subband until only 1 subbandremains.The 2D filter is done by first applying a 1D filter in the vertical directionand then applying it in the horizontal one. --------------- --------------- --------------- ---------------|LL0|HL0|||||||||||||---+---|HL1||L0|H0|HL1||LL1|HL1|||||LH0|HH0|||||||||||||-------+-------|-> L1 H1 LH1 HH1 LH1 HH1 LH1 HH1 H0
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
const AVBitStreamFilter * av_bsf_get_by_name(const char *name)
#define bit(string, value)
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
F H1 F F H1 F F F F H1<-F-------F-------F v v v H2 H3 H2 ^ ^ ^ F-------F-------F-> H1<-F-------F-------F|||||||||F H1 F|||||||||F H1 Funavailable fullpel samples(outside the picture for example) shall be equalto the closest available fullpel sampleSmaller pel interpolation:--------------------------if diag_mc is set then points which lie on a line between 2 vertically, horizontally or diagonally adjacent halfpel points shall be interpolatedlinearly with rounding to nearest and halfway values rounded up.points which lie on 2 diagonals at the same time should only use the onediagonal not containing the fullpel point F--> O q O<--h1-> O q O<--F v \/v \/v O O O O O O O|/|\|q q q q q|/|\|O O O O O O O ^/\ ^/\ ^ h2--> O q O<--h3-> O q O<--h2 v \/v \/v O O O O O O O|\|/|q q q q q|\|/|O O O O O O O ^/\ ^/\ ^ F--> O q O<--h1-> O q O<--Fthe remaining points shall be bilinearly interpolated from theup to 4 surrounding halfpel and fullpel points, again rounding should be tonearest and halfway values rounded upcompliant Snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chromainterpolation at leastOverlapped block motion compensation:-------------------------------------FIXMELL band prediction:===================Each sample in the LL0 subband is predicted by the median of the left, top andleft+top-topleft samples, samples outside the subband shall be considered tobe 0. To reverse this prediction in the decoder apply the following.for(y=0;y< height;y++){ for(x=0;x< width;x++){ sample[y][x]+=median(sample[y-1][x], sample[y][x-1], sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]);}}sample[-1][ *]=sample[ *][-1]=0;width, height here are the width and height of the LL0 subband not of the finalvideoDequantization:===============FIXMEWavelet Transform:==================Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integertransform and an integer approximation of the symmetric biorthogonal 9/7daubechies wavelet.2D IDWT(inverse discrete wavelet transform) --------------------------------------------The 2D IDWT applies a 2D filter recursively, each time combining the4 lowest frequency subbands into a single subband until only 1 subbandremains.The 2D filter is done by first applying a 1D filter in the vertical directionand then applying it in the horizontal one. --------------- --------------- --------------- ---------------|LL0|HL0|||||||||||||---+---|HL1||L0|H0|HL1||LL1|HL1|||||LH0|HH0|||||||||||||-------+-------|-> L1 H1 LH1 HH1 LH1 HH1 LH1 HH1 Filter
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
void av_max_alloc(size_t max)
Set the maximum size that may be allocated in one block.
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a it should return
ff_const59 AVOutputFormat * av_guess_format(const char *short_name, const char *filename, const char *mime_type)
Return the output format in the list of registered output formats which best matches the provided par...
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that some(invalid) inputs can trigger overflows(undefined behavior). In these cases
static const chunk_decoder decoder[8]
A list of option groups that all have the same group type (e.g.
if it could not for temporary reasons
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like for each output line the vertical scaler pulls lines from a ring buffer When the ring buffer does not contain the wanted then it is pulled from the input slice through the input converter and horizontal scaler The result is also stored in the ring buffer to serve future vertical scaler requests When no more output can be generated because lines from a future slice would be then all remaining lines in the current slice are horizontally scaled and put in the ring buffer[This is done for luma and chroma, each with possibly different numbers of lines per picture.] Input to YUV Converter When the input to the main path is not planar bits per component YUV or bit it is converted to planar bit YUV Two sets of converters exist for this the other leaves the full chroma resolution
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
int show_decoders(void *optctx, const char *opt, const char *arg)
Print a listing containing all the decoders supported by the program.
AVOutputFormat * av_output_video_device_next(AVOutputFormat *d)
Video output devices iterator.
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
if it could not because there are no more frames
status_out is the status that have been taken into it is final when it is not The typical task of an activate callback is to first check the backward status of output and if relevant forward it to the corresponding input if relevant
const AVFilter * av_filter_iterate(void **opaque)
Iterate over all registered filters.
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
int av_parse_cpu_caps(unsigned *flags, const char *s)
Parse CPU caps from a string and update the given AV_CPU_* flags based on that.
Writing a table generator This documentation is preliminary Parts of the API are not good and should be changed Basic concepts A table generator consists of two *_tablegen c and *_tablegen h The h file will provide the variable declarations and initialization code for the tables
FFmpeg s bug feature request tracker new issues and changes to existing issues can be done through a web interface Issues can be different kinds of things we want to keep track of but that do not belong into the source tree itself This includes bug feature requests and license violations We might add more items to this list in the future
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
#define AVERROR_OPTION_NOT_FOUND
Option not found.
#define AV_BPRINT_SIZE_AUTOMATIC
An option extracted from the commandline.
static double val(void *priv, double ch)
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
status_out is the status that have been taken into account
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
#define us(width, name, range_min, range_max, subs,...)
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
const OptionGroupDef * group_def
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
static void idct(int16_t block[64])
A filter pad used for either input or output.
static void show_help_bsf(const char *name)
static int resample(ResampleContext *c, void *dst, const void *src, int *consumed, int src_size, int dst_size, int update_ctx, int nearest_neighbour)
spatial_decomposition_type s header_state qlog s header_state mv_scale s header_state qbias s header_state block_max_depth s header_state qlogs
AVDeviceInfo ** devices
list of autodetected devices
static int aligned(int val)
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But first
FFmpeg currently uses a custom build this text attempts to document some of its obscure features and options Makefile variables
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static void print_buildconf(int flags, int level)
static av_cold int initFilter(int16_t **outFilter, int32_t **filterPos, int *outFilterSize, int xInc, int srcW, int dstW, int filterAlign, int one, int flags, int cpu_flags, SwsVector *srcFilter, SwsVector *dstFilter, double param[2], int srcPos, int dstPos)
#define AV_CODEC_CAP_EXPERIMENTAL
Codec is experimental and is thus avoided in favor of non experimental encoders.
AVDictionary * codec_opts
static void set(uint8_t *a[], int ch, int index, int ch_count, enum AVSampleFormat f, double v)
static void expand_filename_template(AVBPrint *bp, const char *template, struct tm *tm)
static void check_options(const OptionDef *po)
#define media_type_string
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report await_progress()
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
#define check(x, y, S, v)
The official guide to swscale for confused that consecutive non overlapping rectangles of slice_bottom special converter These generally are unscaled converters of common like YUV
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl, char *line, int line_size, int *print_prefix)
Format a line of log the same way as the default callback.
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
static void ff_outlink_set_status(AVFilterLink *link, int status, int64_t pts)
Set the status field of a link from the source filter.
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
InputStream ** input_streams
#define MC(PEL, DIR, WIDTH)
This struct describes the properties of a single codec described by an AVCodecID.
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not matter(as it is from invalid input). In some cases the input can be checked easily in others checking the input is computationally too intensive. In these remaining cases a unsigned type can be used instead of a signed type. unsigned overflows are defined in C. SUINT ----- As we have above established there is a need to use "unsigned" sometimes in computations which work with signed integers(which overflow). Using "unsigned" for signed integers has the very significant potential to cause confusion as in unsigned a
int split_commandline(OptionParseContext *octx, int argc, char *argv[], const OptionDef *options, const OptionGroupDef *groups, int nb_groups)
Split the commandline into an intermediate form convenient for further processing.
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
AVDictionary * resample_opts
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
#define SWS_FULL_CHR_H_INP
This document is a tutorial initiation for writing simple filters in libavfilter libavfilter is which means that it is highly recommended that you submit your filters to the FFmpeg development mailing list and make sure that they are applied your filters are likely to have a very short lifetime due to more or less regular internal API changes
int avfilter_pad_count(const AVFilterPad *pads)
Get the number of elements in a NULL-terminated array of AVFilterPads (e.g.
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
av_cold struct SwrContext * swr_alloc(void)
Allocate SwrContext.
const attribute_deprecated AVClass * avresample_get_class(void)
This is the more generic form
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static void get(uint8_t *pixels, int stride, int16_t *block)
Note except for filters that can have queued frames and request_frame does not push and as a reaction
static const AVFilterPad outputs[]
static enum AVPixelFormat pix_fmts[]
the buffer is automatically deallocated once all corresponding references have been destroyed The characteristics of the data(resolution, sample rate, etc.) are stored in the reference
const char * av_get_channel_name(uint64_t channel)
Get the name of a given channel.
int flags
A combination of AVFILTER_FLAG_*.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Undefined Behavior In the C some operations are like signed integer dereferencing freed pointers
static void dump_argument(const char *a)
static int report_file_level
static enum AVPixelFormat pix_fmt
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going on
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
write_fileheader() adds some minor things like a "this is a generated file" comment and some standard includes. tablegen.h defines some write functions for one- and two-dimensional arrays for standard types - they print only the "core" parts so they are easier to reuse for multi-dimensional arrays so the outermost
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
static int export(AVFilterContext *ctx, StreamContext *sc, int input)
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
#define AV_OPT_FLAG_BSF_PARAM
a generic parameter which can be set by the user for bit stream filtering
The libswresample context.
@ AVMEDIA_TYPE_DATA
Opaque data information usually continuous.
#define XMM_CLOBBERS(...)
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
#define AV_OPT_FLAG_AUDIO_PARAM
static int compare_codec_desc(const void *a, const void *b)
int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
Make sure a frame is writable.
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
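A sketch of probing for an option without allocating a context: pass a pointer to a bare AVClass pointer together with AV_OPT_SEARCH_FAKE_OBJ, roughly the way opt_default() resolves unknown options. The helper name is ours.

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static int codec_ctx_has_option(const char *name)
{
    const AVClass *cls = avcodec_get_class();
    /* A double pointer to the AVClass stands in for a real context. */
    return av_opt_find(&cls, name, NULL, 0,
                       AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ) != NULL;
}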
static void callback(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time, enum dshowDeviceType devtype)
static void finish_group(OptionParseContext *octx, int group_idx, const char *arg)
OutputStream ** output_streams
#define AV_IS_OUTPUT_DEVICE(category)
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
int av_log_get_level(void)
Get the current log level.
int show_buildconf(void *optctx, const char *opt, const char *arg)
Print the build configuration of the program to stdout.
#define AV_CODEC_PROP_INTRA_ONLY
Codec uses only intra compression.
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
static const int16_t quant_table[64]
void init_dynload(void)
Initialize dynamic library loading.
AVDictionary * format_opts
AVCodecParameters * codecpar
Codec parameters associated with this stream.
int main(int argc, char *argv[])
Describe the class of an AVClass context structure.
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
static void flush(AVCodecContext *avctx)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
static void prepare_app_arguments(int *argc_ptr, char ***argv_ptr)
uint8_t nb_components
The number of components each pixel has (1-4).
static void push(HysteresisContext *s, int x, int y, int w)
const AVClass * swr_get_class(void)
Get the AVClass for SwrContext.
#define LIBAVFILTER_VERSION_MICRO
static const int8_t transform[32][32]
Rational number (pair of numerator and denominator).
static double coefficients[8 *8]
static const AVCodec * next_codec_for_id(enum AVCodecID id, void **iter, int encoder)
static int show_formats_devices(void *optctx, const char *opt, const char *arg, int device_only, int muxdemuxers)
FFmpeg currently uses a custom build system
#define GET_CODEC_NAME(id)
void av_log_set_flags(int arg)
struct SwsContext * sws_alloc_context(void)
Allocate an empty SwsContext.
int show_muxers(void *optctx, const char *opt, const char *arg)
Print a listing containing all the muxers supported by the program (including devices).
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
const AVClass * priv_class
A class for the private data, used to declare bitstream filter private AVOptions.
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
size_t av_cpu_max_align(void)
Get the maximum data alignment that may be required by FFmpeg.
int av_parse_time(int64_t *timeval, const char *timestr, int duration)
Parse timestr and return in *time a corresponding number of microseconds.
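For illustration, both parsing modes of av_parse_time(); the literals are arbitrary.

#include <stdio.h>
#include <libavutil/parseutils.h>

static void demo_parse_time(void)
{
    int64_t us;
    /* is_duration=1: "00:01:30.5" yields 90500000 microseconds */
    if (av_parse_time(&us, "00:01:30.5", 1) >= 0)
        printf("duration: %lld us\n", (long long)us);
    /* is_duration=0: an absolute date/time instead */
    if (av_parse_time(&us, "2024-01-01T00:00:00Z", 0) >= 0)
        printf("date: %lld us since the epoch\n", (long long)us);
}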
static int write_option(void *optctx, const OptionDef *po, const char *opt, const char *arg)
int av_get_standard_channel_layout(unsigned index, uint64_t *layout, const char **name)
Get the value and name of a standard channel layout.
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
AVOutputFormat * av_output_audio_device_next(AVOutputFormat *d)
Audio output devices iterator.
static void diff_bytes(HYuvContext *s, uint8_t *dst, const uint8_t *src0, const uint8_t *src1, int w)
#define AV_OPT_SEARCH_FAKE_OBJ
The obj passed to av_opt_find() is fake – only a double pointer to AVClass instead of a required pointer to a struct containing AVClass.
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
AVCodecID
Identify the syntax and semantics of the bitstream.
AVInputFormat * av_input_video_device_next(AVInputFormat *d)
Video input devices iterator.
#define AVFILTER_FLAG_DYNAMIC_OUTPUTS
The number of the filter outputs is not determined just by AVFilter.outputs.
FFmpeg multithreading methods
#define AV_CODEC_CAP_CHANNEL_CONF
Codec should fill in channel configuration and samplerate instead of container.
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
int locate_option(int argc, char **argv, const OptionDef *options, const char *optname)
Return index of option opt in argv or 0 if not found.
int av_codec_is_decoder(const AVCodec *codec)
static void interleave(uint8_t *dst, uint8_t *src, int w, int h, int dst_linesize, int src_linesize, enum FilterMode mode, int swap)
FF_FILTER_FORWARD_STATUS_ALL(outlink, filter)
AVDictionary * codec_opts
const OptionDef options[]
static void show_help_demuxer(const char *name)
#define AV_CODEC_CAP_AUTO_THREADS
Codec supports avctx->thread_count == 0 (auto).
static void blur(uint8_t *dst, int dst_step, const uint8_t *src, int src_step, int len, int radius, int pixsize)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
static AVFrame * get_audio_buffer(AVFilterLink *inlink, int nb_samples)
@ AV_SAMPLE_FMT_NB
Number of sample formats. DO NOT USE if linking dynamically.
int show_help(void *optctx, const char *opt, const char *arg)
Generic -h handler common to all fftools.
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
void ff_inlink_set_status(AVFilterLink *link, int status)
Set the status on an input link.
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
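A small sketch of matching a specifier such as "a" (all audio) or "v:0" (first video) against the streams of an opened AVFormatContext; the helper name is hypothetical.

#include <libavformat/avformat.h>

static int count_matching_streams(AVFormatContext *fmt, const char *spec)
{
    unsigned i;
    int n = 0;
    for (i = 0; i < fmt->nb_streams; i++) {
        int ret = avformat_match_stream_specifier(fmt, fmt->streams[i], spec);
        if (ret < 0)
            return ret;   /* invalid specifier */
        n += ret > 0;     /* > 0 means the stream matches */
    }
    return n;
}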
static void copy(const float *p1, float *p2, const int length)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
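A sketch of the reference-counting idiom behind av_frame_ref(): the new frame shares the source's buffers instead of copying them, and the data is freed only when the last reference is dropped.

#include <libavutil/frame.h>

static AVFrame *borrow_frame(const AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    if (!dst)
        return NULL;
    if (av_frame_ref(dst, src) < 0)   /* copies props, refs the buffers */
        av_frame_free(&dst);
    return dst;                       /* release later with av_frame_free() */
}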
uint8_t * av_stream_get_side_data(const AVStream *stream, enum AVPacketSideDataType type, int *size)
Get side information from stream.
AVClassCategory category
Category used for visualization (like color). This is only set if the category is equal for all objects using this class.
OutputFile ** output_files
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
FILE * get_preset_file(char *filename, size_t filename_size, const char *preset_name, int is_path, const char *codec_name)
Get a file corresponding to a preset file.
#define PRINT_CODEC_SUPPORTED(codec, field, type, list_name, term, get_name)
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
static void print_codecs(int encoder)
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
#define AV_PIX_FMT_FLAG_BITSTREAM
All values of a component are bit-wise packed end to end.
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
#define FFDIFFSIGN(x, y)
Comparator.
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
#define GET_SAMPLE_FMT_NAME(sample_fmt)
static int swscale(SwsContext *c, const uint8_t *src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t *dst[], int dstStride[])
const AVInputFormat * av_demuxer_iterate(void **opaque)
Iterate over all registered demuxers.
printf("static const uint8_t my_array[100] = {\n")
int show_protocols(void *optctx, const char *opt, const char *arg)
Print a listing containing all the protocols supported by the program.
int av_log_get_flags(void)
const char * description
A description of the filter.
void avdevice_free_list_devices(AVDeviceInfoList **device_list)
Convenient function to free result of avdevice_list_devices().
static const uint8_t header[24]
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
static char * split(char *message, char delim)
#define CONFIG_FOOBAR_FILTER
AVInputFormat * av_input_audio_device_next(AVInputFormat *d)
Audio input devices iterator.
static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt, FILE *output)
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
FF_FILTER_FORWARD_WANTED(outlink, inlink)
enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc)
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
int show_devices(void *optctx, const char *opt, const char *arg)
Print a listing containing all the devices supported by the program.
#define va_copy(dst, src)
char * device_description
Human-friendly name.
int avdevice_list_output_sinks(AVOutputFormat *device, const char *device_name, AVDictionary *device_options, AVDeviceInfoList **device_list)
int show_pix_fmts(void *optctx, const char *opt, const char *arg)
Print a listing containing all the pixel formats supported by the program.
static int interpolation(DeclickChannel *c, const double *src, int ar_order, double *acoefficients, int *index, int nb_errors, double *auxiliary, double *interpolated)
#define XMM_CLOBBERS_ONLY(...)
#define AV_LOG_INFO
Standard information.
int show_sample_fmts(void *optctx, const char *opt, const char *arg)
Print a listing containing all the sample formats supported by the program.
const AVCodecDescriptor * avcodec_descriptor_next(const AVCodecDescriptor *prev)
Iterate over all codec descriptors known to libavcodec.
AVCodec * avcodec_find_encoder_by_name(const char *name)
Find a registered encoder with the specified name.
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC
Some filters support a generic "enable" expression option that can be used to enable or disable a filter.
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
int av_codec_is_encoder(const AVCodec *codec)
static void(* program_exit)(int ret)
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
#define GET_CH_LAYOUT_DESC(ch_layout)
#define AV_CODEC_PROP_LOSSLESS
Codec supports lossless compression.
ff_const59 AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
void av_log_set_level(int level)
Set the log level.
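Tying the logging entries together, a minimal sketch of a typical setup:

#include <libavutil/log.h>

static void setup_logging(void)
{
    av_log_set_level(AV_LOG_DEBUG);   /* show everything up to and including debug */
    av_log_set_flags(av_log_get_flags() | AV_LOG_SKIP_REPEATED);
    av_log(NULL, AV_LOG_INFO, "log level is now %d\n", av_log_get_level());
}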
#define i(width, name, range_min, range_max)
static void print_all_libs_info(int flags, int level)
static av_always_inline av_const double round(double x)
int w
agreed upon image width
#define LIBAVFILTER_VERSION_MINOR
static int filter_frame(DBEContext *s, AVFrame *frame)
AVSampleFormat
Audio sample formats.
static av_const int av_toupper(int c)
Locale-independent conversion of ASCII characters to uppercase.
@ AVMEDIA_TYPE_ATTACHMENT
Opaque data information usually sparse.
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
#define CONFIG_HARDCODED_TABLES
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU).
static unsigned get_codecs_sorted(const AVCodecDescriptor ***rcodecs)
static int pix_sum(uint8_t *pix, int line_size, int w, int h)
const char * name
Name of the codec implementation.
int opt_timelimit(void *optctx, const char *opt, const char *arg)
Limit the execution time.
static av_always_inline void mv_scale(Mv *dst, Mv *src, int td, int tb)
static int headroom(int *la)
static const uint8_t plain[]
int opt_max_alloc(void *optctx, const char *opt, const char *arg)
const AVCodec * av_codec_iterate(void **opaque)
Iterate over all registered codecs.
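As an illustration of the iteration API, listing every registered encoder by name:

#include <stdio.h>
#include <libavcodec/avcodec.h>

static void list_encoders(void)
{
    void *iter = NULL;
    const AVCodec *c;
    while ((c = av_codec_iterate(&iter)))   /* returns NULL when done */
        if (av_codec_is_encoder(c))
            printf("%s\n", c->name);
}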
int parse_optgroup(void *optctx, OptionGroup *g)
Parse an options group and write results into optctx.
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
static int array[MAX_W *MAX_W]
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
enum AVPixelFormat pixfmt
static const float pred[4]
int read_yesno(void)
Return a positive value if a line read from standard input starts with [yY], otherwise return 0.
double av_strtod(const char *numstr, char **tail)
Parse the string in numstr and return its value as a double.
static int FUNC() comment(CodedBitstreamContext *ctx, RWContext *rw, JPEGRawComment *current)
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes, and null-terminate dst.
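A worked example of the truncation behavior (the buffer size and strings are arbitrary, values computed by hand):

#include <stdio.h>
#include <libavutil/avstring.h>

static void demo_strlcat(void)
{
    char buf[16] = "foo";
    size_t n = av_strlcat(buf, "/bar/baz/quux", sizeof(buf));
    /* buf is now "foo/bar/baz/quu": truncated but NUL-terminated.
     * n is 16, the length av_strlcat tried to create, so
     * n >= sizeof(buf) signals truncation. */
    printf("%s (%zu)\n", buf, n);
}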
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
void uninit_parse_context(OptionParseContext *octx)
Free all allocated memory in an OptionParseContext.
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
int av_opt_get_key_value(const char **ropts, const char *key_val_sep, const char *pairs_sep, unsigned flags, char **rkey, char **rval)
Extract a key-value pair from the beginning of a string.
void av_bprintf(AVBPrint *buf, const char *fmt,...)
const AVClass * av_opt_child_class_next(const AVClass *parent, const AVClass *prev)
Iterate over potential AVOptions-enabled children of parent.
const char * av_get_channel_description(uint64_t channel)
Get the description of a given channel.
#define AV_LOG_SKIP_REPEATED
Skip repeated messages; this requires the user app to use av_log() instead of (f)printf, as the two would otherwise interfere and lead to "Last message repeated x times" messages.
static void add_opt(OptionParseContext *octx, const OptionDef *opt, const char *key, const char *val)
#define FF_ARRAY_ELEMS(a)
int show_codecs(void *optctx, const char *opt, const char *arg)
Print a listing containing all the codecs supported by the program.
static int init_report(const char *env)
static const struct @78 transforms[18]
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Main external API structure.
static float compare(const AVFrame *haystack, const AVFrame *obj, int offx, int offy)
const AVOutputFormat * av_muxer_iterate(void **opaque)
Iterate over all registered muxers.
int parse_option(void *optctx, const char *opt, const char *arg, const OptionDef *options)
Parse one given option.
double get_rotation(AVStream *st)
void * av_calloc(size_t nmemb, size_t size)
Non-inlined equivalent of av_mallocz_array().
int default_device
index of default device or -1 if no default
int opt_cpuflags(void *optctx, const char *opt, const char *arg)
Override the cpuflags.
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
int h
agreed upon image height
int av_match_name(const char *name, const char *names)
Match instances of a name in a comma-separated list of names.
A Quick Description Of Rate Distortion Theory.
static void test(const char *pattern, const char *host)
int av_dict_parse_string(AVDictionary **pm, const char *str, const char *key_val_sep, const char *pairs_sep, int flags)
Parse the key/value pairs list and add the parsed entries to a dictionary.
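A short sketch combining the AVDictionary entries on this page; the keys and separators are arbitrary:

#include <libavutil/dict.h>

static int demo_dict(void)
{
    AVDictionary *d = NULL;
    int ret = av_dict_parse_string(&d, "b=2M:g=250", "=", ":", 0);
    if (ret >= 0)
        ret = av_dict_set(&d, "threads", "4", 0); /* overwrites an existing key */
    av_dict_free(&d);                             /* frees keys and values too */
    return ret;
}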
static int ref[MAX_W *MAX_W]
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
The word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure.
#define GROW_ARRAY(array, nb_elems)
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
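A sketch iterating the configurations until avcodec_get_hw_config() returns NULL, printing the device types usable through a hardware device context:

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>

static void list_hw_device_types(const AVCodec *codec)
{
    int i;
    const AVCodecHWConfig *cfg;
    for (i = 0; (cfg = avcodec_get_hw_config(codec, i)); i++)
        if (cfg->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX)
            printf("%s\n", av_hwdevice_get_type_name(cfg->device_type));
}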
int ff_outlink_get_status(AVFilterLink *link)
Get the status on an output link.
static const int factor[16]
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
static int bad(InterplayACMContext *s, unsigned ind, unsigned col)
static void print_program_info(int flags, int level)
static int shift(int a, int b)
#define AVFILTER_FLAG_SLICE_THREADS
The filter supports multithreading by splitting frames into multiple parts and processing them concur...
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions.
char * av_strdup(const char *s)
Duplicate a string.
void av_log_default_callback(void *ptr, int level, const char *fmt, va_list vl)
Default logging callback.
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time; demuxers which do not do this are connected to a parser to split what they return into proper frames.
#define AV_OPT_FLAG_SUBTITLE_PARAM
int show_colors(void *optctx, const char *opt, const char *arg)
Print a listing containing all the color names and values recognized by the program.
static enum AVCodecID codec_ids[]
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
#define AV_CODEC_CAP_HYBRID
Codec is potentially backed by a hardware implementation, but not necessarily.
const char * av_get_known_color_name(int color_idx, const uint8_t **rgbp)
Get the name of a color from the internal table of hard-coded named colors.
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
int show_filters(void *optctx, const char *opt, const char *arg)
Print a listing containing all the filters supported by the program.
int show_encoders(void *optctx, const char *opt, const char *arg)
Print a listing containing all the encoders supported by the program.
static const struct drawtext_function functions[]
int opt_loglevel(void *optctx, const char *opt, const char *arg)
Set the libav* libraries log level.
static void show_help_protocol(const char *name)
FF_FILTER_FORWARD_STATUS(inlink, outlink)
static double cr(void *priv, double x, double y)
const char * sep
Option to be used as group separator.
static const uint16_t channel_layouts[7]
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the then the processing requires a frame on this link and the filter is expected to make efforts in that direction The status of input links is stored by the status_in
#define AVFILTER_FLAG_SUPPORT_TIMELINE
Handy mask to test whether the filter supports or no the timeline feature (internally or generically)...
the frame and frame reference mechanism is intended to avoid
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already ready
static const double htaps[HTAPS]
The 2nd half (48 coeffs) of a 96-tap symmetric lowpass filter.
static pthread_once_t once
static uint32_t inverse(uint32_t v)
find multiplicative inverse modulo 2 ^ 32
static int query_formats(AVFilterContext *ctx)
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
void * grow_array(void *array, int elem_size, int *size, int new_size)
Realloc array to hold new_size elements of elem_size.
#define flags(name, subs,...)
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
static const double coeff[2][5]
The exact code depends on how similar the blocks are and how related they are to the block
#define AV_CODEC_CAP_AVOID_PROBING
Decoder is not a preferred choice for probing.
static int pix_norm1(uint8_t *pix, int line_size, int w)
static const OptionDef * find_option(const OptionDef *po, const char *name)
static av_cold int uninit(AVCodecContext *avctx)
the definition of that something depends on the semantic of the filter The callback must examine the status of the filter s links and proceed accordingly The status of output links is stored in the status_in and status_out fields and tested by the ff_outlink_frame_wanted() function. If this function returns true
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
The official guide to swscale for confused that consecutive non overlapping rectangles of dimension(0, slice_top) -(picture_width
static float project(float origin_x, float origin_y, float dest_x, float dest_y, int point_x, int point_y)
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
int show_bsfs(void *optctx, const char *opt, const char *arg)
Print a listing containing all the bit stream filters supported by the program.
This document is a tutorial initiation for writing simple filters in libavfilter libavfilter is which means that it is highly recommended that you submit your filters to the FFmpeg development mailing list and make sure that they are applied Otherwise
int show_license(void *optctx, const char *opt, const char *arg)
Print the license of the program to stdout.
#define PRINT_LIB_INFO(libname, LIBNAME, flags, level)
This document is a tutorial initiation for writing simple filters in libavfilter libavfilter is monolithic
const AVCodecDescriptor * avcodec_descriptor_get_by_name(const char *name)
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1<< 16)) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(UINT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(UINT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out->ch+ch,(const uint8_t **) in->ch+ch, off *(out-> planar
#define AV_CODEC_CAP_SMALL_LAST_FRAME
Codec can be fed a final frame with a smaller size.
int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags)
Show the obj options.
static void show_help_muxer(const char *name)
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
F H1 F F H1 F F F F H1<-F-------F-------F v v v H2 H3 H2 ^ ^ ^ F-------F-------F-> H1<-F-------F-------F|||||||||F H1 F|||||||||F H1 Funavailable fullpel samples(outside the picture for example) shall be equalto the closest available fullpel sampleSmaller pel interpolation:--------------------------if diag_mc is set then points which lie on a line between 2 vertically, horizontally or diagonally adjacent halfpel points shall be interpolatedlinearly with rounding to nearest and halfway values rounded up.points which lie on 2 diagonals at the same time should only use the onediagonal not containing the fullpel point F--> O q O<--h1-> O q O<--F v \/v \/v O O O O O O O|/|\|q q q q q|/|\|O O O O O O O ^/\ ^/\ ^ h2--> O q O<--h3-> O q O<--h2 v \/v \/v O O O O O O O|\|/|q q q q q|\|/|O O O O O O O ^/\ ^/\ ^ F--> O q O<--h1-> O q O<--Fthe remaining points shall be bilinearly interpolated from theup to 4 surrounding halfpel and fullpel points, again rounding should be tonearest and halfway values rounded upcompliant Snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chromainterpolation at leastOverlapped block motion compensation:-------------------------------------FIXMELL band prediction:===================Each sample in the LL0 subband is predicted by the median of the left, top andleft+top-topleft samples, samples outside the subband shall be considered tobe 0. To reverse this prediction in the decoder apply the following.for(y=0;y< height;y++){ for(x=0;x< width;x++){ sample[y][x]+=median(sample[y-1][x], sample[y][x-1], sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]);}}sample[-1][ *]=sample[ *][-1]=0;width, height here are the width and height of the LL0 subband not of the finalvideoDequantization:===============FIXMEWavelet Transform:==================Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integertransform and an integer approximation of the symmetric biorthogonal 9/7daubechies wavelet.2D IDWT(inverse discrete wavelet transform) --------------------------------------------The 2D IDWT applies a 2D filter recursively, each time combining the4 lowest frequency subbands into a single subband until only 1 subbandremains.The 2D filter is done by first applying a 1D filter in the vertical directionand then applying it in the horizontal one. --------------- --------------- --------------- ---------------|LL0|HL0|||||||||||||---+---|HL1||L0|H0|HL1||LL1|HL1|||||LH0|HH0|||||||||||||-------+-------|-> L1 H1 LH1 HH1 LH1 HH1 LH1 HH1 this can end with a L or a the number of elements shall be w s[-1] shall be considered equivalent to s[1] s[w] shall be considered equivalent to s[w-2] perform the lifting steps in order as described below Integer filter
static void sanitize(uint8_t *line)
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they do
static int passed(HysteresisContext *s, int x, int y, int w)
#define AV_LOG_PRINT_LEVEL
Include the log severity in messages originating from codecs.
enum AVHWDeviceType device_type
The device type associated with the configuration.
static AVFrame * get_video_buffer(AVFilterLink *inlink, int w, int h)
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input Therefore
Undefined Behavior In the C some operations are undefined
static int is_device(const AVClass *avclass)
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
int show_demuxers(void *optctx, const char *opt, const char *arg)
Print a listing containing all the demuxer supported by the program (including devices).
static int request_frame(AVFilterLink *outlink)
static int match_group_separator(const OptionGroupDef *groups, int nb_groups, const char *opt)
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf foobar
int(* func_arg)(void *, const char *, const char *)
static const AVOption * opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
double av_display_rotation_get(const int32_t matrix[9])
Extract the rotation component of the transformation matrix.
const int program_birth_year
program birth year, defined by the program for show_banner()
static const uint8_t dither[8][8]