59 #include <SDL_thread.h>
68 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
70 #define EXTERNAL_CLOCK_MIN_FRAMES 2
71 #define EXTERNAL_CLOCK_MAX_FRAMES 10
74 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
76 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
79 #define SDL_VOLUME_STEP (0.75)
82 #define AV_SYNC_THRESHOLD_MIN 0.04
84 #define AV_SYNC_THRESHOLD_MAX 0.1
86 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
88 #define AV_NOSYNC_THRESHOLD 10.0
91 #define SAMPLE_CORRECTION_PERCENT_MAX 10
94 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
95 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
96 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
99 #define AUDIO_DIFF_AVG_NB 20
102 #define REFRESH_RATE 0.01
106 #define SAMPLE_ARRAY_SIZE (8 * 65536)
108 #define CURSOR_HIDE_DELAY 1000000
110 #define USE_ONEPASS_SUBTITLE_RENDER 1
130 #define VIDEO_PICTURE_QUEUE_SIZE 3
131 #define SUBPICTURE_QUEUE_SIZE 16
132 #define SAMPLE_QUEUE_SIZE 9
133 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
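/* Editor's sketch (not part of ffplay.c): how the sync thresholds above are
 * typically applied when scheduling a video frame. `delay` is the nominal
 * frame duration and `diff` the video clock minus the master clock; the real
 * compute_target_delay further down follows the same scheme, though its exact
 * bounds differ slightly. Assumes the macros defined above and <math.h>. */
#include <math.h>

static double adjust_frame_delay(double delay, double diff)
{
    /* clamp the tolerance window to [MIN, MAX] around the frame duration */
    double sync_threshold = fmax(AV_SYNC_THRESHOLD_MIN,
                                 fmin(AV_SYNC_THRESHOLD_MAX, delay));
    if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
        if (diff <= -sync_threshold)                  /* video lags: shorten the wait */
            delay = fmax(0, delay + diff);
        else if (diff >= sync_threshold &&
                 delay > AV_SYNC_FRAMEDUP_THRESHOLD)  /* video leads, long frame: stretch it */
            delay = delay + diff;
        else if (diff >= sync_threshold)              /* video leads: show the frame twice as long */
            delay = 2 * delay;
    }
    return delay;
}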
352 static const char **vfilters_list = NULL;
353 static int nb_vfilters = 0;
354 static char *afilters = NULL;
364 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
398 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
401 vfilters_list[nb_vfilters - 1] = arg;
411 if (channel_count1 == 1 && channel_count2 == 1)
414 return channel_count1 != channel_count2 || fmt1 != fmt2;
421 return channel_layout;
446 SDL_CondSignal(q->cond);
462 SDL_LockMutex(q->mutex);
464 SDL_UnlockMutex(q->mutex);
485 q->mutex = SDL_CreateMutex();
490 q->cond = SDL_CreateCond();
503 SDL_LockMutex(q->mutex);
512 SDL_UnlockMutex(q->mutex);
519 SDL_DestroyMutex(q->mutex);
520 SDL_DestroyCond(q->cond);
525 SDL_LockMutex(q->mutex);
529 SDL_CondSignal(q->cond);
531 SDL_UnlockMutex(q->mutex);
536 SDL_LockMutex(q->mutex);
539 SDL_UnlockMutex(q->mutex);
548 SDL_LockMutex(q->mutex);
574 SDL_UnlockMutex(q->mutex);
663 if (got_frame && !d->pkt->data) {
671 av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
695 if (!(f->mutex = SDL_CreateMutex())) {
699 if (!(f->cond = SDL_CreateCond())) {
705 f->keep_last = !!keep_last;
706 for (i = 0; i < f->max_size; i++)
715 for (i = 0; i < f->max_size; i++) {
720 SDL_DestroyMutex(f->mutex);
721 SDL_DestroyCond(f->cond);
726 SDL_LockMutex(f->mutex);
727 SDL_CondSignal(f->cond);
728 SDL_UnlockMutex(f->mutex);
733 return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
738 return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
743 return &f->queue[f->rindex];
749 SDL_LockMutex(f->mutex);
750 while (f->size >= f->max_size &&
751        !f->pktq->abort_request) {
752 SDL_CondWait(f->cond, f->mutex);
754 SDL_UnlockMutex(f->mutex);
756 if (f->pktq->abort_request)
759 return &f->queue[f->windex];
765 SDL_LockMutex(f->mutex);
766 while (f->size - f->rindex_shown <= 0 &&
767        !f->pktq->abort_request) {
768 SDL_CondWait(f->cond, f->mutex);
770 SDL_UnlockMutex(f->mutex);
772 if (f->pktq->abort_request)
775 return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
780 if (++f->windex == f->max_size)
782 SDL_LockMutex(f->mutex);
784 SDL_CondSignal(f->cond);
785 SDL_UnlockMutex(f->mutex);
790 if (f->keep_last && !f->rindex_shown) {
795 if (++f->rindex == f->max_size)
797 SDL_LockMutex(f->mutex);
799 SDL_CondSignal(f->cond);
800 SDL_UnlockMutex(f->mutex);
806 return f->size - f->rindex_shown;
813 if (f->rindex_shown && fp->serial == f->pktq->serial)
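/* Editor's sketch (not part of ffplay.c): the frame queue above reads at
 * (rindex + rindex_shown) % max_size because, with keep_last set, the frame
 * currently on screen stays at rindex and rindex_shown becomes 1. A tiny
 * standalone illustration of that index arithmetic with made-up values. */
#include <stdio.h>

int main(void)
{
    int max_size = 3, rindex = 2, rindex_shown = 1, size = 2;
    printf("slot kept for redisplay: %d\n", rindex);                                 /* 2 */
    printf("peek slot:               %d\n", (rindex + rindex_shown) % max_size);     /* 0 */
    printf("peek_next slot:          %d\n", (rindex + rindex_shown + 1) % max_size); /* 1 */
    printf("frames still pending:    %d\n", size - rindex_shown);                    /* 1 */
    return 0;
}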
839 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
843 if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
847 SDL_DestroyTexture(*texture);
848 if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
850 if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
853 if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
855 memset(pixels, 0, pitch * new_height);
856 SDL_UnlockTexture(*texture);
858 av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
864 int scr_xleft, int scr_ytop, int scr_width, int scr_height,
865 int pic_width, int pic_height, AVRational pic_sar)
878 if (width > scr_width) {
882 x = (scr_width - width) / 2;
883 y = (scr_height - height) / 2;
884 rect->x = scr_xleft + x;
885 rect->y = scr_ytop + y;
893 *sdl_blendmode = SDL_BLENDMODE_NONE;
894 *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
899 *sdl_blendmode = SDL_BLENDMODE_BLEND;
911 SDL_BlendMode sdl_blendmode;
913 if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
915 switch (sdl_pix_fmt) {
916 case SDL_PIXELFORMAT_UNKNOWN:
921 if (*img_convert_ctx != NULL) {
924 if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
926     0, frame->height, pixels, pitch);
927 SDL_UnlockTexture(*tex);
934 case SDL_PIXELFORMAT_IYUV:
935 if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
939 } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
949 if (frame->linesize[0] < 0) {
961 #if SDL_VERSION_ATLEAST(2,0,8)
962 SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
965 mode = SDL_YUV_CONVERSION_JPEG;
967 mode = SDL_YUV_CONVERSION_BT709;
969 mode = SDL_YUV_CONVERSION_BT601;
971 SDL_SetYUVConversionMode(mode);
982 if (is->subtitle_st) {
986 if (vp->pts >= sp->pts + ((float)sp->sub.start_display_time / 1000)) {
991 if (!sp->width || !sp->height) {
995 if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
998 for (i = 0; i < sp->sub.num_rects; i++) {
1001 sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
1002 sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1003 sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1004 sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1010 if (!is->sub_convert_ctx) {
1014 if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1015 sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1016           0, sub_rect->h, pixels, pitch);
1017 SDL_UnlockTexture(is->sub_texture);
1040 #if USE_ONEPASS_SUBTITLE_RENDER
1044 double xratio = (double)rect.w / (double)sp->width;
1045 double yratio = (double)rect.h / (double)sp->height;
1046 for (i = 0; i < sp->sub.num_rects; i++) {
1047 SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1048 SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1049                    .y = rect.y + sub_rect->y * yratio,
1050                    .w = sub_rect->w * xratio,
1051                    .h = sub_rect->h * yratio};
1052 SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1060 return a < 0 ? a%b + b : a%b;
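/* Editor's note: compute_mod (line 1060 above) is a modulo that always lands
 * in [0, b), unlike C's % which keeps the dividend's sign; this lets the wave
 * display step backwards through sample_array and still get a valid index.
 * A quick standalone check of that behaviour: */
#include <assert.h>

static int compute_mod(int a, int b) { return a < 0 ? a % b + b : a % b; }

int main(void)
{
    assert(compute_mod(-3, 8) == 5);   /* -3 % 8 == -3, shifted up by 8 */
    assert(compute_mod(11, 8) == 3);   /* positive values behave like plain % */
    return 0;
}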
1065 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1068 int rdft_bits, nb_freq;
1070 for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1072 nb_freq = 1 << (rdft_bits - 1);
1078 int data_used = s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1080 delay = s->audio_write_buf_size;
1087 delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1090 delay += 2 * data_used;
1091 if (delay < data_used)
1095 if (s->show_mode == SHOW_MODE_WAVES) {
1099 int a = s->sample_array[idx];
1104 if (h < score && (b ^ c) < 0) {
1111 s->last_i_start = i_start;
1113 i_start = s->last_i_start;
1116 if (s->show_mode == SHOW_MODE_WAVES) {
1117 SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1120 h = s->height / nb_display_channels;
1123 for (ch = 0; ch < nb_display_channels; ch++) {
1125 y1 = s->ytop + ch * h + (h / 2);
1126 for (x = 0; x < s->width; x++) {
1127 y = (s->sample_array[i] * h2) >> 15;
1141 SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1143 for (ch = 1; ch < nb_display_channels; ch++) {
1144 y = s->ytop + ch * h;
1148 if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1151 if (s->xpos >= s->width)
1153 nb_display_channels = FFMIN(nb_display_channels, 2);
1154 if (rdft_bits != s->rdft_bits) {
1158 s->rdft_bits = rdft_bits;
1161 if (!s->rdft || !s->rdft_data){
1163 s->show_mode = SHOW_MODE_WAVES;
1166 SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1169 for (ch = 0; ch < nb_display_channels; ch++) {
1170 data[ch] = s->rdft_data + 2 * nb_freq * ch;
1172 for (x = 0; x < 2 * nb_freq; x++) {
1173 double w = (x-nb_freq) * (1.0 / nb_freq);
1174 data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1183 if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1185 pixels += pitch * s->height;
1186 for (y = 0; y < s->height; y++) {
1187 double w = 1 / sqrt(nb_freq);
1188 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1189 int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1194 *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1196 SDL_UnlockTexture(s->vis_texture);
1210 if (stream_index < 0 || stream_index >= ic->nb_streams)
1221 is->audio_buf1_size = 0;
1247 is->audio_stream = -1;
1251 is->video_stream = -1;
1255 is->subtitle_stream = -1;
1265 is->abort_request = 1;
1266 SDL_WaitThread(is->read_tid, NULL);
1269 if (is->audio_stream >= 0)
1271 if (is->video_stream >= 0)
1273 if (is->subtitle_stream >= 0)
1286 SDL_DestroyCond(is->continue_read_thread);
1290 if (is->vis_texture)
1291 SDL_DestroyTexture(is->vis_texture);
1292 if (is->vid_texture)
1293 SDL_DestroyTexture(is->vid_texture);
1294 if (is->sub_texture)
1295 SDL_DestroyTexture(is->sub_texture);
1307 SDL_DestroyWindow(window);
1330 if (max_width == INT_MAX && max_height == INT_MAX)
1351 SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1366 SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1368 if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1370 else if (is->video_st)
1377 if (*c->queue_serial != c->serial)
1383 return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1390 c->last_updated = time;
1391 c->pts_drift = c->pts - time;
1411 c->queue_serial = queue_serial;
1466 double speed = is->extclk.speed;
1475 if (!is->seek_req) {
1482 SDL_CondSignal(is->continue_read_thread);
1491 if (is->read_pause_return != AVERROR(ENOSYS)) {
1492 is->vidclk.paused = 0;
1497 is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1508 is->muted = !is->muted;
1513 double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1514 int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1515 is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
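Editor's note on the volume math above (lines 1513-1515): the current volume is converted to decibels with 20*log10(audio_volume / SDL_MIX_MAXVOLUME), SDL_VOLUME_STEP (0.75 dB) is added or subtracted, and the result is converted back with SDL_MIX_MAXVOLUME * 10^(dB/20). As a worked example, assuming SDL_MIX_MAXVOLUME = 128 and a current volume of 64: the level is 20*log10(0.5) ≈ -6.02 dB, one step up gives ≈ -5.27 dB, and new_volume = lrint(128 * 10^(-5.27/20)) ≈ 70. The `audio_volume == new_volume ? audio_volume + sign : new_volume` fallback guarantees the volume still moves by at least one unit when the dB step rounds back to the same integer, and av_clip keeps the result in [0, SDL_MIX_MAXVOLUME].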
1528 double sync_threshold, diff = 0;
1541 if (diff <= -sync_threshold)
1544 delay = delay + diff;
1545 else if (diff >= sync_threshold)
1559 if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1587 if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1589 is->last_vis_time = time;
1591 *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1599 double last_duration, duration, delay;
1606 if (vp->serial != is->videoq.serial) {
1622 if (time < is->frame_timer + delay) {
1623 *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1627 is->frame_timer += delay;
1629 is->frame_timer = time;
1631 SDL_LockMutex(is->pictq.mutex);
1634 SDL_UnlockMutex(is->pictq.mutex);
1640 is->frame_drops_late++;
1646 if (is->subtitle_st) {
1655 if (sp->serial != is->subtitleq.serial
1656     || (is->vidclk.pts > (sp->pts + ((float)sp->sub.end_display_time / 1000)))
1661 for (i = 0; i < sp->sub.num_rects; i++) {
1666 if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1667 for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1668 memset(pixels, 0, sub_rect->w << 2);
1669 SDL_UnlockTexture(is->sub_texture);
1681 is->force_refresh = 1;
1683 if (is->step && !is->paused)
1688 if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1691 is->force_refresh = 0;
1694 static int64_t last_time;
1696 int aqsize, vqsize, sqsize;
1700 if (!last_time || (cur_time - last_time) >= 30000) {
1705 aqsize = is->audioq.size;
1707 vqsize = is->videoq.size;
1708 if (is->subtitle_st)
1709 sqsize = is->subtitleq.size;
1711 if (is->audio_st && is->video_st)
1713 else if (is->video_st)
1715 else if (is->audio_st)
1720 "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1722 (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1724 is->frame_drops_early + is->frame_drops_late,
1728 is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1729 is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1732 fprintf(stderr, "%s", buf.str);
1739 last_time = cur_time;
1748 #if defined(DEBUG_SYNC)
1749 printf("frame_type=%c pts=%0.3f\n",
1794 diff - is->frame_last_filter_delay < 0 &&
1795 is->viddec.pkt_serial == is->vidclk.serial &&
1796 is->videoq.nb_packets) {
1797 is->frame_drops_early++;
1825 outputs->filter_ctx = source_ctx;
1830 inputs->filter_ctx = sink_ctx;
1855 char sws_flags_str[512] = "";
1856 char buffersrc_args[256];
1862 int nb_pix_fmts = 0;
1876 if (!strcmp(e->key, "sws_flags")) {
1877 av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1881 if (strlen(sws_flags_str))
1882 sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1886 snprintf(buffersrc_args, sizeof(buffersrc_args),
1887 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1889 is->video_st->time_base.num, is->video_st->time_base.den,
1892 av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1896 "ffplay_buffer", buffersrc_args, NULL,
1902 "ffplay_buffersink", NULL, NULL, graph);
1909 last_filter = filt_out;
1913 #define INSERT_FILT(name, arg) do { \
1914 AVFilterContext *filt_ctx; \
1916 ret = avfilter_graph_create_filter(&filt_ctx, \
1917 avfilter_get_by_name(name), \
1918 "ffplay_" name, arg, NULL, graph); \
1922 ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1926 last_filter = filt_ctx; \
1932 if (fabs(theta - 90) < 1.0) {
1933 INSERT_FILT("transpose", "clock");
1934 } else if (fabs(theta - 180) < 1.0) {
1935 INSERT_FILT("hflip", NULL);
1936 INSERT_FILT("vflip", NULL);
1937 } else if (fabs(theta - 270) < 1.0) {
1938 INSERT_FILT("transpose", "cclock");
1939 } else if (fabs(theta) > 1.0) {
1940 char rotate_buf[64];
1941 snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1942 INSERT_FILT("rotate", rotate_buf);
1949 is->in_video_filter = filt_src;
1950 is->out_video_filter = filt_out;
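/* Editor's sketch (not part of ffplay.c): configure_video_filters above builds
 * buffer source -> optional filters -> buffersink. A minimal standalone graph
 * of the same shape, using hypothetical fixed input parameters (1280x720,
 * pix_fmt 0 = AV_PIX_FMT_YUV420P, 25 fps) and an hflip stage in place of the
 * INSERT_FILT chain; on error the graph is leaked for brevity. */
#include <libavfilter/avfilter.h>
#include <libavutil/error.h>

static int build_simple_graph(AVFilterGraph **out_graph,
                              AVFilterContext **src, AVFilterContext **sink)
{
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *buf = NULL, *flip = NULL, *bsink = NULL;
    int ret;

    if (!graph)
        return AVERROR(ENOMEM);
    ret = avfilter_graph_create_filter(&buf, avfilter_get_by_name("buffer"), "in",
                                       "video_size=1280x720:pix_fmt=0:"
                                       "time_base=1/25:pixel_aspect=1/1",
                                       NULL, graph);
    if (ret < 0)
        return ret;
    ret = avfilter_graph_create_filter(&flip, avfilter_get_by_name("hflip"), "flip",
                                       NULL, NULL, graph);
    if (ret < 0)
        return ret;
    ret = avfilter_graph_create_filter(&bsink, avfilter_get_by_name("buffersink"), "out",
                                       NULL, NULL, graph);
    if (ret < 0)
        return ret;
    if ((ret = avfilter_link(buf, 0, flip, 0)) < 0 ||
        (ret = avfilter_link(flip, 0, bsink, 0)) < 0)
        return ret;
    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        return ret;
    *out_graph = graph;
    *src = buf;
    *sink = bsink;
    return 0;
}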
1956 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1963 char aresample_swr_opts[512] = "";
1965 char asrc_args[256];
1975 if (strlen(aresample_swr_opts))
1976 aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1977 av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1980 "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1982 is->audio_filter_src.channels,
1983 1, is->audio_filter_src.freq);
1984 if (is->audio_filter_src.channel_layout)
1986 ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1990 asrc_args, NULL, is->agraph);
2006 if (force_output_format) {
2008 channels [0] = is->audio_tgt.channel_layout ? -1 : is->audio_tgt.channels;
2024 is->in_audio_filter = filt_asrc;
2025 is->out_audio_filter = filt_asink;
2040 int last_serial = -1;
2041 int64_t dec_channel_layout;
2064 is->audio_filter_src.channel_layout != dec_channel_layout ||
2065 is->audio_filter_src.freq != frame->sample_rate ||
2066 is->auddec.pkt_serial != last_serial;
2069 char buf1[1024], buf2[1024];
2073 "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2077 is->audio_filter_src.fmt = frame->format;
2078 is->audio_filter_src.channels = frame->channels;
2079 is->audio_filter_src.channel_layout = dec_channel_layout;
2080 is->audio_filter_src.freq = frame->sample_rate;
2081 last_serial = is->auddec.pkt_serial;
2083 if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2098 af->serial = is->auddec.pkt_serial;
2105 if (is->audioq.serial != is->auddec.pkt_serial)
2109 is->auddec.finished = is->auddec.pkt_serial;
2148 int last_serial = -1;
2149 int last_vfilter_idx = 0;
2163 if ( last_w != frame->width
2164     || last_h != frame->height
2165     || last_format != frame->format
2166     || last_serial != is->viddec.pkt_serial
2167     || last_vfilter_idx != is->vfilter_idx) {
2169 "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2181 if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2184 event.user.data1 = is;
2185 SDL_PushEvent(&event);
2188 filt_in = is->in_video_filter;
2189 filt_out = is->out_video_filter;
2190 last_w = frame->width;
2191 last_h = frame->height;
2192 last_format = frame->format;
2193 last_serial = is->viddec.pkt_serial;
2194 last_vfilter_idx = is->vfilter_idx;
2208 is->viddec.finished = is->viddec.pkt_serial;
2215 is->frame_last_filter_delay = 0;
2223 if (is->videoq.serial != is->viddec.pkt_serial)
2255 if (got_subtitle && sp->sub.format == 0) {
2259 sp->serial = is->subdec.pkt_serial;
2260 sp->width = is->subdec.avctx->width;
2261 sp->height = is->subdec.avctx->height;
2266 } else if (got_subtitle) {
2283 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2285 is->sample_array_index += len;
2287 is->sample_array_index = 0;
2296 int wanted_nb_samples = nb_samples;
2300 double diff, avg_diff;
2301 int min_nb_samples, max_nb_samples;
2306 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2309 is->audio_diff_avg_count++;
2312 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2314 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2315 wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2318 wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2321 diff, avg_diff, wanted_nb_samples - nb_samples,
2322 is->audio_clock, is->audio_diff_threshold);
2327 is->audio_diff_avg_count = 0;
2328 is->audio_diff_cum = 0;
2332 return wanted_nb_samples;
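/* Editor's sketch (not part of ffplay.c): synchronize_audio above keeps an
 * exponential moving average of the audio-vs-master clock difference,
 * audio_diff_cum = diff + coef * audio_diff_cum, read back as
 * avg_diff = audio_diff_cum * (1 - coef). ffplay initializes the coefficient
 * as exp(log(0.01) / AUDIO_DIFF_AVG_NB), so the 20th-oldest sample weighs
 * about 1%. A standalone run of the same estimator: */
#include <math.h>
#include <stdio.h>

int main(void)
{
    const int nb = 20;                     /* AUDIO_DIFF_AVG_NB */
    double coef  = exp(log(0.01) / nb);
    double cum   = 0.0;

    for (int i = 0; i < 40; i++) {
        double diff = 0.005;               /* pretend a constant 5 ms drift */
        cum = diff + coef * cum;           /* same recurrence as line 2306 */
        printf("avg_diff after %2d frames: %.6f\n", i + 1, cum * (1.0 - coef));
    }
    return 0;                              /* the estimate converges to 0.005 */
}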
2344 int data_size, resampled_data_size;
2345 int64_t dec_channel_layout;
2347 int wanted_nb_samples;
2364 } while (af->serial != is->audioq.serial);
2370 dec_channel_layout =
2376 dec_channel_layout != is->audio_src.channel_layout ||
2381 is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2386 "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2392 is->audio_src.channel_layout = dec_channel_layout;
2401 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2416 if (!is->audio_buf1)
2423 if (len2 == out_count) {
2428 is->audio_buf = is->audio_buf1;
2432 resampled_data_size = data_size;
2435 audio_clock0 = is->audio_clock;
2440 is->audio_clock = NAN;
2441 is->audio_clock_serial = af->serial;
2444 static double last_clock;
2445 printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2446 is->audio_clock - last_clock,
2447 is->audio_clock, audio_clock0);
2448 last_clock = is->audio_clock;
2451 return resampled_data_size;
2458 int audio_size, len1;
2463 if (is->audio_buf_index >= is->audio_buf_size) {
2465 if (audio_size < 0) {
2470 if (is->show_mode != SHOW_MODE_VIDEO)
2472 is->audio_buf_size = audio_size;
2474 is->audio_buf_index = 0;
2476 len1 = is->audio_buf_size - is->audio_buf_index;
2479 if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2480 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2482 memset(stream, 0, len1);
2483 if (!is->muted && is->audio_buf)
2484 SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2488 is->audio_buf_index += len1;
2490 is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2492 if (!isnan(is->audio_clock)) {
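/* Editor's sketch (not part of ffplay.c): sdl_audio_callback above fills SDL's
 * buffer from a staging buffer, memcpy-ing at full volume and mixing over
 * silence with SDL_MixAudioFormat otherwise. The same shape, fed from a
 * hypothetical AudioFeed source: */
#include <SDL.h>
#include <string.h>

typedef struct AudioFeed {
    Uint8 *data;      /* decoded samples waiting to be played */
    int    size, pos;
    int    volume;    /* 0 .. SDL_MIX_MAXVOLUME */
} AudioFeed;

static void audio_cb(void *opaque, Uint8 *stream, int len)
{
    AudioFeed *f = opaque;

    while (len > 0) {
        int len1 = f->size - f->pos;
        if (len1 <= 0) {                  /* nothing buffered: play silence */
            memset(stream, 0, len);
            return;
        }
        if (len1 > len)
            len1 = len;
        if (f->volume == SDL_MIX_MAXVOLUME) {
            memcpy(stream, f->data + f->pos, len1);
        } else {
            memset(stream, 0, len1);      /* mix against silence at reduced volume */
            SDL_MixAudioFormat(stream, f->data + f->pos, AUDIO_S16SYS, len1, f->volume);
        }
        f->pos += len1;
        stream += len1;
        len    -= len1;
    }
}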
2498 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2500 SDL_AudioSpec wanted_spec, spec;
2502 static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2503 static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2504 int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2506 env = SDL_getenv("SDL_AUDIO_CHANNELS");
2508 wanted_nb_channels = atoi(env);
2516 wanted_spec.channels = wanted_nb_channels;
2517 wanted_spec.freq = wanted_sample_rate;
2518 if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2522 while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2523 next_sample_rate_idx--;
2524 wanted_spec.format = AUDIO_S16SYS;
2525 wanted_spec.silence = 0;
2528 wanted_spec.userdata = opaque;
2529 while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2531 wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2532 wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2533 if (!wanted_spec.channels) {
2534 wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2535 wanted_spec.channels = wanted_nb_channels;
2536 if (!wanted_spec.freq) {
2538 "No more combinations to try, audio open failed\n");
2544 if (spec.format != AUDIO_S16SYS) {
2546 "SDL advised audio format %d is not supported!\n", spec.format);
2549 if (spec.channels != wanted_spec.channels) {
2551 if (!wanted_channel_layout) {
2553 "SDL advised channel count %d is not supported!\n", spec.channels);
2559 audio_hw_params->freq = spec.freq;
2561 audio_hw_params->channels = spec.channels;
2577 const char *forced_codec_name = NULL;
2581 int64_t channel_layout;
2583 int stream_lowres = lowres;
2585 if (stream_index < 0 || stream_index >= ic->nb_streams)
2604 if (forced_codec_name)
2608 "No codec could be found with name '%s'\n", forced_codec_name);
2621 avctx->lowres = stream_lowres;
2649 is->audio_filter_src.channels = avctx->channels;
2652 if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2654 sink = is->out_audio_filter;
2668 is->audio_hw_buf_size = ret;
2669 is->audio_src = is->audio_tgt;
2670 is->audio_buf_size = 0;
2671 is->audio_buf_index = 0;
2675 is->audio_diff_avg_count = 0;
2678 is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2680 is->audio_stream = stream_index;
2681 is->audio_st = ic->streams[stream_index];
2686 is->auddec.start_pts = is->audio_st->start_time;
2687 is->auddec.start_pts_tb = is->audio_st->time_base;
2694 is->video_stream = stream_index;
2695 is->video_st = ic->streams[stream_index];
2701 is->queue_attachments_req = 1;
2704 is->subtitle_stream = stream_index;
2705 is->subtitle_st = ic->streams[stream_index];
2728 return is->abort_request;
2732 return stream_id < 0 ||
2740 if( !strcmp(s->iformat->name, "rtp")
2741     || !strcmp(s->iformat->name, "rtsp")
2742     || !strcmp(s->iformat->name, "sdp")
2746 if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2747               || !strncmp(s->url, "udp:", 4)
2762 int64_t stream_start_time;
2763 int pkt_in_play_range = 0;
2765 SDL_mutex *wait_mutex = SDL_CreateMutex();
2766 int scan_all_pmts_set = 0;
2775 memset(st_index, -1, sizeof(st_index));
2794 scan_all_pmts_set = 1;
2802 if (scan_all_pmts_set)
2823 for (i = 0; i < orig_nb_streams; i++)
2829 "%s: could not find codec parameters\n", is->filename);
2877 st_index[i] = INT_MAX;
2905 if (codecpar->width)
2918 if (is->show_mode == SHOW_MODE_NONE)
2919 is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2925 if (is->video_stream < 0 && is->audio_stream < 0) {
2932 if (infinite_buffer < 0 && is->realtime)
2936 if (is->abort_request)
2938 if (is->paused != is->last_paused) {
2939 is->last_paused = is->paused;
2945 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2956 int64_t seek_target = is->seek_pos;
2957 int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2958 int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2965 "%s: error while seeking\n", is->ic->url);
2967 if (is->audio_stream >= 0)
2969 if (is->subtitle_stream >= 0)
2971 if (is->video_stream >= 0)
2980 is->queue_attachments_req = 1;
2985 if (is->queue_attachments_req) {
2992 is->queue_attachments_req = 0;
3002 SDL_LockMutex(wait_mutex);
3003 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3004 SDL_UnlockMutex(wait_mutex);
3020 if (is->video_stream >= 0)
3022 if (is->audio_stream >= 0)
3024 if (is->subtitle_stream >= 0)
3034 SDL_LockMutex(wait_mutex);
3035 SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3036 SDL_UnlockMutex(wait_mutex);
3045 (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3071 event.user.data1 = is;
3072 SDL_PushEvent(&event);
3074 SDL_DestroyMutex(wait_mutex);
3085 is->last_video_stream = is->video_stream = -1;
3086 is->last_audio_stream = is->audio_stream = -1;
3087 is->last_subtitle_stream = is->subtitle_stream = -1;
3108 if (!(is->continue_read_thread = SDL_CreateCond())) {
3116 is->audio_clock_serial = -1;
3127 if (!is->read_tid) {
3139 int start_index, stream_index;
3146 start_index = is->last_video_stream;
3147 old_index = is->video_stream;
3149 start_index = is->last_audio_stream;
3150 old_index = is->audio_stream;
3152 start_index = is->last_subtitle_stream;
3153 old_index = is->subtitle_stream;
3155 stream_index = start_index;
3161 for (start_index = 0; start_index < nb_streams; start_index++)
3166 stream_index = start_index;
3176 is->last_subtitle_stream = -1;
3179 if (start_index == -1)
3183 if (stream_index == start_index)
3185 st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3203 if (p && stream_index != -1)
3223 int next = is->show_mode;
3225 next = (next + 1) % SHOW_MODE_NB;
3226 } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3227 if (is->show_mode != next) {
3228 is->force_refresh = 1;
3229 is->show_mode = next;
3234 double remaining_time = 0.0;
3236 while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3241 if (remaining_time > 0.0)
3242 av_usleep((int64_t)(remaining_time * 1000000.0));
3244 if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3255 if (!is->ic->nb_chapters)
3259 for (i = 0; i < is->ic->nb_chapters; i++) {
3269 if (i >= is->ic->nb_chapters)
3281 double incr, pos, frac;
3286 switch (event.type) {
3288 if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3293 if (!cur_stream->width)
3295 switch (event.key.keysym.sym) {
3307 case SDLK_KP_MULTIPLY:
3311 case SDLK_KP_DIVIDE:
3334 if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3335 if (++cur_stream->vfilter_idx >= nb_vfilters)
3336 cur_stream->vfilter_idx = 0;
3338 cur_stream->vfilter_idx = 0;
3399 case SDL_MOUSEBUTTONDOWN:
3404 if (event.button.button == SDL_BUTTON_LEFT) {
3405 static int64_t last_mouse_left_click = 0;
3409 last_mouse_left_click = 0;
3414 case SDL_MOUSEMOTION:
3420 if (event.type == SDL_MOUSEBUTTONDOWN) {
3421 if (event.button.button != SDL_BUTTON_RIGHT)
3425 if (!(event.motion.state & SDL_BUTTON_RMASK))
3435 int tns, thh, tmm, tss;
3438 tmm = (tns % 3600) / 60;
3440 frac = x / cur_stream->width;
3443 mm = (ns % 3600) / 60;
3446 "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3447 hh, mm, ss, thh, tmm, tss);
3454 case SDL_WINDOWEVENT:
3455 switch (event.window.event) {
3456 case SDL_WINDOWEVENT_SIZE_CHANGED:
3463 case SDL_WINDOWEVENT_EXPOSED:
3513 if (!strcmp(arg, "audio"))
3515 else if (!strcmp(arg, "video"))
3517 else if (!strcmp(arg, "ext"))
3541 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3542 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3551 "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3555 if (!strcmp(filename, "-"))
3562 const char *spec = strchr(opt, ':');
3565 "No media specifier was specified in '%s' in option '%s'\n",
3576 "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3586 { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3587 { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3596 { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3597 { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3611 { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3622 { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3623 { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3626 { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3628 { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3629 { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3635 "read and decode the streams to fill missing information with heuristics" },
3656 #if !CONFIG_AVFILTER
3661 printf("\nWhile playing:\n"
3663 "f toggle full screen\n"
3666 "9, 0 decrease and increase volume respectively\n"
3667 "/, * decrease and increase volume respectively\n"
3668 "a cycle audio channel in the current program\n"
3669 "v cycle video channel\n"
3670 "t cycle subtitle channel in the current program\n"
3672 "w cycle video filters or show modes\n"
3673 "s activate frame-step mode\n"
3674 "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3675 "down/up seek backward/forward 1 minute\n"
3676 "page down/page up seek backward/forward 10 minutes\n"
3677 "right mouse click seek to percentage in file corresponding to fraction of width\n"
3678 "left double-click toggle full screen\n"
3712 "Use -h to get full help or, even better, run 'man %s'\n",
program_name);
3719 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3721 flags &= ~SDL_INIT_AUDIO;
3725 if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3726 SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3729 flags &= ~SDL_INIT_VIDEO;
3730 if (SDL_Init (flags)) {
3736 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3737 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3740 int flags = SDL_WINDOW_HIDDEN;
3742 #if SDL_VERSION_ATLEAST(2,0,5)
3743 flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3745 av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3748 flags |= SDL_WINDOW_BORDERLESS;
3750 flags |= SDL_WINDOW_RESIZABLE;
3752 SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3754 renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
static void do_exit(VideoState *is)
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static SDL_RendererInfo renderer_info
int configure_filtergraph(FilterGraph *fg)
static int frame_queue_nb_remaining(FrameQueue *f)
static void frame_queue_next(FrameQueue *f)
enum AVMediaType codec_type
General type of the encoded data.
int nb_threads
Maximum number of threads used by filters in this graph.
uint64_t channel_layout
Audio channel layout.
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
unsigned int nb_stream_indexes
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
static int64_t frame_queue_last_pos(FrameQueue *f)
int sample_rate
samples per second
#define FFSWAP(type, a, b)
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
static int video_thread(void *arg)
The official guide to swscale for confused that is
static void set_default_window_size(int width, int height, AVRational sar)
#define AV_NOSYNC_THRESHOLD
unsigned int nb_chapters
Number of chapters in AVChapter array.
This struct describes the properties of an encoded stream.
#define AV_LOG_QUIET
Print no output.
static float sub(float src0, float src1)
static enum AVSampleFormat sample_fmts[]
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
#define AVERROR_EOF
End of file.
int av_fifo_grow(AVFifoBuffer *f, unsigned int size)
Enlarge an AVFifoBuffer.
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
static int display_disable
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
#define SAMPLE_ARRAY_SIZE
static void update_volume(VideoState *is, int sign, double step)
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
char * av_asprintf(const char *fmt,...)
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
SDL_Texture * vis_texture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
This structure describes decoded (raw) audio or video data.
AVStream ** streams
A list of all streams in the file.
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
double frame_last_filter_delay
@ AVCOL_RANGE_JPEG
Full range content.
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
static const char * subtitle_codec_name
#define EXTERNAL_CLOCK_MIN_FRAMES
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
static void frame_queue_destory(FrameQueue *f)
#define SAMPLE_QUEUE_SIZE
const char program_name[]
program name, defined by the program for show_version().
AVDictionary * format_opts
int error
contains the error code or 0 if no error happened
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
#define AV_PIX_FMT_RGB32_1
double audio_diff_avg_coef
#define AV_LOG_VERBOSE
Detailed information.
#define CURSOR_HIDE_DELAY
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
static double compute_target_delay(double delay, VideoState *is)
static void stream_close(VideoState *is)
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
static void init_clock(Clock *c, int *queue_serial)
enum AVMediaType codec_type
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
static int opt_seek(void *optctx, const char *opt, const char *arg)
int64_t avio_size(AVIOContext *s)
Get the filesize.
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
static double get_master_clock(VideoState *is)
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
static int subtitle_thread(void *arg)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
static int subtitle_disable
struct SwrContext * swr_ctx
static int opt_sync(void *optctx, const char *opt, const char *arg)
static void step_to_next_frame(VideoState *is)
enum AVPixelFormat format
static void video_display(VideoState *is)
uint8_t max_lowres
maximum value for lowres supported by the decoder
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
#define SDL_AUDIO_MIN_BUFFER_SIZE
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
static int startup_volume
static SDL_Window * window
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
static void toggle_full_screen(VideoState *is)
ff_const59 struct AVInputFormat * iformat
The input container format.
static int packet_queue_init(PacketQueue *q)
#define AUDIO_DIFF_AVG_NB
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
static int opt_duration(void *optctx, const char *opt, const char *arg)
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
int x
top left corner of pict, undefined when pict is not set
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
#define AVERROR_OPTION_NOT_FOUND
Option not found.
#define AV_BPRINT_SIZE_AUTOMATIC
static void video_image_display(VideoState *is)
static double val(void *priv, double ch)
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
SDL_cond * empty_queue_cond
static void set_clock_speed(Clock *c, double speed)
double audio_diff_threshold
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
#define ss(width, name, subs,...)
int avformat_network_init(void)
Do global initialization of network libraries.
static int opt_height(void *optctx, const char *opt, const char *arg)
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static const struct TextureFormatEntry sdl_texture_format_map[]
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
static int is_full_screen
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
AVDictionary * metadata
Metadata that applies to the whole file.
#define FF_ARRAY_ELEMS(a)
static int audio_thread(void *arg)
static void set_clock(Clock *c, double pts, int serial)
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
static Frame * frame_queue_peek_next(FrameQueue *f)
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
static void sync_clock_to_slave(Clock *c, Clock *slave)
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
static void opt_input_file(void *optctx, const char *filename)
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
static void frame_queue_signal(FrameQueue *f)
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
struct SwsContext * img_convert_ctx
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
#define AV_CEIL_RSHIFT(a, b)
static int default_height
int flags
Flags modifying the (de)muxer behaviour.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVRational sample_aspect_ratio
Video only.
int channels
number of audio channels, only used for audio.
#define AV_PIX_FMT_0BGR32
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
int y
top left corner of pict, undefined when pict is not set
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
static double av_q2d(AVRational a)
Convert an AVRational to a double.
#define EXTERNAL_CLOCK_SPEED_STEP
#define AV_CH_LAYOUT_STEREO_DOWNMIX
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
static const AVFilterPad outputs[]
static enum AVPixelFormat pix_fmts[]
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static int opt_codec(void *optctx, const char *opt, const char *arg)
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
static double get_clock(Clock *c)
#define EXTERNAL_CLOCK_SPEED_MIN
static unsigned sws_flags
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
static SDL_Renderer * renderer
int av_usleep(unsigned usec)
Sleep for a period of time.
The libswresample context.
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
#define AV_PIX_FMT_BGR32_1
void av_rdft_calc(RDFTContext *s, FFTSample *data)
static int synchronize_audio(VideoState *is, int nb_samples)
static const char * window_title
@ AVDISCARD_ALL
discard all
int av_log_get_level(void)
Get the current log level.
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
void init_dynload(void)
Initialize dynamic library loading.
AVCodecParameters * codecpar
Codec parameters associated with this stream.
int w
width of pict, undefined when pict is not set
static void seek_chapter(VideoState *is, int incr)
static int get_master_sync_type(VideoState *is)
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
static __device__ float fabs(float a)
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Rational number (pair of numerator and denominator).
AVFilterContext ** filters
static void stream_cycle_channel(VideoState *is, int codec_type)
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
AVIOContext * pb
I/O context.
void av_log_set_flags(int arg)
static void frame_queue_unref_item(Frame *vp)
Frame queue[FRAME_QUEUE_SIZE]
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
static int64_t cursor_last_shown
unsigned int * stream_index
static Frame * frame_queue_peek(FrameQueue *f)
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
double frame_last_returned_time
static void set_clock_at(Clock *c, double pts, int serial, double time)
static void toggle_pause(VideoState *is)
static int stream_component_open(VideoState *is, int stream_index)
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
#define AV_PIX_FMT_NE(be, le)
static void event_loop(VideoState *cur_stream)
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
int sample_rate
Audio only.
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
int16_t sample_array[SAMPLE_ARRAY_SIZE]
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
static int exit_on_mousedown
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
AVDictionary * codec_opts
static int64_t audio_callback_time
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
int flags2
AV_CODEC_FLAG2_*.
enum AVPictureType pict_type
Picture type of the frame.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
static Frame * frame_queue_peek_writable(FrameQueue *f)
int sample_rate
Sample rate of the audio data.
static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
static int64_t start_time
enum AVSampleFormat sample_fmt
audio sample format
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
static av_const double hypot(double x, double y)
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
static AVRational av_make_q(int num, int den)
Create an AVRational.
static int read_thread(void *arg)
#define AV_PIX_FMT_BGR555
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
#define AV_NOPTS_VALUE
Undefined timestamp value.
SDL_Texture * sub_texture
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
SDL_Texture * vid_texture
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
printf("static const uint8_t my_array[100] = {\n")
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
static int infinite_buffer
double max_frame_duration
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
static void packet_queue_destroy(PacketQueue *q)
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
uint64_t channel_layout
Channel layout of the audio data.
static void toggle_mute(VideoState *is)
static void decoder_abort(Decoder *d, FrameQueue *fq)
static void video_refresh(void *opaque, double *remaining_time)
#define ns(max_value, name, subs,...)
static float seek_interval
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
static void frame_queue_push(FrameQueue *f)
static SDL_AudioDeviceID audio_dev
static void sigterm_handler(int sig)
#define AV_LOG_INFO
Standard information.
static void packet_queue_abort(PacketQueue *q)
static const char * video_codec_name
int channels
number of audio channels
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
static void packet_queue_flush(PacketQueue *q)
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
@ AVCOL_SPC_SMPTE240M
functionally identical to above
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
int queue_attachments_req
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! *ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
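The send/receive decoding calls are used as a pair; the sketch below shows the usual loop under the assumption that dec_ctx, pkt and frame were allocated and opened elsewhere (error handling abbreviated, names illustrative).

    #include <libavcodec/avcodec.h>

    static int decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt, AVFrame *frame)
    {
        int ret = avcodec_send_packet(dec_ctx, pkt);   /* pkt == NULL flushes the decoder */
        if (ret < 0)
            return ret;
        while ((ret = avcodec_receive_frame(dec_ctx, frame)) >= 0) {
            /* ... consume the frame ... */
            av_frame_unref(frame);
        }
        /* EAGAIN means "feed more input", EOF means the decoder is fully drained. */
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }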
ff_const59 AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
int nb_samples
number of audio samples (per channel) described by this frame
#define VIDEO_PICTURE_QUEUE_SIZE
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
enum VideoState::ShowMode show_mode
struct AudioParams audio_src
const int program_birth_year
program birth year, defined by the program for show_banner()
int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
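A typical open sequence pairs this call with avformat_find_stream_info() and avformat_close_input(); a minimal sketch assuming url names a readable input and no demuxer options are passed:

    #include <libavformat/avformat.h>

    static int open_source(const char *url)
    {
        AVFormatContext *ic = NULL;
        int ret = avformat_open_input(&ic, url, NULL, NULL);  /* NULL fmt: autodetect */
        if (ret < 0)
            return ret;                                  /* nothing to free on failure */
        if ((ret = avformat_find_stream_info(ic, NULL)) < 0) {
            avformat_close_input(&ic);                   /* frees ic and NULLs the pointer */
            return ret;
        }
        /* ... use ic->nb_streams / ic->streams[i]->codecpar ... */
        avformat_close_input(&ic);
        return 0;
    }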
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
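The string form is usually wired between a pre-created buffer source and buffer sink; a condensed sketch assuming buffersrc_ctx and buffersink_ctx already exist in graph (allocation checks omitted):

    #include <libavfilter/avfilter.h>
    #include <libavutil/mem.h>

    static int parse_filters(AVFilterGraph *graph, const char *filters,
                             AVFilterContext *buffersrc_ctx, AVFilterContext *buffersink_ctx)
    {
        AVFilterInOut *outputs = avfilter_inout_alloc();  /* open pads of the pre-built part */
        AVFilterInOut *inputs  = avfilter_inout_alloc();
        int ret;

        outputs->name       = av_strdup("in");   /* source output feeds the parsed chain's "in" */
        outputs->filter_ctx = buffersrc_ctx;
        outputs->pad_idx    = 0;
        outputs->next       = NULL;

        inputs->name        = av_strdup("out");  /* parsed chain's "out" feeds the sink */
        inputs->filter_ctx  = buffersink_ctx;
        inputs->pad_idx     = 0;
        inputs->next        = NULL;

        ret = avfilter_graph_parse_ptr(graph, filters, &inputs, &outputs, NULL);
        if (ret >= 0)
            ret = avfilter_graph_config(graph, NULL);

        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);
        return ret;
    }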
static int compute_mod(int a, int b)
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
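The usual pattern copies the demuxer's codec parameters into a freshly allocated context before opening it; a sketch with minimal error handling (names illustrative):

    #include <libavformat/avformat.h>
    #include <libavcodec/avcodec.h>

    static AVCodecContext *open_decoder_for(AVStream *st)
    {
        AVCodec *codec = avcodec_find_decoder(st->codecpar->codec_id);
        AVCodecContext *avctx = codec ? avcodec_alloc_context3(codec) : NULL;

        if (!avctx)
            return NULL;
        if (avcodec_parameters_to_context(avctx, st->codecpar) < 0 ||
            avcodec_open2(avctx, codec, NULL) < 0) {
            avcodec_free_context(&avctx);
            return NULL;
        }
        return avctx;                 /* caller frees with avcodec_free_context() */
    }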
#define AV_TIME_BASE
Internal time base represented as integer.
uint8_t ** extended_data
pointers to the data planes/channels.
static AVInputFormat * file_iformat
#define av_malloc_array(a, b)
static int video_open(VideoState *is)
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
static int opt_format(void *optctx, const char *opt, const char *arg)
AVSampleFormat
Audio sample formats.
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, const AVCodec *codec)
Filter out options for given codec.
#define AV_PIX_FMT_RGB555
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
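Together these two calls hand ownership of a decoded frame to a queue slot without copying pixel data; a minimal sketch with a hypothetical slot frame:

    #include <libavutil/frame.h>

    static void hand_over(AVFrame *slot, AVFrame *src)
    {
        av_frame_unref(slot);          /* drop whatever the slot still referenced */
        av_frame_move_ref(slot, src);  /* slot now owns the buffers; src is reset and reusable */
    }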
static void update_sample_display(VideoState *is, short *samples, int samples_size)
@ AV_SAMPLE_FMT_S16
signed 16 bits
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
New fields can be added to the end with minor version bumps.
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
#define AV_PIX_FMT_BGR565
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
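For example, the size of an interleaved buffer follows from channels × samples × bytes per sample; a one-line sketch (inside some function):

    /* 2 channels * 1024 samples * 2 bytes (S16), align=1 (no padding) => 4096 bytes */
    int size = av_samples_get_buffer_size(NULL, 2, 1024, AV_SAMPLE_FMT_S16, 1);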
const char * name
Pad name.
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
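A common use is converting a stream timestamp into AV_TIME_BASE (microsecond) units without overflow; a sketch assuming st and pts come from the demuxer:

    /* pts * (time_base.num * 1000000) / time_base.den, rounded to nearest */
    int64_t usec = av_rescale(pts, (int64_t)st->time_base.num * AV_TIME_BASE,
                              st->time_base.den);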
static Frame * frame_queue_peek_readable(FrameQueue *f)
#define AV_PIX_FMT_RGB565
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
int disposition
AV_DISPOSITION_* bit field.
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
#define EXTERNAL_CLOCK_MAX_FRAMES
int h
height of pict, undefined when pict is not set
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
these buffered frames must be flushed immediately if a new input produces new output. If the input frames are not enough to produce output, the filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly, until at least one frame has been pushed, or at least make progress towards producing a frame.
#define AV_PIX_FMT_0RGB32
static AVStream * video_stream
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
static int filter_nbthreads
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
static int find_stream_info
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
static AVInputFormat * iformat
void av_bprintf(AVBPrint *buf, const char *fmt,...)
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
static int opt_width(void *optctx, const char *opt, const char *arg)
int main(int argc, char **argv)
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
static void show_usage(void)
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
main external API structure.
#define CMDUTILS_COMMON_OPTIONS
static void packet_queue_start(PacketQueue *q)
static const char * audio_codec_name
double get_rotation(AVStream *st)
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
#define AV_SYNC_FRAMEDUP_THRESHOLD
static enum ShowMode show_mode
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
static const OptionDef options[]
static void fill_rectangle(int x, int y, int w, int h)
unsigned int audio_buf1_size
#define AV_SYNC_THRESHOLD_MAX
int av_buffersink_get_channels(const AVFilterContext *ctx)
static void decoder_destroy(Decoder *d)
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
int eof_reached
true if we were unable to read due to an error or EOF
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
In the libavfilter documentation, the word "frame" indicates either a video frame or a group of audio samples.
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
#define GROW_ARRAY(array, nb_elems)
#define SUBPICTURE_QUEUE_SIZE
static const char * input_filename
static void stream_toggle_pause(VideoState *is)
SDL_cond * continue_read_thread
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it.
static void toggle_audio_display(VideoState *is)
enum AVMediaType codec_type
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled; these will be parsed through AVOptions.
char * av_strdup(const char *s)
Duplicate a string.
static int get_video_frame(VideoState *is, AVFrame *frame)
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
struct SwsContext * sub_convert_ctx
static av_always_inline int diff(const uint32_t a, const uint32_t b)
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
This structure stores compressed data.
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
static const uint16_t channel_layouts[7]
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
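Together with av_dict_set_int() this is the usual way to build an options dictionary before opening a context; a sketch inside some setup function (the option names shown are only examples):

    #include <libavutil/dict.h>

    AVDictionary *opts = NULL;
    av_dict_set(&opts, "rtsp_transport", "tcp", 0);            /* string-valued option */
    av_dict_set_int(&opts, "probesize", 5 * 1024 * 1024, 0);   /* integer-valued option */
    /* ... pass &opts to e.g. avformat_open_input(); consumed entries are removed ... */
    av_dict_free(&opts);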
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
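This implements a grow-only scratch buffer: the block is reallocated only when the requested size exceeds the recorded capacity. A minimal sketch (names illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <libavutil/mem.h>

    static uint8_t     *scratch      = NULL;
    static unsigned int scratch_size = 0;       /* capacity in bytes, updated by the call */

    static uint8_t *get_scratch(size_t needed)
    {
        av_fast_malloc(&scratch, &scratch_size, needed);  /* no-op if already big enough */
        return scratch;        /* NULL (and scratch_size == 0) if the allocation failed */
    }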
static void stream_component_close(VideoState *is, int stream_index)
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
unsigned int audio_buf_size
#define flags(name, subs,...)
void av_rdft_end(RDFTContext *s)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
static Frame * frame_queue_peek_last(FrameQueue *f)
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
static int decoder_reorder_pts
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
struct AudioParams audio_tgt
AVRational time_base
time base in which the start/end timestamps are specified
static AVStream * audio_stream
const AVClass * avfilter_get_class(void)
A linked-list of the inputs/outputs of the filter chain.
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field; just let it be. In the option table (default, minimum, maximum, flags), the name is the option name: keep it simple and lowercase, and keep the descriptions short.
static void video_audio_display(VideoState *s)
#define AV_SYNC_THRESHOLD_MIN
static void check_external_clock_speed(VideoState *is)
uint32_t start_display_time
#define SAMPLE_CORRECTION_PERCENT_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
static int is_realtime(AVFormatContext *s)
static void * av_x_if_null(const void *p, const void *x)
Return x as a default pointer in case p is NULL.
static int decode_interrupt_cb(void *ctx)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
#define AV_PIX_FMT_RGB444
static int exit_on_keydown