#include <SDL_thread.h>

#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define EXTERNAL_CLOCK_MIN_FRAMES 2
#define EXTERNAL_CLOCK_MAX_FRAMES 10

/* minimum SDL audio buffer size, in samples */
#define SDL_AUDIO_MIN_BUFFER_SIZE 512
/* upper bound on audio callback frequency, used when sizing the audio buffer */
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30

/* volume control step, in dB */
#define SDL_VOLUME_STEP (0.75)

/* no AV sync correction is done if the clock error is below this threshold */
#define AV_SYNC_THRESHOLD_MIN 0.04
/* AV sync correction is always done if the clock error is above this threshold */
#define AV_SYNC_THRESHOLD_MAX 0.1
/* frames are not duplicated to compensate AV sync if their duration exceeds this */
#define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
/* no AV correction is done at all if the error is larger than this */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change applied to regain sync, in percent */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* external clock speed adjustment constants */
#define EXTERNAL_CLOCK_SPEED_MIN  0.900
#define EXTERNAL_CLOCK_SPEED_MAX  1.010
#define EXTERNAL_CLOCK_SPEED_STEP 0.001

/* number of A-V differences averaged when correcting audio sync */
#define AUDIO_DIFF_AVG_NB   20

/* polling interval for a possible screen refresh, in seconds */
#define REFRESH_RATE 0.01

#define SAMPLE_ARRAY_SIZE (8 * 65536)

#define CURSOR_HIDE_DELAY 1000000

#define USE_ONEPASS_SUBTITLE_RENDER 1

#define VIDEO_PICTURE_QUEUE_SIZE 3
#define SUBPICTURE_QUEUE_SIZE 16
#define SAMPLE_QUEUE_SIZE 9
#define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
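/*
 * The queue-size and sync-threshold macros above resolve to plain numbers.
 * A minimal standalone sketch (FFMAX/FFMIN redefined locally, nothing taken
 * from libavutil) showing the resulting frame queue size and one common way
 * such min/max thresholds bound a per-frame delay; the clamp below is an
 * illustration, not a quote of ffplay's exact code.
 */
#include <stdio.h>

#define FFMAX(a, b) ((a) > (b) ? (a) : (b))
#define FFMIN(a, b) ((a) > (b) ? (b) : (a))

int main(void)
{
    /* FRAME_QUEUE_SIZE = FFMAX(9, FFMAX(3, 16)) = 16 */
    printf("FRAME_QUEUE_SIZE = %d\n", FFMAX(9, FFMAX(3, 16)));

    /* a hypothetical 250 ms frame delay, clamped into [0.04, 0.1] */
    double delay = 0.250;
    double sync_threshold = FFMAX(0.04, FFMIN(0.1, delay));
    printf("sync_threshold = %.2f\n", sync_threshold);   /* prints 0.10 */
    return 0;
}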
static const char **vfilters_list = NULL;
static int nb_vfilters = 0;
static char *afilters = NULL;

#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
    vfilters_list[nb_vfilters - 1] = arg;

if (channel_count1 == 1 && channel_count2 == 1)
    return channel_count1 != channel_count2 || fmt1 != fmt2;
return channel_layout;
/* PacketQueue helpers (packet_queue_init / put / flush / abort / destroy / get):
 * mutex and condition-variable handling fragments */
SDL_CondSignal(q->cond);

SDL_LockMutex(q->mutex);
SDL_UnlockMutex(q->mutex);

q->mutex = SDL_CreateMutex();
q->cond = SDL_CreateCond();

SDL_LockMutex(q->mutex);
SDL_UnlockMutex(q->mutex);

SDL_DestroyMutex(q->mutex);
SDL_DestroyCond(q->cond);

SDL_LockMutex(q->mutex);
SDL_CondSignal(q->cond);
SDL_UnlockMutex(q->mutex);

SDL_LockMutex(q->mutex);
SDL_UnlockMutex(q->mutex);

SDL_LockMutex(q->mutex);
SDL_UnlockMutex(q->mutex);
/* decoder_init / decoder_decode_frame fragments */
d->empty_queue_cond = empty_queue_cond;

if (d->queue->serial == d->pkt_serial) {
    if (d->queue->abort_request)
    switch (d->avctx->codec_type) {
    d->finished = d->pkt_serial;

if (d->queue->nb_packets == 0)
    SDL_CondSignal(d->empty_queue_cond);
if (d->packet_pending) {
    d->packet_pending = 0;
    int old_serial = d->pkt_serial;
    if (old_serial != d->pkt_serial) {
        d->next_pts    = d->start_pts;
        d->next_pts_tb = d->start_pts_tb;
if (d->queue->serial == d->pkt_serial)
if (got_frame && !d->pkt->data) {
    d->packet_pending = 1;
av_log(d->avctx, AV_LOG_ERROR,
       "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
d->packet_pending = 1;
/* frame_queue_init */
if (!(f->mutex = SDL_CreateMutex())) {
if (!(f->cond = SDL_CreateCond())) {
f->keep_last = !!keep_last;
for (i = 0; i < f->max_size; i++)

/* frame_queue_destory */
for (i = 0; i < f->max_size; i++) {
SDL_DestroyMutex(f->mutex);
SDL_DestroyCond(f->cond);

/* frame_queue_signal */
SDL_LockMutex(f->mutex);
SDL_CondSignal(f->cond);
SDL_UnlockMutex(f->mutex);

/* frame_queue_peek / frame_queue_peek_next / frame_queue_peek_last */
return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
return &f->queue[f->rindex];

/* frame_queue_peek_writable */
SDL_LockMutex(f->mutex);
while (f->size >= f->max_size &&
       !f->pktq->abort_request) {
    SDL_CondWait(f->cond, f->mutex);
SDL_UnlockMutex(f->mutex);
if (f->pktq->abort_request)
return &f->queue[f->windex];

/* frame_queue_peek_readable */
SDL_LockMutex(f->mutex);
while (f->size - f->rindex_shown <= 0 &&
       !f->pktq->abort_request) {
    SDL_CondWait(f->cond, f->mutex);
SDL_UnlockMutex(f->mutex);
if (f->pktq->abort_request)
return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];

/* frame_queue_push */
if (++f->windex == f->max_size)
SDL_LockMutex(f->mutex);
SDL_CondSignal(f->cond);
SDL_UnlockMutex(f->mutex);

/* frame_queue_next */
if (f->keep_last && !f->rindex_shown) {
if (++f->rindex == f->max_size)
SDL_LockMutex(f->mutex);
SDL_CondSignal(f->cond);
SDL_UnlockMutex(f->mutex);

/* frame_queue_nb_remaining / frame_queue_last_pos */
return f->size - f->rindex_shown;
if (f->rindex_shown && fp->serial == f->pktq->serial)

/* decoder_abort */
SDL_WaitThread(d->decoder_tid, NULL);
d->decoder_tid = NULL;
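/*
 * The frame_queue_* fragments above implement a ring buffer in which
 * rindex_shown records whether the frame at rindex has already been
 * displayed (the keep_last case).  A self-contained sketch of just that
 * indexing, with a plain int standing in for Frame; MiniQueue and its
 * helpers are illustrative names, not part of ffplay.
 */
#include <stdio.h>

#define MAX_SIZE 4                       /* stand-in for f->max_size */

typedef struct {
    int queue[MAX_SIZE];
    int rindex, windex, size, rindex_shown, keep_last;
} MiniQueue;

static int *peek(MiniQueue *f)           /* next frame to display */
{
    return &f->queue[(f->rindex + f->rindex_shown) % MAX_SIZE];
}

static void push(MiniQueue *f, int v)    /* producer side */
{
    f->queue[f->windex] = v;
    if (++f->windex == MAX_SIZE)
        f->windex = 0;
    f->size++;
}

static void next(MiniQueue *f)           /* consumer side */
{
    if (f->keep_last && !f->rindex_shown) {
        f->rindex_shown = 1;             /* keep the frame for redisplay */
        return;
    }
    if (++f->rindex == MAX_SIZE)
        f->rindex = 0;
    f->size--;
}

int main(void)
{
    MiniQueue q = { .keep_last = 1 };
    push(&q, 10);
    push(&q, 20);
    printf("%d\n", *peek(&q));   /* 10 */
    next(&q);                    /* first call only marks frame 10 as shown */
    printf("%d\n", *peek(&q));   /* 20: peek skips the already-shown frame  */
    return 0;
}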
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width,
                           int new_height, SDL_BlendMode blendmode, int init_texture)
if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 ||
    new_width != w || new_height != h || new_format != format) {
    SDL_DestroyTexture(*texture);
    if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
    if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
    if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
    memset(pixels, 0, pitch * new_height);
    SDL_UnlockTexture(*texture);
av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n",
       new_width, new_height, SDL_GetPixelFormatName(new_format));
/* calculate_display_rect fragments */
                                   int scr_xleft, int scr_ytop, int scr_width, int scr_height,
                                   int pic_width, int pic_height, AVRational pic_sar)
if (width > scr_width) {
x = (scr_width  - width)  / 2;
y = (scr_height - height) / 2;
rect->x = scr_xleft + x;
rect->y = scr_ytop  + y;

/* get_sdl_pix_fmt_and_blendmode fragments */
*sdl_blendmode = SDL_BLENDMODE_NONE;
*sdl_pix_fmt   = SDL_PIXELFORMAT_UNKNOWN;
*sdl_blendmode = SDL_BLENDMODE_BLEND;
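/*
 * calculate_display_rect() above centers the scaled picture on screen.  The
 * sketch below carries the computation end to end under the assumption that
 * the picture is scaled to the largest size that fits the screen while
 * honoring the sample aspect ratio; fit_and_center() is an illustrative
 * helper, not ffplay's function.
 */
#include <stdio.h>

static void fit_and_center(int scr_width, int scr_height,
                           int pic_width, int pic_height,
                           int sar_num, int sar_den,
                           int *x, int *y, int *w, int *h)
{
    /* display aspect ratio = (pic_width * SAR) / pic_height */
    double dar = (double)pic_width * sar_num / ((double)pic_height * sar_den);

    int height = scr_height;
    int width  = (int)(height * dar) & ~1;   /* keep dimensions even */
    if (width > scr_width) {
        width  = scr_width;
        height = (int)(width / dar) & ~1;
    }
    *w = width;
    *h = height;
    *x = (scr_width  - width)  / 2;
    *y = (scr_height - height) / 2;
}

int main(void)
{
    int x, y, w, h;
    /* 720x576 PAL material with SAR 64:45 (16:9) on a 1280x720 screen */
    fit_and_center(1280, 720, 720, 576, 64, 45, &x, &y, &w, &h);
    printf("rect = %d,%d %dx%d\n", x, y, w, h);   /* rect = 0,0 1280x720 */
    return 0;
}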
/* upload_texture fragments */
SDL_BlendMode sdl_blendmode;
if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt,
                    frame->width, frame->height, sdl_blendmode, 0) < 0)
switch (sdl_pix_fmt) {
case SDL_PIXELFORMAT_UNKNOWN:
    if (*img_convert_ctx != NULL) {
        if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
                      0, frame->height, pixels, pitch);
            SDL_UnlockTexture(*tex);
case SDL_PIXELFORMAT_IYUV:
    if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
    } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
    if (frame->linesize[0] < 0) {

/* set_sdl_yuv_conversion_mode fragments */
#if SDL_VERSION_ATLEAST(2,0,8)
SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
mode = SDL_YUV_CONVERSION_JPEG;
mode = SDL_YUV_CONVERSION_BT709;
mode = SDL_YUV_CONVERSION_BT601;
SDL_SetYUVConversionMode(mode);
/* video_image_display fragments: upload and draw the current subtitle */
if (is->subtitle_st) {
    if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
        if (!sp->width || !sp->height) {
        if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888,
                            sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
        for (i = 0; i < sp->sub.num_rects; i++) {
            sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
            sub_rect->w = av_clip(sub_rect->w, 0, sp->width  - sub_rect->x);
            sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
            if (!is->sub_convert_ctx) {
            if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
                sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
                          0, sub_rect->h, pixels, pitch);
                SDL_UnlockTexture(is->sub_texture);

#if USE_ONEPASS_SUBTITLE_RENDER
    double xratio = (double)rect.w / (double)sp->width;
    double yratio = (double)rect.h / (double)sp->height;
    for (i = 0; i < sp->sub.num_rects; i++) {
        SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
        SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
                           .y = rect.y + sub_rect->y * yratio,
                           .w = sub_rect->w * xratio,
                           .h = sub_rect->h * yratio};
        SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
/* compute_mod: modulus that is non-negative for b > 0 */
return a < 0 ? a % b + b : a % b;
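/*
 * Unlike C's % operator, the expression above always yields a value in
 * [0, b) for b > 0, which is useful for wrap-around indexing.  A quick
 * standalone check:
 */
#include <stdio.h>

static int compute_mod(int a, int b)
{
    return a < 0 ? a % b + b : a % b;
}

int main(void)
{
    printf("%d\n", -3 % 8);              /* -3: C's remainder keeps the sign */
    printf("%d\n", compute_mod(-3, 8));  /*  5: wrapped into [0, 8)          */
    return 0;
}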
/* video_audio_display fragments: waveform and RDFT visualization */
int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
int rdft_bits, nb_freq;
for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
nb_freq = 1 << (rdft_bits - 1);

int data_used = s->show_mode == SHOW_MODE_WAVES ? s->width : (2 * nb_freq);
delay = s->audio_write_buf_size;
delay -= (time_diff * s->audio_tgt.freq) / 1000000;
delay += 2 * data_used;
if (delay < data_used)

if (s->show_mode == SHOW_MODE_WAVES) {
    int a = s->sample_array[idx];
    if (h < score && (b ^ c) < 0) {
    s->last_i_start = i_start;
i_start = s->last_i_start;

if (s->show_mode == SHOW_MODE_WAVES) {
    SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
    h = s->height / nb_display_channels;
    for (ch = 0; ch < nb_display_channels; ch++) {
        y1 = s->ytop + ch * h + (h / 2);
        for (x = 0; x < s->width; x++) {
            y = (s->sample_array[i] * h2) >> 15;
    SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
    for (ch = 1; ch < nb_display_channels; ch++) {
        y = s->ytop + ch * h;

if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888,
                    s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
if (s->xpos >= s->width)
nb_display_channels = FFMIN(nb_display_channels, 2);
if (rdft_bits != s->rdft_bits) {
    s->rdft_bits = rdft_bits;
if (!s->rdft || !s->rdft_data) {
    s->show_mode = SHOW_MODE_WAVES;
SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
for (ch = 0; ch < nb_display_channels; ch++) {
    data[ch] = s->rdft_data + 2 * nb_freq * ch;
    for (x = 0; x < 2 * nb_freq; x++) {
        double w = (x - nb_freq) * (1.0 / nb_freq);
        data[ch][x] = s->sample_array[i] * (1.0 - w * w);
if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
    pixels += pitch * s->height;
    for (y = 0; y < s->height; y++) {
        double w = 1 / sqrt(nb_freq);
        int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] +
                              data[0][2 * y + 1] * data[0][2 * y + 1]));
        int b = (nb_display_channels == 2) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
        *pixels = (a << 16) + (b << 8) + ((a + b) >> 1);
    SDL_UnlockTexture(s->vis_texture);
/* stream_component_close / stream_close / do_exit fragments */
if (stream_index < 0 || stream_index >= ic->nb_streams)
is->audio_buf1_size = 0;
is->audio_stream    = -1;
is->video_stream    = -1;
is->subtitle_stream = -1;

is->abort_request = 1;
SDL_WaitThread(is->read_tid, NULL);
if (is->audio_stream >= 0)
if (is->video_stream >= 0)
if (is->subtitle_stream >= 0)
SDL_DestroyCond(is->continue_read_thread);
if (is->vis_texture)
    SDL_DestroyTexture(is->vis_texture);
if (is->vid_texture)
    SDL_DestroyTexture(is->vid_texture);
if (is->sub_texture)
    SDL_DestroyTexture(is->sub_texture);
SDL_DestroyWindow(window);
/* set_default_window_size / video_open / video_display fragments */
if (max_width == INT_MAX && max_height == INT_MAX)
SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
else if (is->video_st)
/* get_clock / set_clock_at / init_clock fragments */
if (*c->queue_serial != c->serial)
return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
c->last_updated = time;
c->pts_drift    = c->pts - time;
c->queue_serial = queue_serial;
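/*
 * get_clock() above extrapolates the clock from the point recorded by
 * set_clock_at(): pts_drift = pts - time at the moment of the update, and the
 * (1.0 - speed) term removes the wall-clock progress that the playback speed
 * does not account for.  A worked numeric sketch with illustrative values:
 */
#include <stdio.h>

int main(void)
{
    double set_pts   = 10.0;    /* pts passed to set_clock_at()            */
    double set_time  = 100.0;   /* wall-clock time of that update, seconds */
    double speed     = 1.0;     /* normal playback speed                   */
    double pts_drift = set_pts - set_time;   /* as in the fragment above   */

    double now   = 100.5;       /* half a second later */
    double clock = pts_drift + now - (now - set_time) * (1.0 - speed);

    printf("clock = %.3f\n", clock);   /* 10.500: the clock advanced 0.5 s */
    return 0;
}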
/* check_external_clock_speed / stream_seek / stream_toggle_pause fragments */
double speed = is->extclk.speed;
if (!is->seek_req) {
SDL_CondSignal(is->continue_read_thread);
if (is->read_pause_return != AVERROR(ENOSYS)) {
    is->vidclk.paused = 0;
is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;

/* toggle_mute / update_volume fragments */
is->muted = !is->muted;
double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
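/*
 * update_volume() above works in decibels: convert the current SDL volume to
 * dB, step it by sign * step (SDL_VOLUME_STEP is 0.75 dB), convert back and
 * clamp.  One step worked through, assuming SDL_MIX_MAXVOLUME is 128 as in
 * SDL2; compile with -lm.
 */
#include <math.h>
#include <stdio.h>

#define MIX_MAXVOLUME 128       /* SDL_MIX_MAXVOLUME in SDL2 */
#define VOLUME_STEP   0.75      /* dB per key press          */

int main(void)
{
    int audio_volume = 64;      /* half scale, roughly -6 dB */
    int sign = +1;              /* "volume up"               */

    double volume_level = audio_volume
        ? 20 * log(audio_volume / (double)MIX_MAXVOLUME) / log(10)
        : -1000.0;
    int new_volume = lrint(MIX_MAXVOLUME *
                           pow(10.0, (volume_level + sign * VOLUME_STEP) / 20.0));

    printf("%.2f dB -> %d\n", volume_level, new_volume);   /* -6.02 dB -> 70 */
    return 0;
}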
/* compute_target_delay / vp_duration fragments */
double sync_threshold, diff = 0;
if (diff <= -sync_threshold)
delay = delay + diff;
else if (diff >= sync_threshold)
if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)

/* video_refresh fragments */
if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
    is->last_vis_time = time;
*remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);

double last_duration, duration, delay;
if (vp->serial != is->videoq.serial) {
if (time < is->frame_timer + delay) {
    *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
is->frame_timer += delay;
is->frame_timer = time;
SDL_LockMutex(is->pictq.mutex);
SDL_UnlockMutex(is->pictq.mutex);
is->frame_drops_late++;

if (is->subtitle_st) {
    if (sp->serial != is->subtitleq.serial
        || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
    for (i = 0; i < sp->sub.num_rects; i++) {
        if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
            for (j = 0; j < sub_rect->h; j++, pixels += pitch)
                memset(pixels, 0, sub_rect->w << 2);
            SDL_UnlockTexture(is->sub_texture);

is->force_refresh = 1;
if (is->step && !is->paused)
if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
is->force_refresh = 0;
static int64_t last_time;
int aqsize, vqsize, sqsize;
if (!last_time || (cur_time - last_time) >= 30000) {
    aqsize = is->audioq.size;
    vqsize = is->videoq.size;
    if (is->subtitle_st)
        sqsize = is->subtitleq.size;
    if (is->audio_st && is->video_st)
    else if (is->video_st)
    else if (is->audio_st)
    "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
    (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
    is->frame_drops_early + is->frame_drops_late,
    is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
    is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
    fprintf(stderr, "%s", buf.str);
    last_time = cur_time;

#if defined(DEBUG_SYNC)
printf("frame_type=%c pts=%0.3f\n",

diff - is->frame_last_filter_delay < 0 &&
is->viddec.pkt_serial == is->vidclk.serial &&
is->videoq.nb_packets) {
    is->frame_drops_early++;
/* configure_filtergraph fragments */
int nb_filters = graph->nb_filters;
outputs->filter_ctx = source_ctx;
inputs->filter_ctx  = sink_ctx;
for (i = 0; i < graph->nb_filters - nb_filters; i++)

/* configure_video_filters fragments */
char sws_flags_str[512] = "";
char buffersrc_args[256];
int nb_pix_fmts = 0;
if (!strcmp(e->key, "sws_flags")) {
    av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
if (strlen(sws_flags_str))
    sws_flags_str[strlen(sws_flags_str)-1] = '\0';

snprintf(buffersrc_args, sizeof(buffersrc_args),
         "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
         is->video_st->time_base.num, is->video_st->time_base.den,
av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
         "ffplay_buffer", buffersrc_args, NULL,
last_filter = filt_out;

#define INSERT_FILT(name, arg) do {                                          \
    AVFilterContext *filt_ctx;                                               \
    ret = avfilter_graph_create_filter(&filt_ctx,                            \
                                       avfilter_get_by_name(name),           \
                                       "ffplay_" name, arg, NULL, graph);    \
    ret = avfilter_link(filt_ctx, 0, last_filter, 0);                        \
    last_filter = filt_ctx;                                                  \

if (fabs(theta - 90) < 1.0) {
    INSERT_FILT("transpose", "clock");
} else if (fabs(theta - 180) < 1.0) {
    INSERT_FILT("hflip", NULL);
    INSERT_FILT("vflip", NULL);
} else if (fabs(theta - 270) < 1.0) {
    INSERT_FILT("transpose", "cclock");
} else if (fabs(theta) > 1.0) {
    char rotate_buf[64];
    snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
    INSERT_FILT("rotate", rotate_buf);

is->in_video_filter  = filt_src;
is->out_video_filter = filt_out;
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
char aresample_swr_opts[512] = "";
char asrc_args[256];
if (strlen(aresample_swr_opts))
    aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
         "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
         is->audio_filter_src.channels,
         1, is->audio_filter_src.freq);
if (is->audio_filter_src.channel_layout)
             ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
         asrc_args, NULL, is->agraph);
if (force_output_format) {
    channels[0] = is->audio_tgt.channel_layout ? -1 : is->audio_tgt.channels;
is->in_audio_filter  = filt_asrc;
is->out_audio_filter = filt_asink;
/* audio_thread fragments: reconfigure the audio filter chain when the source format changes */
int last_serial = -1;
int64_t dec_channel_layout;
    is->audio_filter_src.channel_layout != dec_channel_layout ||
    is->audio_filter_src.freq           != frame->sample_rate ||
    is->auddec.pkt_serial               != last_serial;
char buf1[1024], buf2[1024];
    "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
is->audio_filter_src.fmt            = frame->format;
is->audio_filter_src.channels       = frame->channels;
is->audio_filter_src.channel_layout = dec_channel_layout;
is->audio_filter_src.freq           = frame->sample_rate;
last_serial                         = is->auddec.pkt_serial;
if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
af->serial = is->auddec.pkt_serial;
if (is->audioq.serial != is->auddec.pkt_serial)
is->auddec.finished = is->auddec.pkt_serial;

/* decoder_start fragments */
d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
if (!d->decoder_tid) {

/* video_thread fragments: rebuild the video filter graph on frame property changes */
int last_serial = -1;
int last_vfilter_idx = 0;
if (   last_w != frame->width
    || last_h != frame->height
    || last_format != frame->format
    || last_serial != is->viddec.pkt_serial
    || last_vfilter_idx != is->vfilter_idx) {
    "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
    if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
        event.user.data1 = is;
        SDL_PushEvent(&event);
    filt_in  = is->in_video_filter;
    filt_out = is->out_video_filter;
    last_w = frame->width;
    last_h = frame->height;
    last_format = frame->format;
    last_serial = is->viddec.pkt_serial;
    last_vfilter_idx = is->vfilter_idx;
is->viddec.finished = is->viddec.pkt_serial;
is->frame_last_filter_delay = 0;
if (is->videoq.serial != is->viddec.pkt_serial)
/* subtitle_thread fragments */
if (got_subtitle && sp->sub.format == 0) {
    sp->serial = is->subdec.pkt_serial;
    sp->width  = is->subdec.avctx->width;
    sp->height = is->subdec.avctx->height;
} else if (got_subtitle) {

/* update_sample_display fragments: copy samples into the visualization ring buffer */
memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
is->sample_array_index += len;
is->sample_array_index = 0;
/* synchronize_audio fragments: estimate the average A-V difference and adjust the sample count */
int wanted_nb_samples = nb_samples;
double diff, avg_diff;
int min_nb_samples, max_nb_samples;
is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
is->audio_diff_avg_count++;
avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
if (fabs(avg_diff) >= is->audio_diff_threshold) {
    wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
    wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
    diff, avg_diff, wanted_nb_samples - nb_samples,
    is->audio_clock, is->audio_diff_threshold);
is->audio_diff_avg_count = 0;
is->audio_diff_cum = 0;
return wanted_nb_samples;
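/*
 * The fragments above keep an exponential moving average of the audio/master
 * clock difference: audio_diff_cum = diff + coef * audio_diff_cum, read back
 * as audio_diff_cum * (1 - coef).  The coefficient below is an assumption
 * (chosen so the weight of an old sample decays to 1% after
 * AUDIO_DIFF_AVG_NB = 20 frames); the sketch only illustrates how the
 * estimator converges.  Compile with -lm.
 */
#include <math.h>
#include <stdio.h>

#define AUDIO_DIFF_AVG_NB 20

int main(void)
{
    double coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);   /* assumed decay factor */
    double cum = 0.0, avg = 0.0;

    /* feed a constant 5 ms A/V difference and watch the average settle */
    for (int i = 1; i <= 40; i++) {
        double diff = 0.005;
        cum = diff + coef * cum;        /* same update as audio_diff_cum   */
        avg = cum * (1.0 - coef);       /* same read-out as avg_diff above */
        if (i % 10 == 0)
            printf("after %2d frames: avg_diff = %.6f s\n", i, avg);
    }
    return 0;
}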
/* audio_decode_frame fragments: resample the decoded frame to the hardware format */
int data_size, resampled_data_size;
int64_t dec_channel_layout;
int wanted_nb_samples;
} while (af->serial != is->audioq.serial);
dec_channel_layout =
dec_channel_layout != is->audio_src.channel_layout ||
    is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
    "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
is->audio_src.channel_layout = dec_channel_layout;

uint8_t **out = &is->audio_buf1;
int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
if (!is->audio_buf1)
if (len2 == out_count) {
is->audio_buf = is->audio_buf1;
resampled_data_size = data_size;

audio_clock0 = is->audio_clock;
is->audio_clock = NAN;
is->audio_clock_serial = af->serial;
static double last_clock;
printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
       is->audio_clock - last_clock,
       is->audio_clock, audio_clock0);
last_clock = is->audio_clock;
return resampled_data_size;
/* sdl_audio_callback fragments: feed (or mix) decoded audio into the SDL buffer */
int audio_size, len1;
if (is->audio_buf_index >= is->audio_buf_size) {
    if (audio_size < 0) {
        if (is->show_mode != SHOW_MODE_VIDEO)
    is->audio_buf_size = audio_size;
    is->audio_buf_index = 0;
len1 = is->audio_buf_size - is->audio_buf_index;
if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
    memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
    memset(stream, 0, len1);
    if (!is->muted && is->audio_buf)
        SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
is->audio_buf_index += len1;
is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
if (!isnan(is->audio_clock)) {
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels,
                      int wanted_sample_rate, struct AudioParams *audio_hw_params)
SDL_AudioSpec wanted_spec, spec;
static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
env = SDL_getenv("SDL_AUDIO_CHANNELS");
wanted_nb_channels = atoi(env);
wanted_spec.channels = wanted_nb_channels;
wanted_spec.freq = wanted_sample_rate;
if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
    next_sample_rate_idx--;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.silence = 0;
wanted_spec.userdata = opaque;
while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec,
                                         SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
           wanted_spec.channels, wanted_spec.freq, SDL_GetError());
    wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
    if (!wanted_spec.channels) {
        wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
        wanted_spec.channels = wanted_nb_channels;
        if (!wanted_spec.freq) {
               "No more combinations to try, audio open failed\n");
if (spec.format != AUDIO_S16SYS) {
       "SDL advised audio format %d is not supported!\n", spec.format);
if (spec.channels != wanted_spec.channels) {
    if (!wanted_channel_layout) {
           "SDL advised channel count %d is not supported!\n", spec.channels);
audio_hw_params->freq = spec.freq;
audio_hw_params->channels = spec.channels;
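/*
 * audio_open() above retries SDL_OpenAudioDevice() with progressively fewer
 * channels (next_nb_channels[], indexed by the current channel count) and
 * then lower sample rates (next_sample_rates[]) until a combination opens.
 * A standalone sketch of just that fallback walk; try_open() is a stub that
 * stands in for SDL and only "accepts" stereo at 44100 Hz so the walk is
 * visible.
 */
#include <stdio.h>

#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

static const int next_nb_channels[]  = {0, 0, 1, 6, 2, 6, 4, 6};
static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};

static int try_open(int channels, int freq)
{
    return channels == 2 && freq == 44100;   /* hypothetical device */
}

int main(void)
{
    int wanted_nb_channels = 6, channels = 6, freq = 48000;
    int rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;

    while (rate_idx && next_sample_rates[rate_idx] >= freq)
        rate_idx--;

    while (!try_open(channels, freq)) {
        printf("failed: %d ch @ %d Hz\n", channels, freq);
        channels = next_nb_channels[channels < 7 ? channels : 7];
        if (!channels) {                     /* channel fallbacks exhausted */
            freq = next_sample_rates[rate_idx--];
            channels = wanted_nb_channels;
            if (!freq) {
                printf("no more combinations to try\n");
                return 1;
            }
        }
    }
    printf("opened: %d ch @ %d Hz\n", channels, freq);
    return 0;
}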
/* stream_component_open fragments */
const char *forced_codec_name = NULL;
int64_t channel_layout;
int stream_lowres = lowres;
if (stream_index < 0 || stream_index >= ic->nb_streams)
if (forced_codec_name)
    "No codec could be found with name '%s'\n", forced_codec_name);
avctx->lowres = stream_lowres;

is->audio_filter_src.channels = avctx->channels;
if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
sink = is->out_audio_filter;
is->audio_hw_buf_size = ret;
is->audio_src = is->audio_tgt;
is->audio_buf_size  = 0;
is->audio_buf_index = 0;
is->audio_diff_avg_count = 0;
is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
is->audio_stream = stream_index;
is->audio_st = ic->streams[stream_index];
is->auddec.start_pts    = is->audio_st->start_time;
is->auddec.start_pts_tb = is->audio_st->time_base;

is->video_stream = stream_index;
is->video_st = ic->streams[stream_index];
is->queue_attachments_req = 1;

is->subtitle_stream = stream_index;
is->subtitle_st = ic->streams[stream_index];
return is->abort_request;

return stream_id < 0 ||

if (   !strcmp(s->iformat->name, "rtp")
    || !strcmp(s->iformat->name, "rtsp")
    || !strcmp(s->iformat->name, "sdp")
if (s->pb && (   !strncmp(s->url, "rtp:", 4)
              || !strncmp(s->url, "udp:", 4)
/* read_thread fragments: open the input, handle seeks, and feed the packet queues */
int64_t stream_start_time;
int pkt_in_play_range = 0;
SDL_mutex *wait_mutex = SDL_CreateMutex();
int scan_all_pmts_set = 0;
memset(st_index, -1, sizeof(st_index));
scan_all_pmts_set = 1;
if (scan_all_pmts_set)
for (i = 0; i < orig_nb_streams; i++)
    "%s: could not find codec parameters\n", is->filename);
st_index[i] = INT_MAX;
if (codecpar->width)
if (is->show_mode == SHOW_MODE_NONE)
    is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
if (is->video_stream < 0 && is->audio_stream < 0) {
if (infinite_buffer < 0 && is->realtime)

if (is->abort_request)
if (is->paused != is->last_paused) {
    is->last_paused = is->paused;
#if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL

int64_t seek_target = is->seek_pos;
int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2 : INT64_MIN;
int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2 : INT64_MAX;
    "%s: error while seeking\n", is->ic->url);
if (is->audio_stream >= 0)
if (is->subtitle_stream >= 0)
if (is->video_stream >= 0)
is->queue_attachments_req = 1;

if (is->queue_attachments_req) {
    is->queue_attachments_req = 0;
SDL_LockMutex(wait_mutex);
SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
SDL_UnlockMutex(wait_mutex);
if (is->video_stream >= 0)
if (is->audio_stream >= 0)
if (is->subtitle_stream >= 0)
SDL_LockMutex(wait_mutex);
SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
SDL_UnlockMutex(wait_mutex);
(pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
event.user.data1 = is;
SDL_PushEvent(&event);
SDL_DestroyMutex(wait_mutex);

/* stream_open fragments */
is->last_video_stream    = is->video_stream    = -1;
is->last_audio_stream    = is->audio_stream    = -1;
is->last_subtitle_stream = is->subtitle_stream = -1;
if (!(is->continue_read_thread = SDL_CreateCond())) {
is->audio_clock_serial = -1;
if (!is->read_tid) {
/* stream_cycle_channel fragments */
int start_index, stream_index;
start_index = is->last_video_stream;
old_index   = is->video_stream;
start_index = is->last_audio_stream;
old_index   = is->audio_stream;
start_index = is->last_subtitle_stream;
old_index   = is->subtitle_stream;
stream_index = start_index;
for (start_index = 0; start_index < nb_streams; start_index++)
stream_index = start_index;
is->last_subtitle_stream = -1;
if (start_index == -1)
if (stream_index == start_index)
st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
if (p && stream_index != -1)

/* show-mode cycling fragments */
int next = is->show_mode;
next = (next + 1) % SHOW_MODE_NB;
} while (next != is->show_mode &&
         (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
if (is->show_mode != next) {
    is->force_refresh = 1;
    is->show_mode = next;
double remaining_time = 0.0;
while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
    if (remaining_time > 0.0)
        av_usleep((int64_t)(remaining_time * 1000000.0));
    if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))

if (!is->ic->nb_chapters)
for (i = 0; i < is->ic->nb_chapters; i++) {
if (i >= is->ic->nb_chapters)
/* event_loop fragments: keyboard, mouse and window event handling */
double incr, pos, frac;
switch (event.type) {
if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
if (!cur_stream->width)
switch (event.key.keysym.sym) {
case SDLK_KP_MULTIPLY:
case SDLK_KP_DIVIDE:
if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
    if (++cur_stream->vfilter_idx >= nb_vfilters)
        cur_stream->vfilter_idx = 0;
    cur_stream->vfilter_idx = 0;
case SDL_MOUSEBUTTONDOWN:
    if (event.button.button == SDL_BUTTON_LEFT) {
        static int64_t last_mouse_left_click = 0;
        last_mouse_left_click = 0;
case SDL_MOUSEMOTION:
    if (event.type == SDL_MOUSEBUTTONDOWN) {
        if (event.button.button != SDL_BUTTON_RIGHT)
    if (!(event.motion.state & SDL_BUTTON_RMASK))
    int tns, thh, tmm, tss;
    tmm = (tns % 3600) / 60;
    frac = x / cur_stream->width;
    mm = (ns % 3600) / 60;
    "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
    hh, mm, ss, thh, tmm, tss);
case SDL_WINDOWEVENT:
    switch (event.window.event) {
    case SDL_WINDOWEVENT_SIZE_CHANGED:
    case SDL_WINDOWEVENT_EXPOSED:
/* opt_sync / opt_show_mode / opt_input_file / opt_codec fragments */
if (!strcmp(arg, "audio"))
else if (!strcmp(arg, "video"))
else if (!strcmp(arg, "ext"))

!strcmp(arg, "waves") ? SHOW_MODE_WAVES :
!strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :

"Argument '%s' provided as input filename, but '%s' was already specified.\n",
if (!strcmp(filename, "-"))

const char *spec = strchr(opt, ':');
    "No media specifier was specified in '%s' in option '%s'\n",
    "Invalid media specifier '%s' in option '%s'\n", spec, opt);
{ "x",        HAS_ARG,              { .func_arg = opt_width },       "force displayed width",  "width" },
{ "y",        HAS_ARG,              { .func_arg = opt_height },      "force displayed height", "height" },
{ "ss",       HAS_ARG,              { .func_arg = opt_seek },        "seek to a given position in seconds", "pos" },
{ "t",        HAS_ARG,              { .func_arg = opt_duration },    "play \"duration\" seconds of audio/video", "duration" },
{ "sync",     HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync },        "set audio-video sync. type (type=audio/video/ext)", "type" },
{ "vf",       OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
{ "af",       OPT_STRING | HAS_ARG, { &afilters },                   "set audio filters", "filter_graph" },
{ "showmode", HAS_ARG,              { .func_arg = opt_show_mode },   "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
{ "i",        OPT_BOOL,             { &dummy },                      "read specified file", "input_file" },
{ "codec",    HAS_ARG,              { .func_arg = opt_codec },       "force decoder", "decoder_name" },
  "read and decode the streams to fill missing information with heuristics" },
#if !CONFIG_AVFILTER
printf("\nWhile playing:\n"
       "f                   toggle full screen\n"
       "9, 0                decrease and increase volume respectively\n"
       "/, *                decrease and increase volume respectively\n"
       "a                   cycle audio channel in the current program\n"
       "v                   cycle video channel\n"
       "t                   cycle subtitle channel in the current program\n"
       "w                   cycle video filters or show modes\n"
       "s                   activate frame-step mode\n"
       "left/right          seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
       "down/up             seek backward/forward 1 minute\n"
       "page down/page up   seek backward/forward 10 minutes\n"
       "right mouse click   seek to percentage in file corresponding to fraction of width\n"
       "left double-click   toggle full screen\n"

"Use -h to get full help or, even better, run 'man %s'\n", program_name);
/* main() fragments: SDL subsystem, window and renderer initialization */
flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
flags &= ~SDL_INIT_AUDIO;
if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
    SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE", "1", 1);
flags &= ~SDL_INIT_VIDEO;
if (SDL_Init(flags)) {
SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

int flags = SDL_WINDOW_HIDDEN;
#if SDL_VERSION_ATLEAST(2,0,5)
flags |= SDL_WINDOW_ALWAYS_ON_TOP;
av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
flags |= SDL_WINDOW_BORDERLESS;
flags |= SDL_WINDOW_RESIZABLE;
#ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
static void do_exit(VideoState *is)
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
#define AV_LOG_WARNING
Something somehow does not look correct.
AVPixelFormat
Pixel format.
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static SDL_RendererInfo renderer_info
int configure_filtergraph(FilterGraph *fg)
static int frame_queue_nb_remaining(FrameQueue *f)
static void frame_queue_next(FrameQueue *f)
enum AVMediaType codec_type
General type of the encoded data.
uint64_t channel_layout
Audio channel layout.
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
unsigned int nb_stream_indexes
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
static int64_t frame_queue_last_pos(FrameQueue *f)
int sample_rate
samples per second
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
static int video_thread(void *arg)
The official guide to swscale for confused that is
static void set_default_window_size(int width, int height, AVRational sar)
#define AV_NOSYNC_THRESHOLD
unsigned int nb_chapters
Number of chapters in AVChapter array.
This struct describes the properties of an encoded stream.
#define AV_LOG_QUIET
Print no output.
static float sub(float src0, float src1)
static enum AVSampleFormat sample_fmts[]
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
#define AVERROR_EOF
End of file.
int av_fifo_grow(AVFifoBuffer *f, unsigned int size)
Enlarge an AVFifoBuffer.
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
static int display_disable
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
#define SAMPLE_ARRAY_SIZE
static void update_volume(VideoState *is, int sign, double step)
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
char * av_asprintf(const char *fmt,...)
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
SDL_Texture * vis_texture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
This structure describes decoded (raw) audio or video data.
AVStream ** streams
A list of all streams in the file.
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
double frame_last_filter_delay
@ AVCOL_RANGE_JPEG
Full range content.
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
static const char * subtitle_codec_name
#define EXTERNAL_CLOCK_MIN_FRAMES
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
static void frame_queue_destory(FrameQueue *f)
#define SAMPLE_QUEUE_SIZE
const char program_name[]
program name, defined by the program for show_version().
AVDictionary * format_opts
int error
contains the error code or 0 if no error happened
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
#define AV_PIX_FMT_RGB32_1
double audio_diff_avg_coef
#define AV_LOG_VERBOSE
Detailed information.
#define CURSOR_HIDE_DELAY
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
static double compute_target_delay(double delay, VideoState *is)
static void stream_close(VideoState *is)
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
static void init_clock(Clock *c, int *queue_serial)
enum AVMediaType codec_type
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
static int opt_seek(void *optctx, const char *opt, const char *arg)
int64_t avio_size(AVIOContext *s)
Get the filesize.
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
static double get_master_clock(VideoState *is)
static const AVInputFormat * file_iformat
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
static int subtitle_thread(void *arg)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
static int subtitle_disable
struct SwrContext * swr_ctx
static int opt_sync(void *optctx, const char *opt, const char *arg)
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, const AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
static void step_to_next_frame(VideoState *is)
enum AVPixelFormat format
static void video_display(VideoState *is)
uint8_t max_lowres
maximum value for lowres supported by the decoder
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
#define SDL_AUDIO_MIN_BUFFER_SIZE
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
static int startup_volume
static SDL_Window * window
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
static void toggle_full_screen(VideoState *is)
static int packet_queue_init(PacketQueue *q)
#define AUDIO_DIFF_AVG_NB
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
static int opt_duration(void *optctx, const char *opt, const char *arg)
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
int x
top left corner of pict, undefined when pict is not set
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
#define AVERROR_OPTION_NOT_FOUND
Option not found.
#define AV_BPRINT_SIZE_AUTOMATIC
static void video_image_display(VideoState *is)
static double val(void *priv, double ch)
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
SDL_cond * empty_queue_cond
static void set_clock_speed(Clock *c, double speed)
double audio_diff_threshold
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
#define ss(width, name, subs,...)
int avformat_network_init(void)
Do global initialization of network libraries.
static int opt_height(void *optctx, const char *opt, const char *arg)
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static const struct TextureFormatEntry sdl_texture_format_map[]
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
static int is_full_screen
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
AVDictionary * metadata
Metadata that applies to the whole file.
#define FF_ARRAY_ELEMS(a)
static int audio_thread(void *arg)
static void set_clock(Clock *c, double pts, int serial)
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
static Frame * frame_queue_peek_next(FrameQueue *f)
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
static void sync_clock_to_slave(Clock *c, Clock *slave)
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
static void opt_input_file(void *optctx, const char *filename)
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
static void frame_queue_signal(FrameQueue *f)
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
struct SwsContext * img_convert_ctx
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
#define AV_CEIL_RSHIFT(a, b)
static int default_height
int flags
Flags modifying the (de)muxer behaviour.
AVRational sample_aspect_ratio
Video only.
int channels
number of audio channels, only used for audio.
const struct AVInputFormat * iformat
The input container format.
#define AV_PIX_FMT_0BGR32
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
int y
top left corner of pict, undefined when pict is not set
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
static double av_q2d(AVRational a)
Convert an AVRational to a double.
#define EXTERNAL_CLOCK_SPEED_STEP
#define AV_CH_LAYOUT_STEREO_DOWNMIX
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
static const AVFilterPad outputs[]
static enum AVPixelFormat pix_fmts[]
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static int opt_codec(void *optctx, const char *opt, const char *arg)
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
static double get_clock(Clock *c)
#define EXTERNAL_CLOCK_SPEED_MIN
static unsigned sws_flags
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
static SDL_Renderer * renderer
int av_usleep(unsigned usec)
Sleep for a period of time.
The libswresample context.
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
#define AV_PIX_FMT_BGR32_1
void av_rdft_calc(RDFTContext *s, FFTSample *data)
static int synchronize_audio(VideoState *is, int nb_samples)
static const char * window_title
@ AVDISCARD_ALL
discard all
int av_log_get_level(void)
Get the current log level.
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
void init_dynload(void)
Initialize dynamic library loading.
AVCodecParameters * codecpar
Codec parameters associated with this stream.
int w
width of pict, undefined when pict is not set
static void seek_chapter(VideoState *is, int incr)
static int get_master_sync_type(VideoState *is)
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
static __device__ float fabs(float a)
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
const AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Rational number (pair of numerator and denominator).
static void stream_cycle_channel(VideoState *is, int codec_type)
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
AVIOContext * pb
I/O context.
void av_log_set_flags(int arg)
static void frame_queue_unref_item(Frame *vp)
Frame queue[FRAME_QUEUE_SIZE]
static int64_t cursor_last_shown
unsigned int * stream_index
static Frame * frame_queue_peek(FrameQueue *f)
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
double frame_last_returned_time
static void set_clock_at(Clock *c, double pts, int serial, double time)
static void toggle_pause(VideoState *is)
static int stream_component_open(VideoState *is, int stream_index)
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
#define AV_PIX_FMT_NE(be, le)
static void event_loop(VideoState *cur_stream)
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
int sample_rate
Audio only.
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
int16_t sample_array[SAMPLE_ARRAY_SIZE]
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
static int exit_on_mousedown
const AVInputFormat * iformat
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
AVDictionary * codec_opts
static int64_t audio_callback_time
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
int flags2
AV_CODEC_FLAG2_*.
enum AVPictureType pict_type
Picture type of the frame.
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
static Frame * frame_queue_peek_writable(FrameQueue *f)
int sample_rate
Sample rate of the audio data.
static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
static int64_t start_time
enum AVSampleFormat sample_fmt
audio sample format
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
static av_const double hypot(double x, double y)
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
static AVRational av_make_q(int num, int den)
Create an AVRational.
static int read_thread(void *arg)
#define AV_PIX_FMT_BGR555
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
#define AV_NOPTS_VALUE
Undefined timestamp value.
SDL_Texture * sub_texture
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
SDL_Texture * vid_texture
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t **out_arg, int out_count, const uint8_t **in_arg, int in_count)
Convert audio.
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
printf("static const uint8_t my_array[100] = {\n")
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
static int infinite_buffer
double max_frame_duration
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
static void packet_queue_destroy(PacketQueue *q)
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
uint64_t channel_layout
Channel layout of the audio data.
static void toggle_mute(VideoState *is)
static void decoder_abort(Decoder *d, FrameQueue *fq)
static void video_refresh(void *opaque, double *remaining_time)
#define ns(max_value, name, subs,...)
static float seek_interval
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
static void frame_queue_push(FrameQueue *f)
static SDL_AudioDeviceID audio_dev
static void sigterm_handler(int sig)
#define AV_LOG_INFO
Standard information.
static void packet_queue_abort(PacketQueue *q)
static const char * video_codec_name
int channels
number of audio channels
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
static const AVInputFormat * iformat
static void packet_queue_flush(PacketQueue *q)
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
int queue_attachments_req
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
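The usual pairing is a send/receive loop; below is a minimal sketch of that pattern (generic API usage, not a copy of the decoder in this file; the helper name is made up):

    #include <libavcodec/avcodec.h>
    #include <libavutil/frame.h>

    /* Feed one packet and drain every frame it produces. */
    static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
    {
        int ret = avcodec_send_packet(avctx, pkt);   /* pkt == NULL flushes the decoder */
        if (ret < 0)
            return ret;

        for (;;) {
            ret = avcodec_receive_frame(avctx, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;                            /* needs more input, or fully drained */
            if (ret < 0)
                return ret;                          /* real decoding error */
            /* ... use frame here ... */
            av_frame_unref(frame);
        }
    }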
int nb_samples
number of audio samples (per channel) described by this frame
#define VIDEO_PICTURE_QUEUE_SIZE
#define i(width, name, range_min, range_max)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
enum VideoState::ShowMode show_mode
struct AudioParams audio_src
const int program_birth_year
program birth year, defined by the program for show_banner()
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
static int compute_mod(int a, int b)
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
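As a sketch of the typical call sequence around it (an illustrative helper, assuming an already opened AVFormatContext named ic and a valid stream index):

    #include <libavformat/avformat.h>
    #include <libavcodec/avcodec.h>

    static AVCodecContext *open_decoder_for_stream(AVFormatContext *ic, int stream_index)
    {
        AVStream *st = ic->streams[stream_index];
        const AVCodec *codec = avcodec_find_decoder(st->codecpar->codec_id);
        AVCodecContext *avctx;

        if (!codec)
            return NULL;
        avctx = avcodec_alloc_context3(codec);
        if (!avctx)
            return NULL;

        /* Copy width/height, sample rate, extradata, ... from the demuxer side. */
        if (avcodec_parameters_to_context(avctx, st->codecpar) < 0)
            goto fail;
        avctx->pkt_timebase = st->time_base;   /* so packet pts/dts are interpreted correctly */
        if (avcodec_open2(avctx, codec, NULL) < 0)
            goto fail;
        return avctx;
    fail:
        avcodec_free_context(&avctx);
        return NULL;
    }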
#define AV_TIME_BASE
Internal time base represented as integer.
uint8_t ** extended_data
pointers to the data planes/channels.
#define av_malloc_array(a, b)
static int video_open(VideoState *is)
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
static int opt_format(void *optctx, const char *opt, const char *arg)
AVSampleFormat
Audio sample formats.
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, const AVCodec *codec)
Filter out options for given codec.
#define AV_PIX_FMT_RGB555
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
static void update_sample_display(VideoState *is, short *samples, int samples_size)
@ AV_SAMPLE_FMT_S16
signed 16 bits
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
New fields can be added to the end with minor version bumps.
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g. RTSP stream).
#define AV_PIX_FMT_BGR565
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
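A small worked example, assuming packed 16-bit stereo (the combination this player targets for SDL output); the helper name is made up:

    #include <libavutil/samplefmt.h>

    /* Buffer size for nb_samples of packed 16-bit stereo; align=1 means no padding. */
    static int stereo_s16_buffer_size(int nb_samples)
    {
        int linesize;   /* for packed formats this equals the total size */
        int size = av_samples_get_buffer_size(&linesize, 2, nb_samples, AV_SAMPLE_FMT_S16, 1);
        /* e.g. nb_samples = 1024 -> 2 channels * 1024 samples * 2 bytes = 4096 */
        return size;
    }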
const char * name
Pad name.
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
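In other words it computes a * b / c with a 64-bit-safe intermediate; a typical use is converting a timestamp between tick rates (the values and helper below are only illustrative):

    #include <stdint.h>
    #include <libavutil/mathematics.h>

    /* Convert 90 kHz MPEG-TS ticks to microseconds: ticks * 1000000 / 90000. */
    static int64_t ts_90k_to_us(int64_t ticks_90k)
    {
        return av_rescale(ticks_90k, 1000000, 90000);   /* e.g. 2700000 -> 30000000 */
    }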
static Frame * frame_queue_peek_readable(FrameQueue *f)
#define AV_PIX_FMT_RGB565
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
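For instance, combined with av_make_q it gives an overflow-safe aspect-ratio comparison (a small sketch with an invented helper):

    #include <libavutil/rational.h>

    /* 1 if width:height is wider than 16:9, 0 otherwise. */
    static int is_wider_than_16_9(int width, int height)
    {
        AVRational dar = av_make_q(width, height);
        return av_cmp_q(dar, av_make_q(16, 9)) > 0;   /* av_cmp_q returns -1, 0 or 1 */
    }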
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
int disposition
Stream disposition - a combination of AV_DISPOSITION_* flags.
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
#define EXTERNAL_CLOCK_MAX_FRAMES
int h
height of pict, undefined when pict is not set
static VideoState * stream_open(const char *filename, const AVInputFormat *iformat)
#define FFSWAP(type, a, b)
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it; the task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any input, so any filter with several inputs will most likely require some kind of queuing mechanism; it is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed, or at least make progress towards producing a frame.
#define AV_PIX_FMT_0RGB32
static AVStream * video_stream
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
static int filter_nbthreads
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
static int find_stream_info
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
uint8_t * av_stream_get_side_data(const AVStream *stream, enum AVPacketSideDataType type, size_t *size)
Get side information from stream.
void av_bprintf(AVBPrint *buf, const char *fmt,...)
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
static int opt_width(void *optctx, const char *opt, const char *arg)
int main(int argc, char **argv)
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
static void show_usage(void)
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
main external API structure.
#define CMDUTILS_COMMON_OPTIONS
static void packet_queue_start(PacketQueue *q)
static const char * audio_codec_name
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
#define AV_SYNC_FRAMEDUP_THRESHOLD
static enum ShowMode show_mode
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
static const OptionDef options[]
static void fill_rectangle(int x, int y, int w, int h)
unsigned int audio_buf1_size
#define AV_SYNC_THRESHOLD_MAX
int av_buffersink_get_channels(const AVFilterContext *ctx)
static void decoder_destroy(Decoder *d)
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
int eof_reached
true if it was unable to read due to an error or EOF
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
In the filter documentation, the word "frame" indicates either a video frame or a group of audio samples.
const AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
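A brief sketch of forcing a demuxer by its short name, which is essentially what the -f option boils down to (the format name "mpegts" and the helper are only examples):

    #include <libavformat/avformat.h>

    static AVFormatContext *open_forced_format(const char *url)
    {
        const AVInputFormat *fmt = av_find_input_format("mpegts");  /* NULL if the name is unknown */
        AVFormatContext *ic = NULL;

        /* Passing fmt skips probing and forces that demuxer; NULL would auto-detect. */
        if (avformat_open_input(&ic, url, fmt, NULL) < 0)
            return NULL;
        if (avformat_find_stream_info(ic, NULL) < 0) {
            avformat_close_input(&ic);
            return NULL;
        }
        return ic;
    }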
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
#define GROW_ARRAY(array, nb_elems)
#define SUBPICTURE_QUEUE_SIZE
static const char * input_filename
static void stream_toggle_pause(VideoState *is)
SDL_cond * continue_read_thread
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it.
static void toggle_audio_display(VideoState *is)
enum AVMediaType codec_type
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions.
char * av_strdup(const char *s)
Duplicate a string.
static int get_video_frame(VideoState *is, AVFrame *frame)
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
struct SwsContext * sub_convert_ctx
static av_always_inline int diff(const uint32_t a, const uint32_t b)
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
This structure stores compressed data.
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
static const uint16_t channel_layouts[7]
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
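A sketch of the usual pattern of building an options dictionary, handing it to avformat_open_input and releasing it afterwards (the option names are real libavformat options, but the combination and helper are only illustrative):

    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>

    static int open_with_options(AVFormatContext **ic, const char *url)
    {
        AVDictionary *opts = NULL;
        int ret;

        av_dict_set(&opts, "scan_all_pmts", "1", 0);       /* string-valued option */
        av_dict_set_int(&opts, "probesize", 5000000, 0);   /* integer convenience wrapper */

        ret = avformat_open_input(ic, url, NULL, &opts);
        /* Options that were consumed are removed; anything left was not recognized. */
        av_dict_free(&opts);
        return ret;
    }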
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
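A minimal sketch of the grow-only reuse pattern this call enables, similar in spirit to how the player keeps a reusable audio buffer (the struct and helper are invented for illustration):

    #include <stdint.h>
    #include <libavutil/mem.h>

    typedef struct GrowBuf {
        uint8_t     *data;
        unsigned int size;   /* current allocation, updated by av_fast_malloc */
    } GrowBuf;

    /* Return a buffer of at least min_size bytes, reallocating only when needed. */
    static uint8_t *grow_buf_get(GrowBuf *b, size_t min_size)
    {
        av_fast_malloc(&b->data, &b->size, min_size);
        return b->data;   /* NULL (and size reset to 0) on allocation failure */
    }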
static void stream_component_close(VideoState *is, int stream_index)
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
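A small round-trip sketch with the byte-oriented AVFifoBuffer API used here (av_fifo_generic_write and av_fifo_generic_read are assumed from the same API generation; the helper is made up):

    #include <libavutil/fifo.h>

    /* Push one int and pop it back. Returns 0 on success. */
    static int fifo_roundtrip(void)
    {
        AVFifoBuffer *f = av_fifo_alloc(16 * sizeof(int));
        int in = 42, out = 0;

        if (!f)
            return -1;
        av_fifo_generic_write(f, &in, sizeof(in), NULL);   /* NULL callback: plain memcpy */
        if (av_fifo_size(f) >= (int)sizeof(out))           /* bytes currently readable */
            av_fifo_generic_read(f, &out, sizeof(out), NULL);
        av_fifo_freep(&f);                                 /* frees and sets f to NULL */
        return out == 42 ? 0 : -1;
    }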
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
unsigned int audio_buf_size
#define flags(name, subs,...)
void av_rdft_end(RDFTContext *s)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
double get_rotation(int32_t *displaymatrix)
static Frame * frame_queue_peek_last(FrameQueue *f)
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
static int decoder_reorder_pts
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
struct AudioParams audio_tgt
AVRational time_base
time base in which the start/end timestamps are specified
static AVStream * audio_stream
const AVClass * avfilter_get_class(void)
A linked-list of the inputs/outputs of the filter chain.
It's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this, just let it be. Each option entry then gives a default, a minimum, a maximum and flags; the name is the option name (keep it simple and lowercase) and descriptions are short.
static void video_audio_display(VideoState *s)
#define AV_SYNC_THRESHOLD_MIN
static void check_external_clock_speed(VideoState *is)
uint32_t start_display_time
#define SAMPLE_CORRECTION_PERCENT_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
static int is_realtime(AVFormatContext *s)
static void * av_x_if_null(const void *p, const void *x)
Return the default pointer x in case p is NULL.
static int decode_interrupt_cb(void *ctx)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
#define AV_PIX_FMT_RGB444
static int exit_on_keydown