#define WIN32_LEAN_AND_MEAN
#include <OpenGL/gl3.h>
#if HAVE_GLXGETPROCADDRESS
#if defined(GL_RED)
#define FF_GL_RED_COMPONENT GL_RED
#elif defined(GL_LUMINANCE)
#define FF_GL_RED_COMPONENT GL_LUMINANCE
#else
#define FF_GL_RED_COMPONENT 0x1903 /* GL_RED */
#endif
#define FF_GL_UNSIGNED_BYTE_3_3_2 0x8032
#define FF_GL_UNSIGNED_BYTE_2_3_3_REV 0x8362
#define FF_GL_UNSIGNED_SHORT_1_5_5_5_REV 0x8366
#define FF_GL_UNPACK_ROW_LENGTH 0x0CF2
#define FF_GL_ARRAY_BUFFER 0x8892
#define FF_GL_ELEMENT_ARRAY_BUFFER 0x8893
#define FF_GL_STATIC_DRAW 0x88E4
#define FF_GL_FRAGMENT_SHADER 0x8B30
#define FF_GL_VERTEX_SHADER 0x8B31
#define FF_GL_COMPILE_STATUS 0x8B81
#define FF_GL_LINK_STATUS 0x8B82
#define FF_GL_INFO_LOG_LENGTH 0x8B84
#define OPENGL_ERROR_CHECK(ctx) \
{   GLenum err_code; \
    if ((err_code = glGetError()) != GL_NO_ERROR) { \
        av_log(ctx, AV_LOG_ERROR, "OpenGL error occurred in '%s', line %d: %d\n", __FUNCTION__, __LINE__, err_code); \
        goto fail; \
    } }
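/* The macro above logs the failing call site and jumps to a local "fail:" label.
 * A minimal usage sketch, assuming a caller that provides such a label
 * (the function name below is illustrative, not taken from this listing): */
static int upload_example(OpenGLContext *opengl)
{
    glBindTexture(GL_TEXTURE_2D, opengl->texture_name[0]);
    OPENGL_ERROR_CHECK(opengl);  /* logs and jumps to fail: if glGetError() reports an error */
    return 0;
  fail:
    return AVERROR_EXTERNAL;
}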
SDL_GLContext glcontext;
    while (SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT) > 0) {
        switch (event.type) {
        case SDL_KEYUP:
            switch (event.key.keysym.sym) {
        case SDL_WINDOWEVENT:
            switch (event.window.event) {
            case SDL_WINDOWEVENT_RESIZED:
            case SDL_WINDOWEVENT_SIZE_CHANGED:
                SDL_GL_GetDrawableSize(opengl->window, &message.width, &message.height);
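                /* The device then forwards the new drawable size to the API user; a sketch
                 * of that notification, assuming it follows directly here (the surrounding
                 * control flow is elided in this listing): */
                return avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER,
                                                           &message, sizeof(message));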
    if (SDL_Init(SDL_INIT_VIDEO)) {
    opengl->window = SDL_CreateWindow(opengl->window_title,
                                      SDL_WINDOWPOS_UNDEFINED,
                                      SDL_WINDOWPOS_UNDEFINED,
                                      opengl->window_width, opengl->window_height,
                                      SDL_WINDOW_RESIZABLE | SDL_WINDOW_OPENGL);
    if (!opengl->window) {
        av_log(opengl, AV_LOG_ERROR, "Unable to create default window: %s\n", SDL_GetError());
    opengl->glcontext = SDL_GL_CreateContext(opengl->window);
    if (!opengl->glcontext) {
        av_log(opengl, AV_LOG_ERROR, "Unable to create OpenGL context on default window: %s\n", SDL_GetError());
    SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
    SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
    SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
    SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 8);
    SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
    SDL_GL_GetDrawableSize(opengl->window, &message.width, &message.height);
#define LOAD_OPENGL_FUN(name, type) \
    procs->name = (type)SDL_GL_GetProcAddress(#name); \
    if (!procs->name) { \
        av_log(opengl, AV_LOG_ERROR, "Cannot load OpenGL function: '%s'\n", #name); \
        return AVERROR(ENOSYS); \
    }

#undef LOAD_OPENGL_FUN
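/* Every member of FFOpenGLFunctions is resolved through LOAD_OPENGL_FUN; a sketch of what
 * one expansion boils down to, shown for glCreateProgram (purely illustrative): */
procs->glCreateProgram = (FF_PFNGLCREATEPROGRAMPROC)SDL_GL_GetProcAddress("glCreateProgram");
if (!procs->glCreateProgram) {
    av_log(opengl, AV_LOG_ERROR, "Cannot load OpenGL function: '%s'\n", "glCreateProgram");
    return AVERROR(ENOSYS);
}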
#if defined(__APPLE__)
    return opengl_sdl_load_procedures(opengl);
#if HAVE_GLXGETPROCADDRESS
#define SelectedGetProcAddress glXGetProcAddress
#elif HAVE_WGLGETPROCADDRESS
#define SelectedGetProcAddress wglGetProcAddress
#endif

#define LOAD_OPENGL_FUN(name, type) \
    procs->name = (type)SelectedGetProcAddress(#name); \
    if (!procs->name) { \
        av_log(opengl, AV_LOG_ERROR, "Cannot load OpenGL function: '%s'\n", #name); \
        return AVERROR(ENOSYS); \
    }

    return opengl_sdl_load_procedures(opengl);

#undef SelectedGetProcAddress
#undef LOAD_OPENGL_FUN
    memset(matrix, 0, 16 * sizeof(float));
    matrix[0] = matrix[5] = matrix[10] = matrix[15] = 1.0f;
static void opengl_make_ortho(float matrix[16], float left, float right,
                              float bottom, float top, float nearZ, float farZ)
{
    float ral = right + left;
    float rsl = right - left;
    float tab = top + bottom;
    float tsb = top - bottom;
    float fan = farZ + nearZ;
    float fsn = farZ - nearZ;

    memset(matrix, 0, 16 * sizeof(float));
    matrix[0] = 2.0f / rsl;
    matrix[5] = 2.0f / tsb;
    matrix[10] = -2.0f / fsn;
    matrix[12] = -ral / rsl;
    matrix[13] = -tab / tsb;
    matrix[14] = -fan / fsn;
    matrix[15] = 1.0f;
}
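/* opengl_make_ortho fills matrix with the standard column-major orthographic projection
 * (the same matrix glOrtho would produce). A sketch of how such a helper can be driven,
 * here with a centered, window-sized view volume (the concrete values are illustrative): */
float projection[16];
opengl_make_ortho(projection,
                  -(float)width  / 2.0f, (float)width  / 2.0f,   /* left, right  */
                  -(float)height / 2.0f, (float)height / 2.0f,   /* bottom, top  */
                  1.0f, -1.0f);                                  /* nearZ, farZ  */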
        const char *extension;
    } required_extensions[] = {
        { "GL_ARB_multitexture",         1, 3 },
        { "GL_ARB_vertex_buffer_object", 1, 5 },
        { "GL_ARB_vertex_shader",        2, 0 },
        { "GL_ARB_fragment_shader",      2, 0 },
        { "GL_ARB_shader_objects",       2, 0 },
        { NULL,                          0, 0 }
    };
    const char *extensions, *version;

    version = glGetString(GL_VERSION);
    extensions = glGetString(GL_EXTENSIONS);

    for (i = 0; required_extensions[i].extension; i++) {
            !strstr(extensions, required_extensions[i].extension)) {
                   required_extensions[i].extension);
#if defined(GL_ES_VERSION_2_0)
    opengl->unpack_subimage = !!strstr(extensions, "GL_EXT_unpack_subimage");
    case GL_UNSIGNED_SHORT:
    case GL_UNSIGNED_SHORT_5_6_5:
    case GL_UNSIGNED_BYTE:
static av_cold void opengl_get_texture_size(OpenGLContext *opengl, int in_width, int in_height,
                                            int *out_width, int *out_height)
{
    if (opengl->non_pow_2_textures) {
        *out_width = in_width;
        *out_height = in_height;
    } else {
        int max = FFMAX(in_width, in_height);
        unsigned power_of_2 = 1;
        while (power_of_2 < max)
            power_of_2 *= 2;
        *out_height = power_of_2;
        *out_width = power_of_2;
        av_log(opengl, AV_LOG_DEBUG, "Texture size calculated from %dx%d into %dx%d\n",
               in_width, in_height, *out_width, *out_height);
    }
}
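/* When non-power-of-two textures are unsupported, both dimensions are rounded up to one
 * power of two covering the larger side. A tiny illustrative check (the 1280x720 input
 * is an assumption, not taken from this listing): */
int tex_w, tex_h;
opengl_get_texture_size(opengl, 1280, 720, &tex_w, &tex_h);
/* with non_pow_2_textures == 0 this yields tex_w == tex_h == 2048 */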
#define FILL_COMPONENT(i) { \
        shift = (desc->comp[i].depth - 1) >> 3; \
        opengl->color_map[(i << 2) + (desc->comp[i].offset >> shift)] = 1.0; \
    }

#undef FILL_COMPONENT
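/* FILL_COMPONENT records, for each component i, which position inside the packed pixel
 * that component occupies, so color_map becomes a 4x4 swizzle matrix for the fragment
 * shader. A worked example, assuming 8-bit AV_PIX_FMT_BGRA (shift is 0 for 8-bit depth):
 *   comp[0] = R at offset 2  ->  color_map[0*4 + 2] = 1.0
 *   comp[1] = G at offset 1  ->  color_map[1*4 + 1] = 1.0
 *   comp[2] = B at offset 0  ->  color_map[2*4 + 0] = 1.0
 *   comp[3] = A at offset 3  ->  color_map[3*4 + 3] = 1.0 */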
    if (!fragment_shader_code) {

    opengl->fragment_shader = opengl_load_shader(opengl, FF_GL_FRAGMENT_SHADER,
                                                 fragment_shader_code);
    int new_width, new_height;

    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexImage2D(GL_TEXTURE_2D, 0, opengl->format, new_width, new_height, 0,
                 opengl->format, opengl->type, NULL);
    for (i = 0; i < 4; i++)
    if ((ret = opengl_sdl_create_window(h)) < 0) {
    av_log(opengl, AV_LOG_ERROR, "FFmpeg is compiled without SDL. Cannot create default window.\n");
    SDL_GL_DeleteContext(opengl->glcontext);
    SDL_DestroyWindow(opengl->window);
    if (desc->nb_components > 1) {
        int num_planes = desc->nb_components - (has_alpha ? 1 : 0);
        for (i = 1; i < num_planes; i++)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glClearColor((float)opengl->background[0] / 255.0f, (float)opengl->background[1] / 255.0f,
                 (float)opengl->background[2] / 255.0f, 1.0f);
    if (h->nb_streams != 1 ||
        av_log(opengl, AV_LOG_ERROR, "Only a single raw or wrapped avframe video stream is supported.\n");
    glClear(GL_COLOR_BUFFER_BIT);
    SDL_GL_SwapWindow(opengl->window);
    int plane = desc->comp[comp_index].plane;
        data += width_chroma * height_chroma * wordsize;
        data += 2 * width_chroma * height_chroma * wordsize;
#define LOAD_TEXTURE_DATA(comp_index, sub) \
    int width  = sub ? AV_CEIL_RSHIFT(opengl->width,  desc->log2_chroma_w) : opengl->width; \
    int height = sub ? AV_CEIL_RSHIFT(opengl->height, desc->log2_chroma_h) : opengl->height; \
    int plane = desc->comp[comp_index].plane; \
    glBindTexture(GL_TEXTURE_2D, opengl->texture_name[comp_index]); \
        GLint length = ((AVFrame *)input)->linesize[plane]; \
        int bytes_per_pixel = opengl_type_size(opengl->type); \
        if (!(desc->flags & AV_PIX_FMT_FLAG_PLANAR)) \
            bytes_per_pixel *= desc->nb_components; \
        data = ((AVFrame *)input)->data[plane]; \
        if (!(length % bytes_per_pixel) && \
            (opengl->unpack_subimage || ((length / bytes_per_pixel) == width))) { \
            length /= bytes_per_pixel; \
            if (length != width) \
                glPixelStorei(FF_GL_UNPACK_ROW_LENGTH, length); \
            glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, \
                            opengl->format, opengl->type, data); \
            if (length != width) \
                glPixelStorei(FF_GL_UNPACK_ROW_LENGTH, 0); \
            for (h = 0; h < height; h++) { \
                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, h, width, 1, \
                                opengl->format, opengl->type, data); \
        data = opengl_get_plane_pointer(opengl, input, comp_index, desc); \
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, \
                        opengl->format, opengl->type, data); \
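/* When the line size is a whole number of pixels, the macro uploads a plane with one
 * glTexSubImage2D call and uses FF_GL_UNPACK_ROW_LENGTH to describe the stride; otherwise
 * it falls back to one call per row. A reduced sketch of the fast path outside the macro
 * (the variable names are illustrative): */
if (stride_pixels != width)
    glPixelStorei(FF_GL_UNPACK_ROW_LENGTH, stride_pixels);   /* source rows are longer than width */
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, format, type, data);
if (stride_pixels != width)
    glPixelStorei(FF_GL_UNPACK_ROW_LENGTH, 0);                /* restore default unpacking */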
    if (!opengl->no_window && (ret = opengl_sdl_process_events(h)) < 0)
    glClear(GL_COLOR_BUFFER_BIT);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    SDL_GL_SwapWindow(opengl->window);
#define OFFSET(x) offsetof(OpenGLContext, x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM
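/* OFFSET and ENC are the usual helpers for declaring the muxer's AVOption table. A sketch
 * of how entries built from them typically look (the exact options and defaults below are
 * an assumption, not copied from this listing): */
static const AVOption options[] = {
    { "background", "set background color",   OFFSET(background), AV_OPT_TYPE_COLOR, { .str = "black" }, 0, 0, ENC },
    { "no_window",  "disable default window", OFFSET(no_window),  AV_OPT_TYPE_INT,   { .i64 = 0 }, 0, 1, ENC },
    { NULL }
};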
#define AV_PIX_FMT_YUVA422P16
GLuint vertex_buffer
Vertex buffer.
#define AV_PIX_FMT_GBRAP16
#define AV_LOG_WARNING
Something somehow does not look correct.
static const char *const FF_OPENGL_FRAGMENT_SHADER_RGBA_PACKET
Fragment shader for packed RGBA formats.
AVPixelFormat
Pixel format.
void(APIENTRY * FF_PFNGLSHADERSOURCEPROC)(GLuint shader, GLsizei count, const char **string, const GLint *length)
static const AVOption options[]
GLuint(APIENTRY * FF_PFNGLCREATESHADERPROC)(GLenum type)
enum AVMediaType codec_type
General type of the encoded data.
This struct describes the properties of an encoded stream.
GLint chroma_div_h_location
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
FF_PFNGLGETPROGRAMIVPROC glGetProgramiv
GLint texture_location[4]
static av_cold int opengl_init_context(OpenGLContext *opengl)
@ AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER
Prepare window buffer message.
This structure describes decoded (raw) audio or video data.
static int opengl_control_message(AVFormatContext *h, int type, void *data, size_t data_size)
#define AV_PIX_FMT_YUVA420P16
static const char * opengl_get_fragment_shader_code(enum AVPixelFormat format)
GLint max_viewport_width
Maximum viewport size.
int avdevice_dev_to_app_control_message(struct AVFormatContext *s, enum AVDevToAppMessageType type, void *data, size_t data_size)
Send control message from device to application.
static const char *const FF_OPENGL_VERTEX_SHADER
static const GLushort g_index[6]
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
GLuint vertex_shader
Vertex shader.
int no_window
If zero, a default window is created.
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
void(APIENTRY * FF_PFNGLDELETEBUFFERSPROC)(GLsizei n, const GLuint *buffers)
void(APIENTRY * FF_PFNGLATTACHSHADERPROC)(GLuint program, GLuint shader)
GLfloat color_map[16]
RGBA color map matrix.
static av_cold GLuint opengl_load_shader(OpenGLContext *opengl, GLenum type, const char *source)
@ AV_APP_TO_DEV_WINDOW_REPAINT
Repaint request message.
static const char *const FF_OPENGL_FRAGMENT_SHADER_GRAY
void(APIENTRY * FF_PFNGLCOMPILESHADERPROC)(GLuint shader)
void(APIENTRY * FF_PFNGLGETSHADERIVPROC)(GLuint shader, GLenum pname, GLint *params)
#define FF_GL_UNSIGNED_SHORT_1_5_5_5_REV
GLfloat chroma_div_h
Chroma subsampling h ratio.
#define FF_GL_UNSIGNED_BYTE_2_3_3_REV
static SDL_Window * window
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
void(APIENTRY * FF_PFNGLUNIFORM1IPROC)(GLint location, GLint v0)
FF_PFNGLCREATEPROGRAMPROC glCreateProgram
#define AV_PIX_FMT_YUVA444P16
static void opengl_compute_display_area(AVFormatContext *s)
GLuint texture_name[4]
Textures' IDs.
#define AV_PIX_FMT_GRAY16
static int opengl_write_frame(AVFormatContext *h, int stream_index, AVFrame **frame, unsigned flags)
static av_cold int opengl_configure_texture(OpenGLContext *opengl, GLuint texture, GLsizei width, GLsizei height)
FF_PFNGLUNIFORMMATRIX4FVPROC glUniformMatrix4fv
void(APIENTRY * FF_PFNGLGETSHADERINFOLOGPROC)(GLuint shader, GLsizei bufSize, GLsizei *length, char *infoLog)
GLuint fragment_shader
Fragment shader for the current pix_fmt.
FF_PFNGLCREATESHADERPROC glCreateShader
@ AV_PIX_FMT_BGR8
packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
GLint texture_coords_attrib
static int opengl_prepare(OpenGLContext *opengl)
static av_cold void opengl_get_texture_size(OpenGLContext *opengl, int in_width, int in_height, int *out_width, int *out_height)
#define AV_PIX_FMT_YUV422P16
int picture_height
Rendered height.
static void opengl_make_ortho(float matrix[16], float left, float right, float bottom, float top, float nearZ, float farZ)
@ AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER
Display window buffer message.
GLint position_attrib
Attributes' locations.
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
#define LOAD_OPENGL_FUN(name, type)
#define AV_PIX_FMT_YUV444P16
#define AV_CEIL_RSHIFT(a, b)
int unpack_subimage
1 when GL_EXT_unpack_subimage is available
FF_PFNGLUNIFORM1IPROC glUniform1i
void(APIENTRY * FF_PFNGLBINDBUFFERPROC)(GLenum target, GLuint buffer)
@ AV_DEV_TO_APP_CREATE_WINDOW_BUFFER
Create window buffer message.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static int opengl_write_packet(AVFormatContext *h, AVPacket *pkt)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_PIX_FMT_YUV420P16
static void opengl_make_identity(float matrix[16])
#define AV_PIX_FMT_FLAG_ALPHA
The pixel format has an alpha channel.
static enum AVPixelFormat pix_fmt
static const char *const FF_OPENGL_FRAGMENT_SHADER_RGB_PLANAR
Fragment shader for planar RGB formats.
void(APIENTRY * FF_PFNGLBUFFERDATAPROC)(GLenum target, ptrdiff_t size, const GLvoid *data, GLenum usage)
#define FF_GL_FRAGMENT_SHADER
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
@ AV_CODEC_ID_WRAPPED_AVFRAME
Passthrough codec, AVFrames wrapped in AVPacket.
@ AV_PIX_FMT_RGBA
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
static const struct OpenGLFormatDesc opengl_format_desc[]
void(APIENTRY * FF_PFNGLUNIFORM1FPROC)(GLint location, GLfloat v0)
static const char * window_title
GLint max_viewport_height
Maximum viewport size.
#define AV_PIX_FMT_GBRP16
AVCodecParameters * codecpar
Codec parameters associated with this stream.
char * window_title
Title of the window.
#define AV_PIX_FMT_RGBA64
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
void(APIENTRY * FF_PFNGLVERTEXATTRIBPOINTERPROC)(GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, uintptr_t pointer)
FF_PFNGLGETATTRIBLOCATIONPROC glGetAttribLocation
void(APIENTRY * FF_PFNGLENABLEVERTEXATTRIBARRAYPROC)(GLuint index)
static av_cold int opengl_read_limits(AVFormatContext *h)
static int write_trailer(AVFormatContext *s1)
Rational number (pair of numerator and denominator).
#define FF_GL_VERTEX_SHADER
GLuint program
Shader program.
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
const char * av_default_item_name(void *ptr)
Return the context name.
static const char *const FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR
Fragment shader for planar YUV formats.
@ AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT
static const char *const FF_OPENGL_FRAGMENT_SHADER_RGBA_PLANAR
Fragment shader for planar RGBA formats.
enum AVPixelFormat pix_fmt
Stream pixel format.
FF_PFNGLGETSHADERINFOLOGPROC glGetShaderInfoLog
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
static av_cold void opengl_fill_color_map(OpenGLContext *opengl)
void(APIENTRY * FF_PFNGLDELETESHADERPROC)(GLuint shader)
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
@ AV_PIX_FMT_ABGR
packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
void(APIENTRY * FF_PFNGLGETPROGRAMIVPROC)(GLuint program, GLenum pname, GLint *params)
static av_cold int opengl_compile_shaders(OpenGLContext *opengl, enum AVPixelFormat pix_fmt)
#define OPENGL_ERROR_CHECK(ctx)
static const char *const FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET
Fragment shader for packed RGB formats.
static av_cold void opengl_deinit_context(OpenGLContext *opengl)
FF_PFNGLUNIFORM1FPROC glUniform1f
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
static av_cold int opengl_prepare_vertex(AVFormatContext *s)
static int av_cold opengl_load_procedures(OpenGLContext *opengl)
static int opengl_draw(AVFormatContext *h, void *intput, int repaint, int is_pkt)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
#define FF_GL_COMPILE_STATUS
#define AV_PIX_FMT_FLAG_RGB
The pixel format contains RGB-like data (as opposed to YUV/grayscale).
FF_PFNGLCOMPILESHADERPROC glCompileShader
FFOpenGLFunctions glprocs
#define FF_GL_STATIC_DRAW
#define FF_GL_ELEMENT_ARRAY_BUFFER
void(APIENTRY * FF_PFNGLUSEPROGRAMPROC)(GLuint program)
#define AV_PIX_FMT_BGR555
static uint8_t * opengl_get_plane_pointer(OpenGLContext *opengl, AVPacket *pkt, int comp_index, const AVPixFmtDescriptor *desc)
GLfloat chroma_div_w
Chroma subsampling w ratio.
GLint projection_matrix_location
Uniforms' locations.
FF_PFNGLLINKPROGRAMPROC glLinkProgram
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
int picture_width
Rendered width.
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
#define AVERROR_EXTERNAL
Generic error in an external library.
GLuint index_buffer
Index buffer.
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
FF_PFNGLDELETEPROGRAMPROC glDeleteProgram
#define FF_GL_LINK_STATUS
#define AV_LOG_INFO
Standard information.
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
void(APIENTRY * FF_PFNGLGETPROGRAMINFOLOGPROC)(GLuint program, GLsizei bufSize, GLsizei *length, char *infoLog)
FF_PFNGLDELETESHADERPROC glDeleteShader
static int opengl_release_window(AVFormatContext *h)
#define AV_PIX_FMT_BGRA64
static int opengl_create_window(AVFormatContext *h)
uint8_t background[4]
Background color.
static const AVClass opengl_class
static av_cold void opengl_get_texture_params(OpenGLContext *opengl)
#define LOAD_TEXTURE_DATA(comp_index, sub)
#define AV_PIX_FMT_RGB555
int inited
Set to 1 when write_header was successfully called.
FF_PFNGLGENBUFFERSPROC glGenBuffers
#define FF_GL_ARRAY_BUFFER
#define AV_PIX_FMT_BGR565
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
FF_PFNGLATTACHSHADERPROC glAttachShader
GLint model_view_matrix_location
#define AV_PIX_FMT_RGB565
FF_PFNGLBINDBUFFERPROC glBindBuffer
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
@ AV_APP_TO_DEV_WINDOW_SIZE
Window size change message.
void(APIENTRY * FF_PFNGLACTIVETEXTUREPROC)(GLenum texture)
FF_PFNGLGETUNIFORMLOCATIONPROC glGetUniformLocation
@ AV_PIX_FMT_0BGR
packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
FF_PFNGLDELETEBUFFERSPROC glDeleteBuffers
static int opengl_resize(AVFormatContext *h, int width, int height)
#define FF_ARRAY_ELEMS(a)
FF_PFNGLACTIVETEXTUREPROC glActiveTexture
FF_PFNGLGETPROGRAMINFOLOGPROC glGetProgramInfoLog
FF_PFNGLBUFFERDATAPROC glBufferData
FF_PFNGLUSEPROGRAMPROC glUseProgram
#define FF_GL_RED_COMPONENT
GLfloat model_view_matrix[16]
Model view matrix.
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
static int opengl_type_size(GLenum type)
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
#define FF_GL_UNSIGNED_BYTE_3_3_2
#define FF_GL_INFO_LOG_LENGTH
GLint(APIENTRY * FF_PFNGLGETUNIFORMLOCATIONPROC)(GLuint program, const char *name)
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
char * av_strdup(const char *s)
Duplicate a string.
static av_cold int opengl_write_trailer(AVFormatContext *h)
void(APIENTRY * FF_PFNGLLINKPROGRAMPROC)(GLuint program)
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
void(APIENTRY * FF_PFNGLUNIFORMMATRIX4FVPROC)(GLint location, GLsizei count, GLboolean transpose, const GLfloat *value)
GLuint(APIENTRY * FF_PFNGLCREATEPROGRAMPROC)(void)
static av_cold int opengl_write_header(AVFormatContext *h)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
FF_PFNGLGETSHADERIVPROC glGetShaderiv
static const char *const FF_OPENGL_FRAGMENT_SHADER_YUVA_PLANAR
Fragment shader for planar YUVA formats.
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
This structure stores compressed data.
GLint chroma_div_w_location
void(APIENTRY * FF_PFNGLGENBUFFERSPROC)(GLsizei n, GLuint *buffers)
GLfloat projection_matrix[16]
Projection matrix.
#define FILL_COMPONENT(i)
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
FF_PFNGLVERTEXATTRIBPOINTERPROC glVertexAttribPointer
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
FF_PFNGLENABLEVERTEXATTRIBARRAYPROC glEnableVertexAttribArray
OpenGLVertexInfo vertex[4]
VBO.
GLint max_texture_size
Maximum texture size.
@ AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER
Destroy window buffer message.
FF_PFNGLSHADERSOURCEPROC glShaderSource
AVOutputFormat ff_opengl_muxer
GLint(APIENTRY * FF_PFNGLGETATTRIBLOCATIONPROC)(GLuint program, const char *name)
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
void(APIENTRY * FF_PFNGLDELETEPROGRAMPROC)(GLuint program)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
int non_pow_2_textures
1 when non-power-of-two textures are supported