#define CACHED_BITSTREAM_READER !ARCH_X86_32
/* excerpt from interp_lowres(); elided code is marked "..." */
    uint8_t *ptr, *ptr1, *ptr2;

    ptr  = picture->data[0];
    ptr1 = picture->data[1];
    ptr2 = picture->data[2];

    for (int y = 0; y < height; y += 2) {
        /* ... */
        for (int x = 0; x < width - 1; x++) {
            fill = bytestream2_get_byte(gb);
            /* ... */
            *(dst++) = (fill + bytestream2_peek_byte(gb) + 1) >> 1;
        }
        fill = bytestream2_get_byte(gb);
        /* ... */
        for (int x = 0; x < width - 1; x++) {
            fill = bytestream2_get_byte(gb);
            /* ... */
            *(dst++) = (fill + bytestream2_peek_byte(gb) + 1) >> 1;
        }
        fill = bytestream2_get_byte(gb);
        /* ... */
        for (int x = 0; x < (width >> 1) - 1; x++) {
            fill = bytestream2_get_byte(gb);
            /* ... */
            *(dst++) = (fill + bytestream2_peek_byte(gb) + 1) >> 1;
        }
        fill = bytestream2_get_byte(gb);
        /* ... */
        for (int x = 0; x < (width >> 1) - 1; x++) {
            fill = bytestream2_get_byte(gb);
            /* ... */
            *(dst++) = (fill + bytestream2_peek_byte(gb) + 1) >> 1;
        }
        fill = bytestream2_get_byte(gb);
        /* ... */
    }
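The pattern above writes each decoded byte and then the rounded average of it and the byte that follows: (a + b + 1) >> 1 is an integer round((a + b) / 2), so a row is doubled horizontally without floating point. A small self-contained illustration of that doubling step; the function name, buffers, and values below are made up for the example and are not taken from photocd.c:

/* --- illustrative example, not part of photocd.c --- */
#include <stdint.h>
#include <stdio.h>

/* Double a row horizontally: each output pair is one input sample followed
 * by the rounded average of it and its right-hand neighbour; the last
 * sample is simply repeated. */
static void double_row(const uint8_t *src, uint8_t *dst, int src_width)
{
    for (int x = 0; x < src_width - 1; x++) {
        *dst++ = src[x];
        *dst++ = (src[x] + src[x + 1] + 1) >> 1;   /* round((a + b) / 2) */
    }
    *dst++ = src[src_width - 1];
    *dst++ = src[src_width - 1];
}

int main(void)
{
    const uint8_t src[4] = { 10, 20, 30, 255 };
    uint8_t dst[8];

    double_row(src, dst, 4);
    for (int i = 0; i < 8; i++)
        printf("%d ", dst[i]);
    printf("\n");   /* prints: 10 15 20 25 30 143 255 255 */
    return 0;
}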
/* excerpt from interp_lines(); elided code is marked "..." */
    for (int y = 0; y < height - 2; y += 2) {
        const uint8_t *src1 = ptr;
        uint8_t *dst = ptr + linesize;
        const uint8_t *src2 = dst + linesize;

        for (x = 0; x < width - 2; x += 2) {
            dst[x] = (src1[x] + src2[x] + 1) >> 1;
            /* ... */
        }
        /* ... */
        dst[x] = dst[x + 1] = (src1[x] + src2[x] + 1) >> 1;
        /* ... */
        ptr += linesize << 1;
    }
    /* ... */
    dst = ptr + linesize;
    for (x = 0; x < width - 2; x += 2) {
        /* ... */
        dst[x + 1] = (src1[x] + src1[x + 2] + 1) >> 1;
        /* ... */
    }
    /* ... */
    dst[x] = dst[x + 1] = src1[x];
/* excerpt from interp_pixels(); elided code is marked "..." */
    for (int y = height - 2; y >= 0; y -= 2) {
        const uint8_t *src = ptr + (y >> 1) * linesize;
        uint8_t *dst = ptr + y * linesize;
        /* ... */
        for (int x = width - 4; x >= 0; x -= 2) {
            dst[x]     = src[x >> 1];
            dst[x + 1] = (src[x >> 1] + src[(x >> 1) + 1] + 1) >> 1;
        }
        /* ... */
    }
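interp_pixels() upscales in place: the half-resolution samples occupy the top-left part of the buffer that will hold the full-resolution plane, so both loops run backwards (y from the bottom, x from the right) and every source sample is read before the expanding output reaches it. A self-contained sketch of the same in-place trick on a single row; names and data are illustrative:

/* --- illustrative example, not part of photocd.c --- */
#include <stdint.h>
#include <stdio.h>

/* In-place horizontal 2x upscale: buf[0..half-1] holds the source, the
 * result fills buf[0..2*half-1]. Walking right to left guarantees that
 * buf[x >> 1] has not been overwritten yet when it is read. */
static void upscale_row_inplace(uint8_t *buf, int half)
{
    buf[2 * half - 1] = buf[2 * half - 2] = buf[half - 1];   /* replicate tail */
    for (int x = 2 * half - 4; x >= 0; x -= 2) {
        const uint8_t a = buf[x >> 1];
        const uint8_t b = buf[(x >> 1) + 1];

        buf[x]     = a;
        buf[x + 1] = (a + b + 1) >> 1;
    }
}

int main(void)
{
    uint8_t buf[8] = { 10, 20, 30, 40 };   /* source in the first half */

    upscale_row_inplace(buf, 4);
    for (int i = 0; i < 8; i++)
        printf("%d ", buf[i]);
    printf("\n");   /* prints: 10 15 20 25 30 35 40 40 */
    return 0;
}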
/* excerpt from read_hufftable(); elided code is marked "..." */
    int start = s->streampos;
    /* ... */
    count = bytestream2_get_byte(gb) + 1;
    /* ... */
    for (int j = 0; j < count; j++) {
        const int bit  = bytestream2_get_byteu(gb) + 1;
        const int code = bytestream2_get_be16u(gb);
        const int sym  = bytestream2_get_byteu(gb);
        /* ... */
    }
    /* ... the collected length/code/symbol arrays are then passed to
     * ff_init_vlc_sparse(); only the trailing argument groups of that call
     * survive in this excerpt: */
                       s->bits,  sizeof(*s->bits),  sizeof(*s->bits),
                       s->codes, sizeof(*s->codes), sizeof(*s->codes),
                       s->syms,  sizeof(*s->syms),  sizeof(*s->syms), 0);
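read_hufftable() gathers count (code length, code, symbol) triples from the stream and feeds the three parallel arrays to ff_init_vlc_sparse(), which builds the lookup table that get_vlc2() later consumes. As a self-contained picture of what such a triple list describes, here is a naive bit-at-a-time decoder over the same kind of table; FFmpeg's VLC code does the equivalent with a prebuilt multi-bit table, and the table contents below are invented for the example:

/* --- illustrative example, not part of photocd.c --- */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint8_t  len;    /* code length in bits */
    uint16_t code;   /* code value, MSB first */
    uint8_t  sym;    /* decoded symbol */
} HuffEntry;

/* Decode one symbol by shifting in bits until the accumulated value and
 * length match a table entry. O(n) per symbol; a real decoder indexes a
 * precomputed table instead. */
static int decode_one(const HuffEntry *tab, int n, const uint8_t *bits, int *pos)
{
    unsigned acc = 0, len = 0;

    while (len < 16) {
        acc = (acc << 1) | bits[(*pos)++];   /* bits[] holds single 0/1 values */
        len++;
        for (int i = 0; i < n; i++)
            if (tab[i].len == len && tab[i].code == acc)
                return tab[i].sym;
    }
    return -1;   /* no entry matched: invalid stream */
}

int main(void)
{
    /* invented prefix-free table: 0 -> 'a', 10 -> 'b', 11 -> 'c' */
    const HuffEntry tab[] = { { 1, 0, 'a' }, { 2, 2, 'b' }, { 2, 3, 'c' } };
    const uint8_t stream[] = { 1, 0, 0, 1, 1 };   /* "10" "0" "11" */
    int pos = 0;

    while (pos < (int)sizeof(stream))
        putchar(decode_one(tab, 3, stream, &pos));
    putchar('\n');   /* prints: bac */
    return 0;
}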
/* excerpt from decode_huff(); elided code is marked "..." */
static av_noinline int decode_huff(AVCodecContext *avctx, AVFrame *frame,
                                   int target_res, int curr_res)
{
    /* ... */
    int start = s->streampos;
    /* ... */
    const int scaling = target_res - curr_res;
    const uint8_t type2idx[] = { 0, 0xff, 1, 2 };
    /* ... */
    while (shiftreg != 0xfffffe) {
        /* ... */
    }
    /* ... */
    idx = type2idx[type];
    /* ... */
    x2 = avctx->width >> (scaling + !!idx);
    for (int x = 0; x < x2; x++) {
        /* ... */
        m = get_vlc2(&g, s->vlc[idx].table, s->vlc[idx].bits, 2);
        /* ... */
    }
    /* ... */
    s->streampos = (s->streampos + 0x6000 + 2047) & ~0x7ff;
    /* ... */
}
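The stream-position updates of the form (pos + 2047) & ~0x7ff use the standard align-up idiom: adding align - 1 and masking off the low bits rounds a value up to the next multiple of a power-of-two alignment (2048 = 0x800 here, whose low-bit mask is 0x7ff). A tiny demonstration; the helper name is made up:

/* --- illustrative example, not part of photocd.c --- */
#include <stdio.h>

/* Round x up to the next multiple of align; align must be a power of two. */
static unsigned align_up(unsigned x, unsigned align)
{
    return (x + align - 1) & ~(align - 1);
}

int main(void)
{
    printf("0x%x\n", align_up(0x6001, 0x800));   /* 0x6800: rounded up */
    printf("0x%x\n", align_up(0x6800, 0x800));   /* 0x6800: already aligned */
    return 0;
}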
/* excerpt from photocd_decode_frame(); elided code is marked "..." */
    const uint8_t *buf = avpkt->data;
    /* ... */
    uint8_t *ptr, *ptr1, *ptr2;
    /* ... */
    if (!memcmp("PCD_OPA", buf, 7)) {
        av_log(avctx, AV_LOG_WARNING, /* ... */
               "reading first thumbnail only\n");
    } else if (avpkt->size < 786432) {
        /* ... */
    } else if (memcmp("PCD_IPI", buf + 0x800, 7)) {
        /* ... */
    }
    /* ... */
    s->orientation = s->thumbnails ? buf[12] & 3 : buf[0x48] & 3;
    /* ... */
    else if (avpkt->size <= 788480)
        /* ... */
    /* ... */
    s->resolution = av_clip(4 - s->lowres, 0, 4);
    /* ... */
    if (s->resolution < 3) {
        /* ... */
        for (int y = 0; y < avctx->height; y += 2) {
            /* ... */
        }
        /* ... */
    }
    /* ... */
    if (s->resolution == 4) {
        /* ... */
        s->streampos = 0xc2000;
        for (int n = 0; n < 3; n++) {
            /* ... */
        }
        /* ... */
        s->streampos = (s->streampos + 2047) & ~0x3ff;
        /* ... */
    }
    /* ... */
    if (s->resolution == 4) {
        /* ... */
        for (int n = 0; n < 3; n++) {
            /* ... */
        }
        /* ... */
        s->streampos = (s->streampos + 2047) & ~0x3ff;
        /* ... */
    }
    /* ... */
    for (int y = 0; y < avctx->height >> 1; y++) {
        for (int x = 0; x < avctx->width >> 1; x++) {
            /* ... */
        }
        /* ... */
    }
/* excerpt from photocd_decode_close(): the three VLC tables are freed */
    for (int i = 0; i < 3; i++)
        /* ... */
#define OFFSET(x) offsetof(PhotoCDContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
/* ... */
    { "lowres", "Lower the decoding resolution by a power of two", /* ... */ },
#define AV_LOG_WARNING
Something somehow does not look correct.
static int get_bits_left(GetBitContext *gb)
enum AVColorSpace colorspace
YUV colorspace type.
static av_cold int photocd_decode_close(AVCodecContext *avctx)
static int photocd_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
static int get_bits_count(const GetBitContext *s)
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
This structure describes decoded (raw) audio or video data.
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
@ AVCOL_RANGE_JPEG
Full range content.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static av_noinline int decode_huff(AVCodecContext *avctx, AVFrame *frame, int target_res, int curr_res)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
#define bit(string, value)
static void skip_bits(GetBitContext *s, int n)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
AVCodec p
The public AVCodec.
@ AVCOL_TRC_IEC61966_2_1
IEC 61966-2-1 (sRGB or sYCC)
enum AVDiscard skip_frame
Skip decoding for selected frames.
int key_frame
1 -> keyframe, 0-> not
static av_noinline void interp_pixels(uint8_t *ptr, int linesize, int width, int height)
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
static av_noinline void interp_lines(uint8_t *ptr, int linesize, int width, int height)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
#define FF_CODEC_DECODE_CB(func)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define CODEC_LONG_NAME(str)
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
@ AVDISCARD_ALL
discard all
#define LIBAVUTIL_VERSION_INT
Describe the class of an AVClass context structure.
enum AVColorRange color_range
MPEG vs JPEG YUV range.
static av_noinline int read_hufftable(AVCodecContext *avctx, VLC *vlc)
@ AVCOL_PRI_BT709
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP 177 Annex B
const char * av_default_item_name(void *ptr)
Return the context name.
@ AV_PICTURE_TYPE_I
Intra.
static av_noinline void interp_lowres(PhotoCDContext *s, AVFrame *picture, int width, int height)
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
static const ImageInfo img_info[6]
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
static av_always_inline int bytestream2_tell(GetByteContext *g)
enum AVPictureType pict_type
Picture type of the frame.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
static const AVClass photocd_class
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
#define i(width, name, range_min, range_max)
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
const char * name
Name of the codec implementation.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
void ff_free_vlc(VLC *vlc)
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is attached.
static const AVOption options[]
main external API structure.
static av_cold int photocd_decode_init(AVCodecContext *avctx)
static av_const int sign_extend(int val, unsigned bits)
This structure stores compressed data.
const FFCodec ff_photocd_decoder
int width
picture width / height.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B