/* samplecpy(): copy one row of big-endian 16-bit samples into native-endian uint16_t */
for (i = 0; i < n/2; i++) {
    ((uint16_t *)dst)[i] = AV_RB16(src + 2*i);
}
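For reference, a minimal standalone sketch of the same big-endian copy, assuming only the C standard library; read_be16() and copy_be16_samples() are illustrative names, with read_be16() standing in for libavutil's AV_RB16 macro:

#include <stdint.h>
#include <stddef.h>

/* Stand-in for AV_RB16: read one big-endian 16-bit value. */
static uint16_t read_be16(const uint8_t *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);
}

/* Copy n bytes of big-endian 16-bit samples into a native-endian buffer. */
static void copy_be16_samples(uint16_t *dst, const uint8_t *src, size_t n)
{
    for (size_t i = 0; i < n / 2; i++)
        dst[i] = read_be16(src + 2 * i);
}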
/* pnm_decode_frame(): local state */
int buf_size = avpkt->size;
int i, j, n, linesize, h, upgrade = 0, is_mono = 0;
int components, sample_len, ret;
/* pix_fmt switch: n is the byte size of one input row (two cases both use width * 2) */
n = avctx->width * 2;
n = avctx->width * 2;
n = avctx->width * 4;
n = (avctx->width + 7) >> 3;    /* 1 bpp monochrome: 8 pixels packed per byte */
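The (avctx->width + 7) >> 3 row size above is integer division by 8 rounded up, so a partially filled last byte is still counted. A quick standalone check with an illustrative helper name:

#include <assert.h>

/* Bytes needed for `width` 1-bit pixels, packed 8 per byte, MSB first. */
static int pbm_row_bytes(int width)
{
    return (width + 7) >> 3;
}

static void check_row_bytes(void)
{
    assert(pbm_row_bytes(1)  == 1);   /* one pixel still occupies a whole byte */
    assert(pbm_row_bytes(8)  == 1);
    assert(pbm_row_bytes(17) == 3);
}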
/* values read one at a time: ASCII formats (P1-P3), or monochrome samples in a type-7 PAM */
if (s->type < 4 || (is_mono && s->type == 7)) {
    for (i = 0; i < avctx->height; i++) {
        for (j = 0; j < avctx->width * components; j++) {
            /* ... parse one sample value v from the bytestream ... */
            if (sample_len == 16) {
                ((uint16_t*)ptr)[j] = (((1<<sample_len)-1)*v + (s->maxval>>1))/s->maxval;
            }
            /* ... otherwise put_bits() writes the rescaled value ... */
        }
        if (sample_len != 16)
            /* ... flush_put_bits() pads the row's bit writer ... */
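The rescale in the sample_len == 16 branch above maps a value in 0..s->maxval onto the full 0..(2^sample_len - 1) range; adding s->maxval >> 1 before the division rounds to nearest rather than truncating. A standalone sketch of the same arithmetic, with illustrative names:

#include <stdint.h>

/* Rescale v from 0..maxval to 0..(2^bits - 1), rounding to nearest. */
static uint16_t rescale_to_full_range(unsigned v, unsigned maxval, unsigned bits)
{
    return (uint16_t)((((1u << bits) - 1) * v + (maxval >> 1)) / maxval);
}

/* e.g. with maxval = 1000 and bits = 16: 0 -> 0, 500 -> 32768, 1000 -> 65535 */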
/* otherwise, binary (raw) sample data: one input row per iteration */
for (i = 0; i < avctx->height; i++) {
    /* ... upgrade == 0: samplecpy() the row unchanged ... */
    } else if (upgrade == 1) {
        /* 8-bit samples with maxval < 255: widen to full range */
        unsigned int j, f = (255 * 128 + s->maxval / 2) / s->maxval;
        for (j = 0; j < n; j++)
            /* ... each byte becomes (value * f + 64) >> 7 ... */
    } else if (upgrade == 2) {
        /* big-endian 16-bit samples with maxval < 65535: widen to full range */
        unsigned int j, v, f = (65535 * 32768 + s->maxval / 2) / s->maxval;
        for (j = 0; j < n / 2; j++) {
            /* ... v = big-endian 16-bit read from the bytestream ... */
            ((uint16_t *)ptr)[j] = (v * f + 16384) >> 15;
        }
    }
    /* ... advance s->bytestream and ptr by one row ... */
}
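Both upgrade branches fold the division by maxval into a scale factor computed once per frame, so the per-sample work is a multiply, an add and a shift. For the 8-bit case, f approximates 255 * 128 / maxval (7 fractional bits), and (v * f + 64) >> 7 then approximates v * 255 / maxval rounded to nearest; the 16-bit case is the same idea with 15 fractional bits. A standalone sketch of the 8-bit variant, with illustrative names:

#include <stdint.h>

/* Computed once: 255/maxval with 7 fractional bits, rounded. */
static unsigned scale_factor_8bit(unsigned maxval)
{
    return (255u * 128u + maxval / 2) / maxval;
}

/* Per sample: multiply, add the rounding constant, shift; no division. */
static uint8_t widen_sample_8bit(uint8_t v, unsigned f)
{
    return (uint8_t)((v * f + 64) >> 7);
}

/* e.g. maxval = 31 (5-bit data): f = 1053, so 0 -> 0 and 31 -> 255 */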
/* planar YUV 4:2:0 (YUV420P/P9/P10): copy luma rows, then the two chroma planes */
unsigned char *ptr1, *ptr2;
/* ... */
for (i = 0; i < avctx->height; i++) {     /* luma rows, copied with samplecpy() */
    /* ... */
}
/* ... chroma rows are half width and half height (h = avctx->height >> 1) ... */
for (i = 0; i < h; i++) {                 /* one such loop per chroma plane */
    /* ... */
}
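In 4:2:0 each chroma plane is half the luma resolution in both directions, which is why the chroma loop above runs over h = avctx->height >> 1 rows and half the luma row width. A small sketch of the plane geometry, assuming even dimensions and illustrative names:

#include <stddef.h>

struct yuv420_geometry {
    size_t luma_row_bytes, luma_rows;
    size_t chroma_row_bytes, chroma_rows;   /* per chroma plane (Cb or Cr) */
};

/* bytes_per_sample is 1 for 8-bit data, 2 for big-endian 16-bit data */
static struct yuv420_geometry yuv420_planes(int width, int height, int bytes_per_sample)
{
    struct yuv420_geometry g;
    g.luma_row_bytes   = (size_t)width * bytes_per_sample;
    g.luma_rows        = (size_t)height;
    g.chroma_row_bytes = g.luma_row_bytes / 2;
    g.chroma_rows      = (size_t)height / 2;
    return g;
}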
/* 16-bit planar YUV 4:2:0: every big-endian sample is widened to full range */
uint16_t *ptr1, *ptr2;
const int f = (65535 * 32768 + s->maxval / 2) / s->maxval;
/* ... */
n = avctx->width * 2;
/* ... */
for (i = 0; i < avctx->height; i++) {     /* luma plane */
    for (j = 0; j < n / 2; j++) {
        /* ... v = big-endian 16-bit read ... */
        ((uint16_t *)ptr)[j] = (v * f + 16384) >> 15;
    }
    /* ... */
}
ptr1 = (uint16_t*)p->data[1];
ptr2 = (uint16_t*)p->data[2];
/* ... chroma planes: half width and half height ... */
for (i = 0; i < h; i++) {
    for (j = 0; j < n / 2; j++) {
        /* ... */
        ptr1[j] = (v * f + 16384) >> 15;
    }
    /* ... */
    for (j = 0; j < n / 2; j++) {
        /* ... */
        ptr2[j] = (v * f + 16384) >> 15;
    }
    /* ... */
}
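One detail when walking these 16-bit planes: AVFrame linesize values are in bytes, so after each row a uint16_t pointer has to advance by linesize / 2 elements. A hedged sketch of that row walk, with illustrative names (it mirrors the chroma loops above but is not the decoder's own helper):

#include <stdint.h>

/* Copy `rows` rows of big-endian 16-bit samples into a native-endian plane.
 * stride_bytes is the destination's per-row byte stride (it may exceed
 * row_bytes because of padding), so the uint16_t pointer steps by stride_bytes / 2. */
static const uint8_t *copy_plane_be16(uint16_t *dst, const uint8_t *src,
                                      int row_bytes, int rows, int stride_bytes)
{
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < row_bytes / 2; j++)
            dst[j] = (uint16_t)((src[2 * j] << 8) | src[2 * j + 1]);
        src += row_bytes;
        dst += stride_bytes / 2;
    }
    return src;   /* new read position, analogous to s->bytestream advancing */
}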
/* per-format AVCodec registrations, each under its own build-config guard */
#if CONFIG_PGM_DECODER
/* ... */
#if CONFIG_PGMYUV_DECODER
/* ... */
#if CONFIG_PPM_DECODER
/* ... */
#if CONFIG_PBM_DECODER
/* ... */
#if CONFIG_PAM_DECODER
/* ... */