FFmpeg
me_cmp.c
/*
 * DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>

#include "libavutil/attributes.h"
#include "libavutil/internal.h"
#include "libavutil/mem_internal.h"
#include "avcodec.h"
#include "copy_block.h"
#include "simple_idct.h"
#include "me_cmp.h"
#include "mpegvideoenc.h"
#include "config.h"
#include "config_components.h"

/* (i - 256) * (i - 256) */
const uint32_t ff_square_tab[512] = {
    65536, 65025, 64516, 64009, 63504, 63001, 62500, 62001, 61504, 61009, 60516, 60025, 59536, 59049, 58564, 58081,
    57600, 57121, 56644, 56169, 55696, 55225, 54756, 54289, 53824, 53361, 52900, 52441, 51984, 51529, 51076, 50625,
    50176, 49729, 49284, 48841, 48400, 47961, 47524, 47089, 46656, 46225, 45796, 45369, 44944, 44521, 44100, 43681,
    43264, 42849, 42436, 42025, 41616, 41209, 40804, 40401, 40000, 39601, 39204, 38809, 38416, 38025, 37636, 37249,
    36864, 36481, 36100, 35721, 35344, 34969, 34596, 34225, 33856, 33489, 33124, 32761, 32400, 32041, 31684, 31329,
    30976, 30625, 30276, 29929, 29584, 29241, 28900, 28561, 28224, 27889, 27556, 27225, 26896, 26569, 26244, 25921,
    25600, 25281, 24964, 24649, 24336, 24025, 23716, 23409, 23104, 22801, 22500, 22201, 21904, 21609, 21316, 21025,
    20736, 20449, 20164, 19881, 19600, 19321, 19044, 18769, 18496, 18225, 17956, 17689, 17424, 17161, 16900, 16641,
    16384, 16129, 15876, 15625, 15376, 15129, 14884, 14641, 14400, 14161, 13924, 13689, 13456, 13225, 12996, 12769,
    12544, 12321, 12100, 11881, 11664, 11449, 11236, 11025, 10816, 10609, 10404, 10201, 10000, 9801, 9604, 9409,
    9216, 9025, 8836, 8649, 8464, 8281, 8100, 7921, 7744, 7569, 7396, 7225, 7056, 6889, 6724, 6561,
    6400, 6241, 6084, 5929, 5776, 5625, 5476, 5329, 5184, 5041, 4900, 4761, 4624, 4489, 4356, 4225,
    4096, 3969, 3844, 3721, 3600, 3481, 3364, 3249, 3136, 3025, 2916, 2809, 2704, 2601, 2500, 2401,
    2304, 2209, 2116, 2025, 1936, 1849, 1764, 1681, 1600, 1521, 1444, 1369, 1296, 1225, 1156, 1089,
    1024, 961, 900, 841, 784, 729, 676, 625, 576, 529, 484, 441, 400, 361, 324, 289,
    256, 225, 196, 169, 144, 121, 100, 81, 64, 49, 36, 25, 16, 9, 4, 1,
    0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225,
    256, 289, 324, 361, 400, 441, 484, 529, 576, 625, 676, 729, 784, 841, 900, 961,
    1024, 1089, 1156, 1225, 1296, 1369, 1444, 1521, 1600, 1681, 1764, 1849, 1936, 2025, 2116, 2209,
    2304, 2401, 2500, 2601, 2704, 2809, 2916, 3025, 3136, 3249, 3364, 3481, 3600, 3721, 3844, 3969,
    4096, 4225, 4356, 4489, 4624, 4761, 4900, 5041, 5184, 5329, 5476, 5625, 5776, 5929, 6084, 6241,
    6400, 6561, 6724, 6889, 7056, 7225, 7396, 7569, 7744, 7921, 8100, 8281, 8464, 8649, 8836, 9025,
    9216, 9409, 9604, 9801, 10000, 10201, 10404, 10609, 10816, 11025, 11236, 11449, 11664, 11881, 12100, 12321,
    12544, 12769, 12996, 13225, 13456, 13689, 13924, 14161, 14400, 14641, 14884, 15129, 15376, 15625, 15876, 16129,
    16384, 16641, 16900, 17161, 17424, 17689, 17956, 18225, 18496, 18769, 19044, 19321, 19600, 19881, 20164, 20449,
    20736, 21025, 21316, 21609, 21904, 22201, 22500, 22801, 23104, 23409, 23716, 24025, 24336, 24649, 24964, 25281,
    25600, 25921, 26244, 26569, 26896, 27225, 27556, 27889, 28224, 28561, 28900, 29241, 29584, 29929, 30276, 30625,
    30976, 31329, 31684, 32041, 32400, 32761, 33124, 33489, 33856, 34225, 34596, 34969, 35344, 35721, 36100, 36481,
    36864, 37249, 37636, 38025, 38416, 38809, 39204, 39601, 40000, 40401, 40804, 41209, 41616, 42025, 42436, 42849,
    43264, 43681, 44100, 44521, 44944, 45369, 45796, 46225, 46656, 47089, 47524, 47961, 48400, 48841, 49284, 49729,
    50176, 50625, 51076, 51529, 51984, 52441, 52900, 53361, 53824, 54289, 54756, 55225, 55696, 56169, 56644, 57121,
    57600, 58081, 58564, 59049, 59536, 60025, 60516, 61009, 61504, 62001, 62500, 63001, 63504, 64009, 64516, 65025,
};

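/*
 * Sum of squared errors between two pixel blocks, 4/8/16 pixels wide.
 * The squared difference is looked up in ff_square_tab, which is indexed
 * with an offset of 256 so that negative differences map into the table.
 */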
static int sse4_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                  ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int sse8_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                  ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int sse16_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                   ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint32_t *sq = ff_square_tab + 256;

    for (i = 0; i < h; i++) {
        s += sq[pix1[0] - pix2[0]];
        s += sq[pix1[1] - pix2[1]];
        s += sq[pix1[2] - pix2[2]];
        s += sq[pix1[3] - pix2[3]];
        s += sq[pix1[4] - pix2[4]];
        s += sq[pix1[5] - pix2[5]];
        s += sq[pix1[6] - pix2[6]];
        s += sq[pix1[7] - pix2[7]];
        s += sq[pix1[8] - pix2[8]];
        s += sq[pix1[9] - pix2[9]];
        s += sq[pix1[10] - pix2[10]];
        s += sq[pix1[11] - pix2[11]];
        s += sq[pix1[12] - pix2[12]];
        s += sq[pix1[13] - pix2[13]];
        s += sq[pix1[14] - pix2[14]];
        s += sq[pix1[15] - pix2[15]];

        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int sum_abs_dctelem_c(const int16_t *block)
{
    int sum = 0, i;

    for (i = 0; i < 64; i++)
        sum += FFABS(block[i]);
    return sum;
}

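/*
 * Rounded averages of 2 and 4 pixels, used below to emulate half-pel
 * interpolated reference blocks in the _x2/_y2/_xy2 SAD variants.
 */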
#define avg2(a, b) (((a) + (b) + 1) >> 1)
#define avg4(a, b, c, d) (((a) + (b) + (c) + (d) + 2) >> 2)

static inline int pix_abs16_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                              ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - pix2[0]);
        s += abs(pix1[1] - pix2[1]);
        s += abs(pix1[2] - pix2[2]);
        s += abs(pix1[3] - pix2[3]);
        s += abs(pix1[4] - pix2[4]);
        s += abs(pix1[5] - pix2[5]);
        s += abs(pix1[6] - pix2[6]);
        s += abs(pix1[7] - pix2[7]);
        s += abs(pix1[8] - pix2[8]);
        s += abs(pix1[9] - pix2[9]);
        s += abs(pix1[10] - pix2[10]);
        s += abs(pix1[11] - pix2[11]);
        s += abs(pix1[12] - pix2[12]);
        s += abs(pix1[13] - pix2[13]);
        s += abs(pix1[14] - pix2[14]);
        s += abs(pix1[15] - pix2[15]);
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

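/*
 * Median SAD: instead of summing the raw differences, each difference sample
 * V(x) is predicted from its left, top and top-left neighbours with the
 * median predictor (mid_pred of left, top, and left + top - top-left), and
 * the absolute prediction errors are summed.  The first row and the first
 * column fall back to simpler predictors.
 */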
static inline int pix_median_abs16_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                                     ptrdiff_t stride, int h)
{
    int s = 0, i, j;

#define V(x) (pix1[x] - pix2[x])

    s += abs(V(0));
    s += abs(V(1) - V(0));
    s += abs(V(2) - V(1));
    s += abs(V(3) - V(2));
    s += abs(V(4) - V(3));
    s += abs(V(5) - V(4));
    s += abs(V(6) - V(5));
    s += abs(V(7) - V(6));
    s += abs(V(8) - V(7));
    s += abs(V(9) - V(8));
    s += abs(V(10) - V(9));
    s += abs(V(11) - V(10));
    s += abs(V(12) - V(11));
    s += abs(V(13) - V(12));
    s += abs(V(14) - V(13));
    s += abs(V(15) - V(14));

    pix1 += stride;
    pix2 += stride;

    for (i = 1; i < h; i++) {
        s += abs(V(0) - V(-stride));
        for (j = 1; j < 16; j++)
            s += abs(V(j) - mid_pred(V(j-stride), V(j-1), V(j-stride) + V(j-1) - V(j-stride-1)));
        pix1 += stride;
        pix2 += stride;
    }
#undef V
    return s;
}

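/*
 * SAD against half-pel interpolated references: the _x2 variants average
 * horizontal neighbours, _y2 average vertical neighbours and _xy2 average the
 * four surrounding pixels, using the rounded avg2/avg4 macros above to mimic
 * half-pel motion compensation.
 */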
static int pix_abs16_x2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                          ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
        s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
        s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
        s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
        s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
        s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
        s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
        s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
        s += abs(pix1[8] - avg2(pix2[8], pix2[9]));
        s += abs(pix1[9] - avg2(pix2[9], pix2[10]));
        s += abs(pix1[10] - avg2(pix2[10], pix2[11]));
        s += abs(pix1[11] - avg2(pix2[11], pix2[12]));
        s += abs(pix1[12] - avg2(pix2[12], pix2[13]));
        s += abs(pix1[13] - avg2(pix2[13], pix2[14]));
        s += abs(pix1[14] - avg2(pix2[14], pix2[15]));
        s += abs(pix1[15] - avg2(pix2[15], pix2[16]));
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int pix_abs16_y2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                          ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
        s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
        s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
        s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
        s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
        s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
        s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
        s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
        s += abs(pix1[8] - avg2(pix2[8], pix3[8]));
        s += abs(pix1[9] - avg2(pix2[9], pix3[9]));
        s += abs(pix1[10] - avg2(pix2[10], pix3[10]));
        s += abs(pix1[11] - avg2(pix2[11], pix3[11]));
        s += abs(pix1[12] - avg2(pix2[12], pix3[12]));
        s += abs(pix1[13] - avg2(pix2[13], pix3[13]));
        s += abs(pix1[14] - avg2(pix2[14], pix3[14]));
        s += abs(pix1[15] - avg2(pix2[15], pix3[15]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}

static int pix_abs16_xy2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                           ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
        s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
        s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
        s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
        s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
        s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
        s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
        s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
        s += abs(pix1[8] - avg4(pix2[8], pix2[9], pix3[8], pix3[9]));
        s += abs(pix1[9] - avg4(pix2[9], pix2[10], pix3[9], pix3[10]));
        s += abs(pix1[10] - avg4(pix2[10], pix2[11], pix3[10], pix3[11]));
        s += abs(pix1[11] - avg4(pix2[11], pix2[12], pix3[11], pix3[12]));
        s += abs(pix1[12] - avg4(pix2[12], pix2[13], pix3[12], pix3[13]));
        s += abs(pix1[13] - avg4(pix2[13], pix2[14], pix3[13], pix3[14]));
        s += abs(pix1[14] - avg4(pix2[14], pix2[15], pix3[14], pix3[15]));
        s += abs(pix1[15] - avg4(pix2[15], pix2[16], pix3[15], pix3[16]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}

static inline int pix_abs8_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                             ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - pix2[0]);
        s += abs(pix1[1] - pix2[1]);
        s += abs(pix1[2] - pix2[2]);
        s += abs(pix1[3] - pix2[3]);
        s += abs(pix1[4] - pix2[4]);
        s += abs(pix1[5] - pix2[5]);
        s += abs(pix1[6] - pix2[6]);
        s += abs(pix1[7] - pix2[7]);
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static inline int pix_median_abs8_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                                    ptrdiff_t stride, int h)
{
    int s = 0, i, j;

#define V(x) (pix1[x] - pix2[x])

    s += abs(V(0));
    s += abs(V(1) - V(0));
    s += abs(V(2) - V(1));
    s += abs(V(3) - V(2));
    s += abs(V(4) - V(3));
    s += abs(V(5) - V(4));
    s += abs(V(6) - V(5));
    s += abs(V(7) - V(6));

    pix1 += stride;
    pix2 += stride;

    for (i = 1; i < h; i++) {
        s += abs(V(0) - V(-stride));
        for (j = 1; j < 8; j++)
            s += abs(V(j) - mid_pred(V(j-stride), V(j-1), V(j-stride) + V(j-1) - V(j-stride-1)));
        pix1 += stride;
        pix2 += stride;
    }
#undef V
    return s;
}

static int pix_abs8_x2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                         ptrdiff_t stride, int h)
{
    int s = 0, i;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix2[1]));
        s += abs(pix1[1] - avg2(pix2[1], pix2[2]));
        s += abs(pix1[2] - avg2(pix2[2], pix2[3]));
        s += abs(pix1[3] - avg2(pix2[3], pix2[4]));
        s += abs(pix1[4] - avg2(pix2[4], pix2[5]));
        s += abs(pix1[5] - avg2(pix2[5], pix2[6]));
        s += abs(pix1[6] - avg2(pix2[6], pix2[7]));
        s += abs(pix1[7] - avg2(pix2[7], pix2[8]));
        pix1 += stride;
        pix2 += stride;
    }
    return s;
}

static int pix_abs8_y2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                         ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg2(pix2[0], pix3[0]));
        s += abs(pix1[1] - avg2(pix2[1], pix3[1]));
        s += abs(pix1[2] - avg2(pix2[2], pix3[2]));
        s += abs(pix1[3] - avg2(pix2[3], pix3[3]));
        s += abs(pix1[4] - avg2(pix2[4], pix3[4]));
        s += abs(pix1[5] - avg2(pix2[5], pix3[5]));
        s += abs(pix1[6] - avg2(pix2[6], pix3[6]));
        s += abs(pix1[7] - avg2(pix2[7], pix3[7]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}

static int pix_abs8_xy2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
                          ptrdiff_t stride, int h)
{
    int s = 0, i;
    const uint8_t *pix3 = pix2 + stride;

    for (i = 0; i < h; i++) {
        s += abs(pix1[0] - avg4(pix2[0], pix2[1], pix3[0], pix3[1]));
        s += abs(pix1[1] - avg4(pix2[1], pix2[2], pix3[1], pix3[2]));
        s += abs(pix1[2] - avg4(pix2[2], pix2[3], pix3[2], pix3[3]));
        s += abs(pix1[3] - avg4(pix2[3], pix2[4], pix3[3], pix3[4]));
        s += abs(pix1[4] - avg4(pix2[4], pix2[5], pix3[4], pix3[5]));
        s += abs(pix1[5] - avg4(pix2[5], pix2[6], pix3[5], pix3[6]));
        s += abs(pix1[6] - avg4(pix2[6], pix2[7], pix3[6], pix3[7]));
        s += abs(pix1[7] - avg4(pix2[7], pix2[8], pix3[7], pix3[8]));
        pix1 += stride;
        pix2 += stride;
        pix3 += stride;
    }
    return s;
}

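/*
 * Noise-preserving SSE (NSSE): the plain SSE between the two blocks is
 * combined with the absolute difference of their second-order spatial
 * gradients, so that candidates which remove texture/noise present in the
 * source are penalized.  The weight comes from AVCodecContext.nsse_weight
 * (8 if no context is available).
 */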
static int nsse16_c(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
                    ptrdiff_t stride, int h)
{
    int score1 = 0, score2 = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 16; x++)
            score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        if (y + 1 < h) {
            for (x = 0; x < 15; x++)
                score2 += FFABS(s1[x] - s1[x + stride] -
                                s1[x + 1] + s1[x + stride + 1]) -
                          FFABS(s2[x] - s2[x + stride] -
                                s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int nsse8_c(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
                   ptrdiff_t stride, int h)
{
    int score1 = 0, score2 = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++)
            score1 += (s1[x] - s2[x]) * (s1[x] - s2[x]);
        if (y + 1 < h) {
            for (x = 0; x < 7; x++)
                score2 += FFABS(s1[x] - s1[x + stride] -
                                s1[x + 1] + s1[x + stride + 1]) -
                          FFABS(s2[x] - s2[x + stride] -
                                s2[x + 1] + s2[x + stride + 1]);
        }
        s1 += stride;
        s2 += stride;
    }

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int zero_cmp(MpegEncContext *s, const uint8_t *a, const uint8_t *b,
                    ptrdiff_t stride, int h)
{
    return 0;
}

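/*
 * Fill the cmp[6] array with the comparison functions from *c selected by
 * type (one of the FF_CMP_* values); entries marked mpv_only are only usable
 * from an mpegvideo encoder.
 *
 * Illustrative usage sketch (not part of this file); it assumes an
 * AVCodecContext *avctx whose me_cmp field holds an FF_CMP_* value:
 *
 *     MECmpContext mecc;
 *     me_cmp_func sad_fn[6];
 *     ff_me_cmp_init(&mecc, avctx);
 *     if (ff_set_cmp(&mecc, sad_fn, avctx->me_cmp, 1) < 0)
 *         return AVERROR(EINVAL);
 */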
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
{
#define ENTRY(CMP_FLAG, ARRAY, MPVENC_ONLY) \
    [FF_CMP_ ## CMP_FLAG] = { \
        .offset = offsetof(MECmpContext, ARRAY), \
        .mpv_only = MPVENC_ONLY, \
        .available = 1, \
    }
    static const struct {
        char available;
        char mpv_only;
        uint16_t offset;
    } cmp_func_list[] = {
        ENTRY(SAD, sad, 0),
        ENTRY(SSE, sse, 0),
        ENTRY(SATD, hadamard8_diff, 0),
        ENTRY(DCT, dct_sad, 1),
        ENTRY(PSNR, quant_psnr, 1),
        ENTRY(BIT, bit, 1),
        ENTRY(RD, rd, 1),
        ENTRY(VSAD, vsad, 0),
        ENTRY(VSSE, vsse, 0),
        ENTRY(NSSE, nsse, 0),
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
        ENTRY(W53, w53, 0),
        ENTRY(W97, w97, 0),
#endif
        ENTRY(DCTMAX, dct_max, 1),
#if CONFIG_GPL
        ENTRY(DCT264, dct264_sad, 1),
#endif
        ENTRY(MEDIAN_SAD, median_sad, 0),
    };
    const me_cmp_func *me_cmp_func_array;

    type &= 0xFF;

    if (type == FF_CMP_ZERO) {
        for (int i = 0; i < 6; i++)
            cmp[i] = zero_cmp;
        return 0;
    }
    if (type >= FF_ARRAY_ELEMS(cmp_func_list) ||
        !cmp_func_list[type].available ||
        !mpvenc && cmp_func_list[type].mpv_only) {
        av_log(NULL, AV_LOG_ERROR,
               "invalid cmp function selection\n");
        return AVERROR(EINVAL);
    }
    me_cmp_func_array = (const me_cmp_func*)(((const char*)c) + cmp_func_list[type].offset);
    for (int i = 0; i < 6; i++)
        cmp[i] = me_cmp_func_array[i];

    return 0;
}

#define BUTTERFLY2(o1, o2, i1, i2) \
    o1 = (i1) + (i2); \
    o2 = (i1) - (i2);

#define BUTTERFLY1(x, y) \
    { \
        int a, b; \
        a = x; \
        b = y; \
        x = a + b; \
        y = a - b; \
    }

#define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))

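/*
 * SATD: 8x8 Hadamard transform of the difference block (src - dst), applied
 * first along rows and then along columns with the butterfly macros above,
 * followed by the sum of absolute transformed coefficients.
 */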
static int hadamard8_diff8x8_c(MpegEncContext *s, const uint8_t *dst,
                               const uint8_t *src, ptrdiff_t stride, int h)
{
    int i, temp[64], sum = 0;

    for (i = 0; i < 8; i++) {
        // FIXME: try pointer walks
        BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
                   src[stride * i + 0] - dst[stride * i + 0],
                   src[stride * i + 1] - dst[stride * i + 1]);
        BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
                   src[stride * i + 2] - dst[stride * i + 2],
                   src[stride * i + 3] - dst[stride * i + 3]);
        BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
                   src[stride * i + 4] - dst[stride * i + 4],
                   src[stride * i + 5] - dst[stride * i + 5]);
        BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
                   src[stride * i + 6] - dst[stride * i + 6],
                   src[stride * i + 7] - dst[stride * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
        BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
        BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
    }

    for (i = 0; i < 8; i++) {
        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
        BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
        BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);

        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
        BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
        BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);

        sum += BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i]) +
               BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i]) +
               BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i]) +
               BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
    }
    return sum;
}

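/*
 * Intra variant of the Hadamard metric: transforms the source block itself
 * (no reference block) and subtracts the absolute DC term so that the block
 * mean does not dominate the score.
 */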
static int hadamard8_intra8x8_c(MpegEncContext *s, const uint8_t *src,
                                const uint8_t *dummy, ptrdiff_t stride, int h)
{
    int i, temp[64], sum = 0;

    for (i = 0; i < 8; i++) {
        // FIXME: try pointer walks
        BUTTERFLY2(temp[8 * i + 0], temp[8 * i + 1],
                   src[stride * i + 0], src[stride * i + 1]);
        BUTTERFLY2(temp[8 * i + 2], temp[8 * i + 3],
                   src[stride * i + 2], src[stride * i + 3]);
        BUTTERFLY2(temp[8 * i + 4], temp[8 * i + 5],
                   src[stride * i + 4], src[stride * i + 5]);
        BUTTERFLY2(temp[8 * i + 6], temp[8 * i + 7],
                   src[stride * i + 6], src[stride * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 2]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 3]);
        BUTTERFLY1(temp[8 * i + 4], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 5], temp[8 * i + 7]);

        BUTTERFLY1(temp[8 * i + 0], temp[8 * i + 4]);
        BUTTERFLY1(temp[8 * i + 1], temp[8 * i + 5]);
        BUTTERFLY1(temp[8 * i + 2], temp[8 * i + 6]);
        BUTTERFLY1(temp[8 * i + 3], temp[8 * i + 7]);
    }

    for (i = 0; i < 8; i++) {
        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 1 + i]);
        BUTTERFLY1(temp[8 * 2 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 5 + i]);
        BUTTERFLY1(temp[8 * 6 + i], temp[8 * 7 + i]);

        BUTTERFLY1(temp[8 * 0 + i], temp[8 * 2 + i]);
        BUTTERFLY1(temp[8 * 1 + i], temp[8 * 3 + i]);
        BUTTERFLY1(temp[8 * 4 + i], temp[8 * 6 + i]);
        BUTTERFLY1(temp[8 * 5 + i], temp[8 * 7 + i]);

        sum +=
            BUTTERFLYA(temp[8 * 0 + i], temp[8 * 4 + i])
            + BUTTERFLYA(temp[8 * 1 + i], temp[8 * 5 + i])
            + BUTTERFLYA(temp[8 * 2 + i], temp[8 * 6 + i])
            + BUTTERFLYA(temp[8 * 3 + i], temp[8 * 7 + i]);
    }

    sum -= FFABS(temp[8 * 0] + temp[8 * 4]); // -mean

    return sum;
}

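/*
 * DCT-based SAD: forward DCT of the difference block, then the sum of
 * absolute DCT coefficients (via s->sum_abs_dctelem).
 */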
static int dct_sad8x8_c(MpegEncContext *s, const uint8_t *src1,
                        const uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64]);

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
    s->fdsp.fdct(temp);
    return s->sum_abs_dctelem(temp);
}

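/*
 * FF_CMP_DCT264 metric (built only with CONFIG_GPL): an integer 8-point
 * transform in the style of the H.264 8x8 transform is applied to the rows
 * and then to the columns of the difference block, and the absolute
 * transformed coefficients are summed.
 */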
#if CONFIG_GPL
#define DCT8_1D \
    { \
        const int s07 = SRC(0) + SRC(7); \
        const int s16 = SRC(1) + SRC(6); \
        const int s25 = SRC(2) + SRC(5); \
        const int s34 = SRC(3) + SRC(4); \
        const int a0 = s07 + s34; \
        const int a1 = s16 + s25; \
        const int a2 = s07 - s34; \
        const int a3 = s16 - s25; \
        const int d07 = SRC(0) - SRC(7); \
        const int d16 = SRC(1) - SRC(6); \
        const int d25 = SRC(2) - SRC(5); \
        const int d34 = SRC(3) - SRC(4); \
        const int a4 = d16 + d25 + (d07 + (d07 >> 1)); \
        const int a5 = d07 - d34 - (d25 + (d25 >> 1)); \
        const int a6 = d07 + d34 - (d16 + (d16 >> 1)); \
        const int a7 = d16 - d25 + (d34 + (d34 >> 1)); \
        DST(0, a0 + a1); \
        DST(1, a4 + (a7 >> 2)); \
        DST(2, a2 + (a3 >> 1)); \
        DST(3, a5 + (a6 >> 2)); \
        DST(4, a0 - a1); \
        DST(5, a6 - (a5 >> 2)); \
        DST(6, (a2 >> 1) - a3); \
        DST(7, (a4 >> 2) - a7); \
    }

static int dct264_sad8x8_c(MpegEncContext *s, const uint8_t *src1,
                           const uint8_t *src2, ptrdiff_t stride, int h)
{
    int16_t dct[8][8];
    int i, sum = 0;

    s->pdsp.diff_pixels_unaligned(dct[0], src1, src2, stride);

#define SRC(x) dct[i][x]
#define DST(x, v) dct[i][x] = v
    for (i = 0; i < 8; i++)
        DCT8_1D
#undef SRC
#undef DST

#define SRC(x) dct[x][i]
#define DST(x, v) sum += FFABS(v)
    for (i = 0; i < 8; i++)
        DCT8_1D
#undef SRC
#undef DST
    return sum;
}
#endif

static int dct_max8x8_c(MpegEncContext *s, const uint8_t *src1,
                        const uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    int sum = 0, i;

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
    s->fdsp.fdct(temp);

    for (i = 0; i < 64; i++)
        sum = FFMAX(sum, FFABS(temp[i]));

    return sum;
}

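/*
 * Quantization-error metric: the residual is transformed, quantized,
 * dequantized and inverse-transformed with the current qscale, and the
 * squared error against the original residual is summed.
 */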
static int quant_psnr8x8_c(MpegEncContext *s, const uint8_t *src1,
                           const uint8_t *src2, ptrdiff_t stride, int h)
{
    LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
    int16_t *const bak = temp + 64;
    int sum = 0, i;

    s->mb_intra = 0;

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);

    memcpy(bak, temp, 64 * sizeof(int16_t));

    s->block_last_index[0 /* FIXME */] =
        s->dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);
    s->dct_unquantize_inter(s, temp, 0, s->qscale);
    ff_simple_idct_int16_8bit(temp);

    for (i = 0; i < 64; i++)
        sum += (temp[i] - bak[i]) * (temp[i] - bak[i]);

    return sum;
}

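/*
 * Rate-distortion metric: the residual is quantized and its bit cost is
 * estimated from the AC VLC length tables; the block is then reconstructed
 * and the SSE against the original is added, with the rate term scaled by
 * qscale^2.
 */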
static int rd8x8_c(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2,
                   ptrdiff_t stride, int h)
{
    const uint8_t *scantable = s->intra_scantable.permutated;
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
    LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
    int i, last, run, bits, level, distortion, start_i;
    const int esc_length = s->ac_esc_length;
    uint8_t *length, *last_length;

    copy_block8(lsrc1, src1, 8, stride, 8);
    copy_block8(lsrc2, src2, 8, stride, 8);

    s->pdsp.diff_pixels(temp, lsrc1, lsrc2, 8);

    s->block_last_index[0 /* FIXME */] =
    last =
        s->dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);

    bits = 0;

    if (s->mb_intra) {
        start_i = 1;
        length = s->intra_ac_vlc_length;
        last_length = s->intra_ac_vlc_last_length;
        bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
    } else {
        start_i = 0;
        length = s->inter_ac_vlc_length;
        last_length = s->inter_ac_vlc_last_length;
    }

    if (last >= start_i) {
        run = 0;
        for (i = start_i; i < last; i++) {
            int j = scantable[i];
            level = temp[j];

            if (level) {
                level += 64;
                if ((level & (~127)) == 0)
                    bits += length[UNI_AC_ENC_INDEX(run, level)];
                else
                    bits += esc_length;
                run = 0;
            } else
                run++;
        }
        i = scantable[last];

        level = temp[i] + 64;

        av_assert2(level - 64);

        if ((level & (~127)) == 0) {
            bits += last_length[UNI_AC_ENC_INDEX(run, level)];
        } else
            bits += esc_length;
    }

    if (last >= 0) {
        if (s->mb_intra)
            s->dct_unquantize_intra(s, temp, 0, s->qscale);
        else
            s->dct_unquantize_inter(s, temp, 0, s->qscale);
    }

    s->idsp.idct_add(lsrc2, 8, temp);

    distortion = s->sse_cmp[1](NULL, lsrc2, lsrc1, 8, 8);

    return distortion + ((bits * s->qscale * s->qscale * 109 + 64) >> 7);
}

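/*
 * Bit-count metric: quantizes the residual and returns the estimated number
 * of bits needed to code it with the AC VLC length tables (no distortion
 * term).
 */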
static int bit8x8_c(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2,
                    ptrdiff_t stride, int h)
{
    const uint8_t *scantable = s->intra_scantable.permutated;
    LOCAL_ALIGNED_16(int16_t, temp, [64]);
    int i, last, run, bits, level, start_i;
    const int esc_length = s->ac_esc_length;
    uint8_t *length, *last_length;

    s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);

    s->block_last_index[0 /* FIXME */] =
    last =
        s->dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i);

    bits = 0;

    if (s->mb_intra) {
        start_i = 1;
        length = s->intra_ac_vlc_length;
        last_length = s->intra_ac_vlc_last_length;
        bits += s->luma_dc_vlc_length[temp[0] + 256]; // FIXME: chroma
    } else {
        start_i = 0;
        length = s->inter_ac_vlc_length;
        last_length = s->inter_ac_vlc_last_length;
    }

    if (last >= start_i) {
        run = 0;
        for (i = start_i; i < last; i++) {
            int j = scantable[i];
            level = temp[j];

            if (level) {
                level += 64;
                if ((level & (~127)) == 0)
                    bits += length[UNI_AC_ENC_INDEX(run, level)];
                else
                    bits += esc_length;
                run = 0;
            } else
                run++;
        }
        i = scantable[last];

        level = temp[i] + 64;

        av_assert2(level - 64);

        if ((level & (~127)) == 0)
            bits += last_length[UNI_AC_ENC_INDEX(run, level)];
        else
            bits += esc_length;
    }

    return bits;
}

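/*
 * Vertical activity metrics: VSAD/VSSE sum the absolute (or squared)
 * differences between vertically adjacent rows.  The _intra variants operate
 * on a single block, the plain variants on the row-to-row change of the
 * residual (s1 - s2).
 */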
#define VSAD_INTRA(size) \
static int vsad_intra ## size ## _c(MpegEncContext *c, \
                                    const uint8_t *s, const uint8_t *dummy, \
                                    ptrdiff_t stride, int h) \
{ \
    int score = 0, x, y; \
    \
    for (y = 1; y < h; y++) { \
        for (x = 0; x < size; x += 4) { \
            score += FFABS(s[x] - s[x + stride]) + \
                     FFABS(s[x + 1] - s[x + stride + 1]) + \
                     FFABS(s[x + 2] - s[x + 2 + stride]) + \
                     FFABS(s[x + 3] - s[x + 3 + stride]); \
        } \
        s += stride; \
    } \
    \
    return score; \
}
VSAD_INTRA(8)
VSAD_INTRA(16)

#define VSAD(size) \
static int vsad ## size ## _c(MpegEncContext *c, \
                              const uint8_t *s1, const uint8_t *s2, \
                              ptrdiff_t stride, int h) \
{ \
    int score = 0, x, y; \
    \
    for (y = 1; y < h; y++) { \
        for (x = 0; x < size; x++) \
            score += FFABS(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]); \
        s1 += stride; \
        s2 += stride; \
    } \
    \
    return score; \
}
VSAD(8)
VSAD(16)

#define SQ(a) ((a) * (a))
#define VSSE_INTRA(size) \
static int vsse_intra ## size ## _c(MpegEncContext *c, \
                                    const uint8_t *s, const uint8_t *dummy, \
                                    ptrdiff_t stride, int h) \
{ \
    int score = 0, x, y; \
    \
    for (y = 1; y < h; y++) { \
        for (x = 0; x < size; x += 4) { \
            score += SQ(s[x] - s[x + stride]) + \
                     SQ(s[x + 1] - s[x + stride + 1]) + \
                     SQ(s[x + 2] - s[x + stride + 2]) + \
                     SQ(s[x + 3] - s[x + stride + 3]); \
        } \
        s += stride; \
    } \
    \
    return score; \
}
VSSE_INTRA(8)
VSSE_INTRA(16)

#define VSSE(size) \
static int vsse ## size ## _c(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, \
                              ptrdiff_t stride, int h) \
{ \
    int score = 0, x, y; \
    \
    for (y = 1; y < h; y++) { \
        for (x = 0; x < size; x++) \
            score += SQ(s1[x] - s2[x] - s1[x + stride] + s2[x + stride]); \
        s1 += stride; \
        s2 += stride; \
    } \
    \
    return score; \
}
VSSE(8)
VSSE(16)

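/*
 * Build the 16-pixel-wide comparison functions from their 8x8 kernels: the
 * block is split into two 8x8 halves per 8 rows (four halves for h == 16)
 * and the partial scores are summed.
 */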
#define WRAPPER8_16_SQ(name8, name16) \
static int name16(MpegEncContext *s, const uint8_t *dst, const uint8_t *src, \
                  ptrdiff_t stride, int h) \
{ \
    int score = 0; \
    \
    score += name8(s, dst, src, stride, 8); \
    score += name8(s, dst + 8, src + 8, stride, 8); \
    if (h == 16) { \
        dst += 8 * stride; \
        src += 8 * stride; \
        score += name8(s, dst, src, stride, 8); \
        score += name8(s, dst + 8, src + 8, stride, 8); \
    } \
    return score; \
}

WRAPPER8_16_SQ(hadamard8_diff8x8_c, hadamard8_diff16_c)
WRAPPER8_16_SQ(hadamard8_intra8x8_c, hadamard8_intra16_c)
WRAPPER8_16_SQ(dct_sad8x8_c, dct_sad16_c)
#if CONFIG_GPL
WRAPPER8_16_SQ(dct264_sad8x8_c, dct264_sad16_c)
#endif
WRAPPER8_16_SQ(dct_max8x8_c, dct_max16_c)
WRAPPER8_16_SQ(quant_psnr8x8_c, quant_psnr16_c)
WRAPPER8_16_SQ(rd8x8_c, rd16_c)
WRAPPER8_16_SQ(bit8x8_c, bit16_c)

av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
{
    memset(c, 0, sizeof(*c));

    c->sum_abs_dctelem = sum_abs_dctelem_c;

    /* TODO [0] 16 [1] 8 */
    c->pix_abs[0][0] = pix_abs16_c;
    c->pix_abs[0][1] = pix_abs16_x2_c;
    c->pix_abs[0][2] = pix_abs16_y2_c;
    c->pix_abs[0][3] = pix_abs16_xy2_c;
    c->pix_abs[1][0] = pix_abs8_c;
    c->pix_abs[1][1] = pix_abs8_x2_c;
    c->pix_abs[1][2] = pix_abs8_y2_c;
    c->pix_abs[1][3] = pix_abs8_xy2_c;

#define SET_CMP_FUNC(name) \
    c->name[0] = name ## 16_c; \
    c->name[1] = name ## 8x8_c;

    SET_CMP_FUNC(hadamard8_diff)
    c->hadamard8_diff[4] = hadamard8_intra16_c;
    c->hadamard8_diff[5] = hadamard8_intra8x8_c;
    SET_CMP_FUNC(dct_sad)
    SET_CMP_FUNC(dct_max)
#if CONFIG_GPL
    SET_CMP_FUNC(dct264_sad)
#endif
    c->sad[0] = pix_abs16_c;
    c->sad[1] = pix_abs8_c;
    c->sse[0] = sse16_c;
    c->sse[1] = sse8_c;
    c->sse[2] = sse4_c;
    SET_CMP_FUNC(quant_psnr)
    SET_CMP_FUNC(rd)
    SET_CMP_FUNC(bit)
    c->vsad[0] = vsad16_c;
    c->vsad[1] = vsad8_c;
    c->vsad[4] = vsad_intra16_c;
    c->vsad[5] = vsad_intra8_c;
    c->vsse[0] = vsse16_c;
    c->vsse[1] = vsse8_c;
    c->vsse[4] = vsse_intra16_c;
    c->vsse[5] = vsse_intra8_c;
    c->nsse[0] = nsse16_c;
    c->nsse[1] = nsse8_c;
#if CONFIG_SNOW_DECODER || CONFIG_SNOW_ENCODER
    ff_dsputil_init_dwt(c);
#endif

    c->median_sad[0] = pix_median_abs16_c;
    c->median_sad[1] = pix_median_abs8_c;

#if ARCH_AARCH64
    ff_me_cmp_init_aarch64(c, avctx);
#elif ARCH_ARM
    ff_me_cmp_init_arm(c, avctx);
#elif ARCH_PPC
    ff_me_cmp_init_ppc(c, avctx);
#elif ARCH_RISCV
    ff_me_cmp_init_riscv(c, avctx);
#elif ARCH_X86
    ff_me_cmp_init_x86(c, avctx);
#elif ARCH_MIPS
    ff_me_cmp_init_mips(c, avctx);
#endif
}