Go to the documentation of this file.
64 c->put_no_rnd_qpel_pixels_tab[0][1] =
67 c->put_no_rnd_qpel_pixels_tab[0][3] =
69 c->put_no_rnd_qpel_pixels_tab[0][4] =
71 c->put_no_rnd_qpel_pixels_tab[0][5] =
73 c->put_no_rnd_qpel_pixels_tab[0][6] =
75 c->put_no_rnd_qpel_pixels_tab[0][7] =
78 c->put_no_rnd_qpel_pixels_tab[0][9] =
81 c->put_no_rnd_qpel_pixels_tab[0][11] =
83 c->put_no_rnd_qpel_pixels_tab[0][12] =
85 c->put_no_rnd_qpel_pixels_tab[0][13] =
87 c->put_no_rnd_qpel_pixels_tab[0][14] =
89 c->put_no_rnd_qpel_pixels_tab[0][15] =
93 c->put_no_rnd_qpel_pixels_tab[1][1] =
96 c->put_no_rnd_qpel_pixels_tab[1][3] =
98 c->put_no_rnd_qpel_pixels_tab[1][4] =
100 c->put_no_rnd_qpel_pixels_tab[1][5] =
102 c->put_no_rnd_qpel_pixels_tab[1][6] =
104 c->put_no_rnd_qpel_pixels_tab[1][7] =
107 c->put_no_rnd_qpel_pixels_tab[1][9] =
110 c->put_no_rnd_qpel_pixels_tab[1][11] =
112 c->put_no_rnd_qpel_pixels_tab[1][12] =
114 c->put_no_rnd_qpel_pixels_tab[1][13] =
116 c->put_no_rnd_qpel_pixels_tab[1][14] =
118 c->put_no_rnd_qpel_pixels_tab[1][15] =
122 c->avg_qpel_pixels_tab[0][1] =
125 c->avg_qpel_pixels_tab[0][3] =
128 c->avg_qpel_pixels_tab[0][5] =
131 c->avg_qpel_pixels_tab[0][7] =
138 c->avg_qpel_pixels_tab[0][13] =
141 c->avg_qpel_pixels_tab[0][15] =
145 c->avg_qpel_pixels_tab[1][1] =
148 c->avg_qpel_pixels_tab[1][3] =
void ff_hv_mc_qpel_avg_dst_aver_hv_src01_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_h_src0_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_hv_src01_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_v_src0_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_avg_dst_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_avg_dst_aver_src0_8width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_copy_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_avg_dst_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_hv_src00_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_no_rnd_aver_src0_8width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_no_rnd_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_h_src1_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_h_src1_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_hv_src10_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_v_src0_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_copy_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_hv_src00_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_avg_dst_aver_src1_8width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_aver_src1_16width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
void ff_horiz_mc_qpel_aver_src1_8width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_no_rnd_aver_src1_8width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_8width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
static atomic_int cpu_flags
void ff_vert_mc_qpel_avg_dst_aver_src1_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_v_src0_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_v_src0_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_hv_src01_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_hv_src00_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_v_src1_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_h_src0_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_aver_src0_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_hv_src01_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_hv_src10_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_no_rnd_aver_src0_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_hv_src00_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_avg_dst_aver_src0_16width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_hv_src11_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_v_src1_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_no_rnd_aver_src0_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_aver_src1_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_aver_src1_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_h_src1_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_no_rnd_8width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_avg_dst_aver_src0_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_avg_dst_8width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_avg_dst_16width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_v_src1_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_no_rnd_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_no_rnd_aver_src1_16width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_h_src0_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_hv_src01_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_hv_src10_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_h_src1_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_hv_src11_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_16width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_v_src0_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_v_src0_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Undefined Behavior: In the C language, some operations are undefined — like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can, and in some cases has, led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c[... text truncated in extraction]
void ff_horiz_mc_qpel_no_rnd_aver_src0_16width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_avg_dst_aver_src0_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_aver_src0_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_h_src0_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_hv_src11_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_no_rnd_aver_src1_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_h_src1_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_aver_src0_16width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_hv_src10_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_h_src0_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_qpeldsp_init_mips(QpelDSPContext *c)
void ff_hv_mc_qpel_aver_hv_src11_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_hv_src01_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_avg_dst_aver_src1_16width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_h_src1_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_avg_dst_aver_src1_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_vert_mc_qpel_no_rnd_aver_src1_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_no_rnd_16width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_hv_src10_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_v_src1_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_hv_src11_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_horiz_mc_qpel_aver_src0_8width_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_aver_hv_src00_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_hv_src11_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_avg_width8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_h_src0_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_v_src1_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_hv_src10_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_aver_v_src1_16x16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_no_rnd_aver_hv_src00_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_hv_mc_qpel_avg_dst_8x8_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
void ff_avg_width16_msa(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)