Libav
me_cmp_init.c
1 /*
2  * SIMD-optimized motion estimation
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
7  *
8  * This file is part of Libav.
9  *
10  * Libav is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * Libav is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with Libav; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 #include "libavutil/attributes.h"
26 #include "libavutil/cpu.h"
27 #include "libavutil/x86/asm.h"
28 #include "libavutil/x86/cpu.h"
29 #include "libavcodec/me_cmp.h"
30 #include "libavcodec/mpegvideo.h"
31 
32 #if HAVE_INLINE_ASM
33 
34 static int sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
35  int line_size, int h)
36 {
37  int tmp;
38 
39  __asm__ volatile (
40  "movl %4, %%ecx \n"
41  "shr $1, %%ecx \n"
42  "pxor %%mm0, %%mm0 \n" /* mm0 = 0 */
43  "pxor %%mm7, %%mm7 \n" /* mm7 holds the sum */
44  "1: \n"
45  "movq (%0), %%mm1 \n" /* mm1 = pix1[0][0 - 7] */
46  "movq (%1), %%mm2 \n" /* mm2 = pix2[0][0 - 7] */
47  "movq (%0, %3), %%mm3 \n" /* mm3 = pix1[1][0 - 7] */
48  "movq (%1, %3), %%mm4 \n" /* mm4 = pix2[1][0 - 7] */
49 
50  /* todo: mm1-mm2, mm3-mm4 */
51  /* algo: subtract mm1 from mm2 with saturation and vice versa */
52  /* OR the results to get absolute difference */
53  "movq %%mm1, %%mm5 \n"
54  "movq %%mm3, %%mm6 \n"
55  "psubusb %%mm2, %%mm1 \n"
56  "psubusb %%mm4, %%mm3 \n"
57  "psubusb %%mm5, %%mm2 \n"
58  "psubusb %%mm6, %%mm4 \n"
59 
60  "por %%mm1, %%mm2 \n"
61  "por %%mm3, %%mm4 \n"
62 
63  /* now convert to 16-bit vectors so we can square them */
64  "movq %%mm2, %%mm1 \n"
65  "movq %%mm4, %%mm3 \n"
66 
67  "punpckhbw %%mm0, %%mm2 \n"
68  "punpckhbw %%mm0, %%mm4 \n"
69  "punpcklbw %%mm0, %%mm1 \n" /* mm1 now spread over (mm1, mm2) */
70  "punpcklbw %%mm0, %%mm3 \n" /* mm4 now spread over (mm3, mm4) */
71 
72  "pmaddwd %%mm2, %%mm2 \n"
73  "pmaddwd %%mm4, %%mm4 \n"
74  "pmaddwd %%mm1, %%mm1 \n"
75  "pmaddwd %%mm3, %%mm3 \n"
76 
77  "lea (%0, %3, 2), %0 \n" /* pix1 += 2 * line_size */
78  "lea (%1, %3, 2), %1 \n" /* pix2 += 2 * line_size */
79 
80  "paddd %%mm2, %%mm1 \n"
81  "paddd %%mm4, %%mm3 \n"
82  "paddd %%mm1, %%mm7 \n"
83  "paddd %%mm3, %%mm7 \n"
84 
85  "decl %%ecx \n"
86  "jnz 1b \n"
87 
88  "movq %%mm7, %%mm1 \n"
89  "psrlq $32, %%mm7 \n" /* shift hi dword to lo */
90  "paddd %%mm7, %%mm1 \n"
91  "movd %%mm1, %2 \n"
92  : "+r" (pix1), "+r" (pix2), "=r" (tmp)
93  : "r" ((x86_reg) line_size), "m" (h)
94  : "%ecx");
95 
96  return tmp;
97 }
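
/* Editor's note, not part of the original file: a minimal scalar sketch of
 * what sse8_mmx() above computes -- the sum of squared differences over an
 * 8-wide, h-high block.  The MMX code obtains the per-byte absolute
 * difference with two saturating subtractions ORed together, widens to
 * 16-bit words, and squares/accumulates with pmaddwd. */
#if 0
static int sse8_scalar_ref(const uint8_t *pix1, const uint8_t *pix2,
                           int line_size, int h)
{
    int sum = 0, x, y;

    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++) {
            int d = pix1[x] - pix2[x];
            sum += d * d;
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return sum;
}
#endif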
98 
99 static int sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
100  int line_size, int h)
101 {
102  int tmp;
103 
104  __asm__ volatile (
105  "movl %4, %%ecx\n"
106  "pxor %%mm0, %%mm0\n" /* mm0 = 0 */
107  "pxor %%mm7, %%mm7\n" /* mm7 holds the sum */
108  "1:\n"
109  "movq (%0), %%mm1\n" /* mm1 = pix1[0 - 7] */
110  "movq (%1), %%mm2\n" /* mm2 = pix2[0 - 7] */
111  "movq 8(%0), %%mm3\n" /* mm3 = pix1[8 - 15] */
112  "movq 8(%1), %%mm4\n" /* mm4 = pix2[8 - 15] */
113 
114  /* todo: mm1-mm2, mm3-mm4 */
115  /* algo: subtract mm1 from mm2 with saturation and vice versa */
116  /* OR the results to get absolute difference */
117  "movq %%mm1, %%mm5\n"
118  "movq %%mm3, %%mm6\n"
119  "psubusb %%mm2, %%mm1\n"
120  "psubusb %%mm4, %%mm3\n"
121  "psubusb %%mm5, %%mm2\n"
122  "psubusb %%mm6, %%mm4\n"
123 
124  "por %%mm1, %%mm2\n"
125  "por %%mm3, %%mm4\n"
126 
127  /* now convert to 16-bit vectors so we can square them */
128  "movq %%mm2, %%mm1\n"
129  "movq %%mm4, %%mm3\n"
130 
131  "punpckhbw %%mm0, %%mm2\n"
132  "punpckhbw %%mm0, %%mm4\n"
133  "punpcklbw %%mm0, %%mm1\n" /* mm1 now spread over (mm1, mm2) */
134  "punpcklbw %%mm0, %%mm3\n" /* mm4 now spread over (mm3, mm4) */
135 
136  "pmaddwd %%mm2, %%mm2\n"
137  "pmaddwd %%mm4, %%mm4\n"
138  "pmaddwd %%mm1, %%mm1\n"
139  "pmaddwd %%mm3, %%mm3\n"
140 
141  "add %3, %0\n"
142  "add %3, %1\n"
143 
144  "paddd %%mm2, %%mm1\n"
145  "paddd %%mm4, %%mm3\n"
146  "paddd %%mm1, %%mm7\n"
147  "paddd %%mm3, %%mm7\n"
148 
149  "decl %%ecx\n"
150  "jnz 1b\n"
151 
152  "movq %%mm7, %%mm1\n"
153  "psrlq $32, %%mm7\n" /* shift hi dword to lo */
154  "paddd %%mm7, %%mm1\n"
155  "movd %%mm1, %2\n"
156  : "+r" (pix1), "+r" (pix2), "=r" (tmp)
157  : "r" ((x86_reg) line_size), "m" (h)
158  : "%ecx");
159 
160  return tmp;
161 }
162 
163 static int hf_noise8_mmx(uint8_t *pix1, int line_size, int h)
164 {
165  int tmp;
166 
167  __asm__ volatile (
168  "movl %3, %%ecx\n"
169  "pxor %%mm7, %%mm7\n"
170  "pxor %%mm6, %%mm6\n"
171 
172  "movq (%0), %%mm0\n"
173  "movq %%mm0, %%mm1\n"
174  "psllq $8, %%mm0\n"
175  "psrlq $8, %%mm1\n"
176  "psrlq $8, %%mm0\n"
177  "movq %%mm0, %%mm2\n"
178  "movq %%mm1, %%mm3\n"
179  "punpcklbw %%mm7, %%mm0\n"
180  "punpcklbw %%mm7, %%mm1\n"
181  "punpckhbw %%mm7, %%mm2\n"
182  "punpckhbw %%mm7, %%mm3\n"
183  "psubw %%mm1, %%mm0\n"
184  "psubw %%mm3, %%mm2\n"
185 
186  "add %2, %0\n"
187 
188  "movq (%0), %%mm4\n"
189  "movq %%mm4, %%mm1\n"
190  "psllq $8, %%mm4\n"
191  "psrlq $8, %%mm1\n"
192  "psrlq $8, %%mm4\n"
193  "movq %%mm4, %%mm5\n"
194  "movq %%mm1, %%mm3\n"
195  "punpcklbw %%mm7, %%mm4\n"
196  "punpcklbw %%mm7, %%mm1\n"
197  "punpckhbw %%mm7, %%mm5\n"
198  "punpckhbw %%mm7, %%mm3\n"
199  "psubw %%mm1, %%mm4\n"
200  "psubw %%mm3, %%mm5\n"
201  "psubw %%mm4, %%mm0\n"
202  "psubw %%mm5, %%mm2\n"
203  "pxor %%mm3, %%mm3\n"
204  "pxor %%mm1, %%mm1\n"
205  "pcmpgtw %%mm0, %%mm3\n\t"
206  "pcmpgtw %%mm2, %%mm1\n\t"
207  "pxor %%mm3, %%mm0\n"
208  "pxor %%mm1, %%mm2\n"
209  "psubw %%mm3, %%mm0\n"
210  "psubw %%mm1, %%mm2\n"
211  "paddw %%mm0, %%mm2\n"
212  "paddw %%mm2, %%mm6\n"
213 
214  "add %2, %0\n"
215  "1:\n"
216 
217  "movq (%0), %%mm0\n"
218  "movq %%mm0, %%mm1\n"
219  "psllq $8, %%mm0\n"
220  "psrlq $8, %%mm1\n"
221  "psrlq $8, %%mm0\n"
222  "movq %%mm0, %%mm2\n"
223  "movq %%mm1, %%mm3\n"
224  "punpcklbw %%mm7, %%mm0\n"
225  "punpcklbw %%mm7, %%mm1\n"
226  "punpckhbw %%mm7, %%mm2\n"
227  "punpckhbw %%mm7, %%mm3\n"
228  "psubw %%mm1, %%mm0\n"
229  "psubw %%mm3, %%mm2\n"
230  "psubw %%mm0, %%mm4\n"
231  "psubw %%mm2, %%mm5\n"
232  "pxor %%mm3, %%mm3\n"
233  "pxor %%mm1, %%mm1\n"
234  "pcmpgtw %%mm4, %%mm3\n\t"
235  "pcmpgtw %%mm5, %%mm1\n\t"
236  "pxor %%mm3, %%mm4\n"
237  "pxor %%mm1, %%mm5\n"
238  "psubw %%mm3, %%mm4\n"
239  "psubw %%mm1, %%mm5\n"
240  "paddw %%mm4, %%mm5\n"
241  "paddw %%mm5, %%mm6\n"
242 
243  "add %2, %0\n"
244 
245  "movq (%0), %%mm4\n"
246  "movq %%mm4, %%mm1\n"
247  "psllq $8, %%mm4\n"
248  "psrlq $8, %%mm1\n"
249  "psrlq $8, %%mm4\n"
250  "movq %%mm4, %%mm5\n"
251  "movq %%mm1, %%mm3\n"
252  "punpcklbw %%mm7, %%mm4\n"
253  "punpcklbw %%mm7, %%mm1\n"
254  "punpckhbw %%mm7, %%mm5\n"
255  "punpckhbw %%mm7, %%mm3\n"
256  "psubw %%mm1, %%mm4\n"
257  "psubw %%mm3, %%mm5\n"
258  "psubw %%mm4, %%mm0\n"
259  "psubw %%mm5, %%mm2\n"
260  "pxor %%mm3, %%mm3\n"
261  "pxor %%mm1, %%mm1\n"
262  "pcmpgtw %%mm0, %%mm3\n\t"
263  "pcmpgtw %%mm2, %%mm1\n\t"
264  "pxor %%mm3, %%mm0\n"
265  "pxor %%mm1, %%mm2\n"
266  "psubw %%mm3, %%mm0\n"
267  "psubw %%mm1, %%mm2\n"
268  "paddw %%mm0, %%mm2\n"
269  "paddw %%mm2, %%mm6\n"
270 
271  "add %2, %0\n"
272  "subl $2, %%ecx\n"
273  " jnz 1b\n"
274 
275  "movq %%mm6, %%mm0\n"
276  "punpcklwd %%mm7, %%mm0\n"
277  "punpckhwd %%mm7, %%mm6\n"
278  "paddd %%mm0, %%mm6\n"
279 
280  "movq %%mm6, %%mm0\n"
281  "psrlq $32, %%mm6\n"
282  "paddd %%mm6, %%mm0\n"
283  "movd %%mm0, %1\n"
284  : "+r" (pix1), "=r" (tmp)
285  : "r" ((x86_reg) line_size), "g" (h - 2)
286  : "%ecx");
287 
288  return tmp;
289 }
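
/* Editor's note, not part of the original file: hf_noise8_mmx() above sums
 * the absolute second-order differences of an 8-wide block -- each row's
 * horizontal first difference, differenced again against the next row --
 * which serves as a rough high-frequency "noise" measure for the NSSE
 * comparison functions below (hf_noise16_mmx() handles the left half itself
 * and adds this routine for columns 8-15).  A scalar sketch: */
#if 0
static int hf_noise8_scalar_ref(const uint8_t *pix, int line_size, int h)
{
    int sum = 0, x, y;

    for (y = 0; y < h - 1; y++) {
        for (x = 0; x < 7; x++) {
            int d0 = pix[x]             - pix[x + 1];
            int d1 = pix[x + line_size] - pix[x + line_size + 1];
            sum += FFABS(d0 - d1);
        }
        pix += line_size;
    }
    return sum;
}
#endif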
290 
291 static int hf_noise16_mmx(uint8_t *pix1, int line_size, int h)
292 {
293  int tmp;
294  uint8_t *pix = pix1;
295 
296  __asm__ volatile (
297  "movl %3, %%ecx\n"
298  "pxor %%mm7, %%mm7\n"
299  "pxor %%mm6, %%mm6\n"
300 
301  "movq (%0), %%mm0\n"
302  "movq 1(%0), %%mm1\n"
303  "movq %%mm0, %%mm2\n"
304  "movq %%mm1, %%mm3\n"
305  "punpcklbw %%mm7, %%mm0\n"
306  "punpcklbw %%mm7, %%mm1\n"
307  "punpckhbw %%mm7, %%mm2\n"
308  "punpckhbw %%mm7, %%mm3\n"
309  "psubw %%mm1, %%mm0\n"
310  "psubw %%mm3, %%mm2\n"
311 
312  "add %2, %0\n"
313 
314  "movq (%0), %%mm4\n"
315  "movq 1(%0), %%mm1\n"
316  "movq %%mm4, %%mm5\n"
317  "movq %%mm1, %%mm3\n"
318  "punpcklbw %%mm7, %%mm4\n"
319  "punpcklbw %%mm7, %%mm1\n"
320  "punpckhbw %%mm7, %%mm5\n"
321  "punpckhbw %%mm7, %%mm3\n"
322  "psubw %%mm1, %%mm4\n"
323  "psubw %%mm3, %%mm5\n"
324  "psubw %%mm4, %%mm0\n"
325  "psubw %%mm5, %%mm2\n"
326  "pxor %%mm3, %%mm3\n"
327  "pxor %%mm1, %%mm1\n"
328  "pcmpgtw %%mm0, %%mm3\n\t"
329  "pcmpgtw %%mm2, %%mm1\n\t"
330  "pxor %%mm3, %%mm0\n"
331  "pxor %%mm1, %%mm2\n"
332  "psubw %%mm3, %%mm0\n"
333  "psubw %%mm1, %%mm2\n"
334  "paddw %%mm0, %%mm2\n"
335  "paddw %%mm2, %%mm6\n"
336 
337  "add %2, %0\n"
338  "1:\n"
339 
340  "movq (%0), %%mm0\n"
341  "movq 1(%0), %%mm1\n"
342  "movq %%mm0, %%mm2\n"
343  "movq %%mm1, %%mm3\n"
344  "punpcklbw %%mm7, %%mm0\n"
345  "punpcklbw %%mm7, %%mm1\n"
346  "punpckhbw %%mm7, %%mm2\n"
347  "punpckhbw %%mm7, %%mm3\n"
348  "psubw %%mm1, %%mm0\n"
349  "psubw %%mm3, %%mm2\n"
350  "psubw %%mm0, %%mm4\n"
351  "psubw %%mm2, %%mm5\n"
352  "pxor %%mm3, %%mm3\n"
353  "pxor %%mm1, %%mm1\n"
354  "pcmpgtw %%mm4, %%mm3\n\t"
355  "pcmpgtw %%mm5, %%mm1\n\t"
356  "pxor %%mm3, %%mm4\n"
357  "pxor %%mm1, %%mm5\n"
358  "psubw %%mm3, %%mm4\n"
359  "psubw %%mm1, %%mm5\n"
360  "paddw %%mm4, %%mm5\n"
361  "paddw %%mm5, %%mm6\n"
362 
363  "add %2, %0\n"
364 
365  "movq (%0), %%mm4\n"
366  "movq 1(%0), %%mm1\n"
367  "movq %%mm4, %%mm5\n"
368  "movq %%mm1, %%mm3\n"
369  "punpcklbw %%mm7, %%mm4\n"
370  "punpcklbw %%mm7, %%mm1\n"
371  "punpckhbw %%mm7, %%mm5\n"
372  "punpckhbw %%mm7, %%mm3\n"
373  "psubw %%mm1, %%mm4\n"
374  "psubw %%mm3, %%mm5\n"
375  "psubw %%mm4, %%mm0\n"
376  "psubw %%mm5, %%mm2\n"
377  "pxor %%mm3, %%mm3\n"
378  "pxor %%mm1, %%mm1\n"
379  "pcmpgtw %%mm0, %%mm3\n\t"
380  "pcmpgtw %%mm2, %%mm1\n\t"
381  "pxor %%mm3, %%mm0\n"
382  "pxor %%mm1, %%mm2\n"
383  "psubw %%mm3, %%mm0\n"
384  "psubw %%mm1, %%mm2\n"
385  "paddw %%mm0, %%mm2\n"
386  "paddw %%mm2, %%mm6\n"
387 
388  "add %2, %0\n"
389  "subl $2, %%ecx\n"
390  " jnz 1b\n"
391 
392  "movq %%mm6, %%mm0\n"
393  "punpcklwd %%mm7, %%mm0\n"
394  "punpckhwd %%mm7, %%mm6\n"
395  "paddd %%mm0, %%mm6\n"
396 
397  "movq %%mm6, %%mm0\n"
398  "psrlq $32, %%mm6\n"
399  "paddd %%mm6, %%mm0\n"
400  "movd %%mm0, %1\n"
401  : "+r" (pix1), "=r" (tmp)
402  : "r" ((x86_reg) line_size), "g" (h - 2)
403  : "%ecx");
404 
405  return tmp + hf_noise8_mmx(pix + 8, line_size, h);
406 }
407 
408 static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
409  int line_size, int h)
410 {
411  int score1, score2;
412 
413  if (c)
414  score1 = c->mecc.sse[0](c, pix1, pix2, line_size, h);
415  else
416  score1 = sse16_mmx(c, pix1, pix2, line_size, h);
417  score2 = hf_noise16_mmx(pix1, line_size, h) -
418  hf_noise16_mmx(pix2, line_size, h);
419 
420  if (c)
421  return score1 + FFABS(score2) * c->avctx->nsse_weight;
422  else
423  return score1 + FFABS(score2) * 8;
424 }
425 
426 static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
427  int line_size, int h)
428 {
429  int score1 = sse8_mmx(c, pix1, pix2, line_size, h);
430  int score2 = hf_noise8_mmx(pix1, line_size, h) -
431  hf_noise8_mmx(pix2, line_size, h);
432 
433  if (c)
434  return score1 + FFABS(score2) * c->avctx->nsse_weight;
435  else
436  return score1 + FFABS(score2) * 8;
437 }
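
/* Editor's note, not part of the original file: NSSE ("noise preserving SSE")
 * biases plain SSE by how much high-frequency detail differs between the two
 * blocks, so that candidates which merely blur the block away are penalized:
 *
 *     score = sse(pix1, pix2) + weight * |hf_noise(pix1) - hf_noise(pix2)|
 *
 * with weight taken from avctx->nsse_weight (8 when no context is available),
 * exactly as nsse8_mmx() and nsse16_mmx() above implement it. */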
438 
439 static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
440  int line_size, int h)
441 {
442  int tmp;
443 
444  assert((((int) pix) & 7) == 0);
445  assert((line_size & 7) == 0);
446 
447 #define SUM(in0, in1, out0, out1) \
448  "movq (%0), %%mm2\n" \
449  "movq 8(%0), %%mm3\n" \
450  "add %2,%0\n" \
451  "movq %%mm2, " #out0 "\n" \
452  "movq %%mm3, " #out1 "\n" \
453  "psubusb " #in0 ", %%mm2\n" \
454  "psubusb " #in1 ", %%mm3\n" \
455  "psubusb " #out0 ", " #in0 "\n" \
456  "psubusb " #out1 ", " #in1 "\n" \
457  "por %%mm2, " #in0 "\n" \
458  "por %%mm3, " #in1 "\n" \
459  "movq " #in0 ", %%mm2\n" \
460  "movq " #in1 ", %%mm3\n" \
461  "punpcklbw %%mm7, " #in0 "\n" \
462  "punpcklbw %%mm7, " #in1 "\n" \
463  "punpckhbw %%mm7, %%mm2\n" \
464  "punpckhbw %%mm7, %%mm3\n" \
465  "paddw " #in1 ", " #in0 "\n" \
466  "paddw %%mm3, %%mm2\n" \
467  "paddw %%mm2, " #in0 "\n" \
468  "paddw " #in0 ", %%mm6\n"
469 
470 
471  __asm__ volatile (
472  "movl %3, %%ecx\n"
473  "pxor %%mm6, %%mm6\n"
474  "pxor %%mm7, %%mm7\n"
475  "movq (%0), %%mm0\n"
476  "movq 8(%0), %%mm1\n"
477  "add %2, %0\n"
478  "jmp 2f\n"
479  "1:\n"
480 
481  SUM(%%mm4, %%mm5, %%mm0, %%mm1)
482  "2:\n"
483  SUM(%%mm0, %%mm1, %%mm4, %%mm5)
484 
485  "subl $2, %%ecx\n"
486  "jnz 1b\n"
487 
488  "movq %%mm6, %%mm0\n"
489  "psrlq $32, %%mm6\n"
490  "paddw %%mm6, %%mm0\n"
491  "movq %%mm0, %%mm6\n"
492  "psrlq $16, %%mm0\n"
493  "paddw %%mm6, %%mm0\n"
494  "movd %%mm0, %1\n"
495  : "+r" (pix), "=r" (tmp)
496  : "r" ((x86_reg) line_size), "m" (h)
497  : "%ecx");
498 
499  return tmp & 0xFFFF;
500 }
501 #undef SUM
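
/* Editor's note, not part of the original file: vsad_intra16_mmx() above is a
 * "vertical SAD" of a single block against itself, i.e. the sum of absolute
 * differences between vertically adjacent pixels over 16 columns and h - 1
 * row pairs.  The accumulation happens in 16-bit lanes, hence the final
 * "& 0xFFFF".  A scalar sketch: */
#if 0
static int vsad_intra16_scalar_ref(const uint8_t *pix, int line_size, int h)
{
    int sum = 0, x, y;

    for (y = 0; y < h - 1; y++) {
        for (x = 0; x < 16; x++)
            sum += FFABS(pix[x + line_size] - pix[x]);
        pix += line_size;
    }
    return sum;
}
#endif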
502 
503 static int vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
504  int line_size, int h)
505 {
506  int tmp;
507 
508  assert((((int) pix) & 7) == 0);
509  assert((line_size & 7) == 0);
510 
511 #define SUM(in0, in1, out0, out1) \
512  "movq (%0), " #out0 "\n" \
513  "movq 8(%0), " #out1 "\n" \
514  "add %2, %0\n" \
515  "psadbw " #out0 ", " #in0 "\n" \
516  "psadbw " #out1 ", " #in1 "\n" \
517  "paddw " #in1 ", " #in0 "\n" \
518  "paddw " #in0 ", %%mm6\n"
519 
520  __asm__ volatile (
521  "movl %3, %%ecx\n"
522  "pxor %%mm6, %%mm6\n"
523  "pxor %%mm7, %%mm7\n"
524  "movq (%0), %%mm0\n"
525  "movq 8(%0), %%mm1\n"
526  "add %2, %0\n"
527  "jmp 2f\n"
528  "1:\n"
529 
530  SUM(%%mm4, %%mm5, %%mm0, %%mm1)
531  "2:\n"
532  SUM(%%mm0, %%mm1, %%mm4, %%mm5)
533 
534  "subl $2, %%ecx\n"
535  "jnz 1b\n"
536 
537  "movd %%mm6, %1\n"
538  : "+r" (pix), "=r" (tmp)
539  : "r" ((x86_reg) line_size), "m" (h)
540  : "%ecx");
541 
542  return tmp;
543 }
544 #undef SUM
545 
546 static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
547  int line_size, int h)
548 {
549  int tmp;
550 
551  assert((((int) pix1) & 7) == 0);
552  assert((((int) pix2) & 7) == 0);
553  assert((line_size & 7) == 0);
554 
555 #define SUM(in0, in1, out0, out1) \
556  "movq (%0), %%mm2\n" \
557  "movq (%1), " #out0 "\n" \
558  "movq 8(%0), %%mm3\n" \
559  "movq 8(%1), " #out1 "\n" \
560  "add %3, %0\n" \
561  "add %3, %1\n" \
562  "psubb " #out0 ", %%mm2\n" \
563  "psubb " #out1 ", %%mm3\n" \
564  "pxor %%mm7, %%mm2\n" \
565  "pxor %%mm7, %%mm3\n" \
566  "movq %%mm2, " #out0 "\n" \
567  "movq %%mm3, " #out1 "\n" \
568  "psubusb " #in0 ", %%mm2\n" \
569  "psubusb " #in1 ", %%mm3\n" \
570  "psubusb " #out0 ", " #in0 "\n" \
571  "psubusb " #out1 ", " #in1 "\n" \
572  "por %%mm2, " #in0 "\n" \
573  "por %%mm3, " #in1 "\n" \
574  "movq " #in0 ", %%mm2\n" \
575  "movq " #in1 ", %%mm3\n" \
576  "punpcklbw %%mm7, " #in0 "\n" \
577  "punpcklbw %%mm7, " #in1 "\n" \
578  "punpckhbw %%mm7, %%mm2\n" \
579  "punpckhbw %%mm7, %%mm3\n" \
580  "paddw " #in1 ", " #in0 "\n" \
581  "paddw %%mm3, %%mm2\n" \
582  "paddw %%mm2, " #in0 "\n" \
583  "paddw " #in0 ", %%mm6\n"
584 
585 
586  __asm__ volatile (
587  "movl %4, %%ecx\n"
588  "pxor %%mm6, %%mm6\n"
589  "pcmpeqw %%mm7, %%mm7\n"
590  "psllw $15, %%mm7\n"
591  "packsswb %%mm7, %%mm7\n"
592  "movq (%0), %%mm0\n"
593  "movq (%1), %%mm2\n"
594  "movq 8(%0), %%mm1\n"
595  "movq 8(%1), %%mm3\n"
596  "add %3, %0\n"
597  "add %3, %1\n"
598  "psubb %%mm2, %%mm0\n"
599  "psubb %%mm3, %%mm1\n"
600  "pxor %%mm7, %%mm0\n"
601  "pxor %%mm7, %%mm1\n"
602  "jmp 2f\n"
603  "1:\n"
604 
605  SUM(%%mm4, %%mm5, %%mm0, %%mm1)
606  "2:\n"
607  SUM(%%mm0, %%mm1, %%mm4, %%mm5)
608 
609  "subl $2, %%ecx\n"
610  "jnz 1b\n"
611 
612  "movq %%mm6, %%mm0\n"
613  "psrlq $32, %%mm6\n"
614  "paddw %%mm6, %%mm0\n"
615  "movq %%mm0, %%mm6\n"
616  "psrlq $16, %%mm0\n"
617  "paddw %%mm6, %%mm0\n"
618  "movd %%mm0, %2\n"
619  : "+r" (pix1), "+r" (pix2), "=r" (tmp)
620  : "r" ((x86_reg) line_size), "m" (h)
621  : "%ecx");
622 
623  return tmp & 0x7FFF;
624 }
625 #undef SUM
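
/* Editor's note, not part of the original file: vsad16_mmx() above applies
 * the same vertical SAD to the residual pix1 - pix2, i.e. it measures how
 * much the difference signal changes from one row to the next.  The MMX code
 * forms the residual with wrapping byte subtraction re-biased by 0x80 (the
 * pcmpeqw/psllw/packsswb sequence builds 0x80 in every byte of %mm7), so very
 * large residuals can alias; ignoring that, the plain-C meaning is: */
#if 0
static int vsad16_scalar_ref(const uint8_t *pix1, const uint8_t *pix2,
                             int line_size, int h)
{
    int sum = 0, x, y;

    for (y = 0; y < h - 1; y++) {
        for (x = 0; x < 16; x++) {
            int d0 = pix1[x]             - pix2[x];
            int d1 = pix1[x + line_size] - pix2[x + line_size];
            sum += FFABS(d1 - d0);
        }
        pix1 += line_size;
        pix2 += line_size;
    }
    return sum;
}
#endif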
626 
627 static int vsad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
628  int line_size, int h)
629 {
630  int tmp;
631 
632  assert((((int) pix1) & 7) == 0);
633  assert((((int) pix2) & 7) == 0);
634  assert((line_size & 7) == 0);
635 
636 #define SUM(in0, in1, out0, out1) \
637  "movq (%0), " #out0 "\n" \
638  "movq (%1), %%mm2\n" \
639  "movq 8(%0), " #out1 "\n" \
640  "movq 8(%1), %%mm3\n" \
641  "add %3, %0\n" \
642  "add %3, %1\n" \
643  "psubb %%mm2, " #out0 "\n" \
644  "psubb %%mm3, " #out1 "\n" \
645  "pxor %%mm7, " #out0 "\n" \
646  "pxor %%mm7, " #out1 "\n" \
647  "psadbw " #out0 ", " #in0 "\n" \
648  "psadbw " #out1 ", " #in1 "\n" \
649  "paddw " #in1 ", " #in0 "\n" \
650  "paddw " #in0 ", %%mm6\n "
651 
652  __asm__ volatile (
653  "movl %4, %%ecx\n"
654  "pxor %%mm6, %%mm6\n"
655  "pcmpeqw %%mm7, %%mm7\n"
656  "psllw $15, %%mm7\n"
657  "packsswb %%mm7, %%mm7\n"
658  "movq (%0), %%mm0\n"
659  "movq (%1), %%mm2\n"
660  "movq 8(%0), %%mm1\n"
661  "movq 8(%1), %%mm3\n"
662  "add %3, %0\n"
663  "add %3, %1\n"
664  "psubb %%mm2, %%mm0\n"
665  "psubb %%mm3, %%mm1\n"
666  "pxor %%mm7, %%mm0\n"
667  "pxor %%mm7, %%mm1\n"
668  "jmp 2f\n"
669  "1:\n"
670 
671  SUM(%%mm4, %%mm5, %%mm0, %%mm1)
672  "2:\n"
673  SUM(%%mm0, %%mm1, %%mm4, %%mm5)
674 
675  "subl $2, %%ecx\n"
676  "jnz 1b\n"
677 
678  "movd %%mm6, %2\n"
679  : "+r" (pix1), "+r" (pix2), "=r" (tmp)
680  : "r" ((x86_reg) line_size), "m" (h)
681  : "%ecx");
682 
683  return tmp;
684 }
685 #undef SUM
686 
687 #define MMABS_MMX(a,z) \
688  "pxor " #z ", " #z " \n\t" \
689  "pcmpgtw " #a ", " #z " \n\t" \
690  "pxor " #z ", " #a " \n\t" \
691  "psubw " #z ", " #a " \n\t"
692 
693 #define MMABS_MMXEXT(a, z) \
694  "pxor " #z ", " #z " \n\t" \
695  "psubw " #a ", " #z " \n\t" \
696  "pmaxsw " #z ", " #a " \n\t"
697 
698 #define MMABS_SSSE3(a,z) \
699  "pabsw " #a ", " #a " \n\t"
700 
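
/* Editor's note, not part of the original file: MMABS_MMX computes |a| per
 * 16-bit lane with the classic sign-mask trick (MMABS_MMXEXT instead does
 * max(a, -a) via pmaxsw, and SSSE3 has pabsw directly).  Scalar sketch of the
 * MMX variant: */
#if 0
static inline int mmabs_mmx_ref(int a)          /* a: one signed 16-bit lane */
{
    int mask = a < 0 ? -1 : 0; /* what "pcmpgtw a, z" leaves in z (z = 0 > a) */
    return (a ^ mask) - mask;  /* flip the bits and add 1 only when negative */
}
#endif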
701 #define MMABS_SUM(a,z, sum) \
702  MMABS(a,z) \
703  "paddusw " #a ", " #sum " \n\t"
704 
705 /* FIXME: HSUM_* saturates at 64k, while an 8x8 hadamard or dct block can get
706  * up to about 100k on extreme inputs. But such blocks are very unlikely in
707  * natural video, and even then there is almost always an alternative
708  * mv/mode with a lower cost. */
709 #define HSUM_MMX(a, t, dst) \
710  "movq " #a ", " #t " \n\t" \
711  "psrlq $32, " #a " \n\t" \
712  "paddusw " #t ", " #a " \n\t" \
713  "movq " #a ", " #t " \n\t" \
714  "psrlq $16, " #a " \n\t" \
715  "paddusw " #t ", " #a " \n\t" \
716  "movd " #a ", " #dst " \n\t" \
717 
718 #define HSUM_MMXEXT(a, t, dst) \
719  "pshufw $0x0E, " #a ", " #t " \n\t" \
720  "paddusw " #t ", " #a " \n\t" \
721  "pshufw $0x01, " #a ", " #t " \n\t" \
722  "paddusw " #t ", " #a " \n\t" \
723  "movd " #a ", " #dst " \n\t" \
724 
725 #define HSUM_SSE2(a, t, dst) \
726  "movhlps " #a ", " #t " \n\t" \
727  "paddusw " #t ", " #a " \n\t" \
728  "pshuflw $0x0E, " #a ", " #t " \n\t" \
729  "paddusw " #t ", " #a " \n\t" \
730  "pshuflw $0x01, " #a ", " #t " \n\t" \
731  "paddusw " #t ", " #a " \n\t" \
732  "movd " #a ", " #dst " \n\t" \
733 
734 #define DCT_SAD4(m, mm, o) \
735  "mov"#m" "#o" + 0(%1), " #mm "2 \n\t" \
736  "mov"#m" "#o" + 16(%1), " #mm "3 \n\t" \
737  "mov"#m" "#o" + 32(%1), " #mm "4 \n\t" \
738  "mov"#m" "#o" + 48(%1), " #mm "5 \n\t" \
739  MMABS_SUM(mm ## 2, mm ## 6, mm ## 0) \
740  MMABS_SUM(mm ## 3, mm ## 7, mm ## 1) \
741  MMABS_SUM(mm ## 4, mm ## 6, mm ## 0) \
742  MMABS_SUM(mm ## 5, mm ## 7, mm ## 1) \
743 
744 #define DCT_SAD_MMX \
745  "pxor %%mm0, %%mm0 \n\t" \
746  "pxor %%mm1, %%mm1 \n\t" \
747  DCT_SAD4(q, %%mm, 0) \
748  DCT_SAD4(q, %%mm, 8) \
749  DCT_SAD4(q, %%mm, 64) \
750  DCT_SAD4(q, %%mm, 72) \
751  "paddusw %%mm1, %%mm0 \n\t" \
752  HSUM(%%mm0, %%mm1, %0)
753 
754 #define DCT_SAD_SSE2 \
755  "pxor %%xmm0, %%xmm0 \n\t" \
756  "pxor %%xmm1, %%xmm1 \n\t" \
757  DCT_SAD4(dqa, %%xmm, 0) \
758  DCT_SAD4(dqa, %%xmm, 64) \
759  "paddusw %%xmm1, %%xmm0 \n\t" \
760  HSUM(%%xmm0, %%xmm1, %0)
761 
762 #define DCT_SAD_FUNC(cpu) \
763 static int sum_abs_dctelem_ ## cpu(int16_t *block) \
764 { \
765  int sum; \
766  __asm__ volatile ( \
767  DCT_SAD \
768  :"=r"(sum) \
769  :"r"(block)); \
770  return sum & 0xFFFF; \
771 }
772 
773 #define DCT_SAD DCT_SAD_MMX
774 #define HSUM(a, t, dst) HSUM_MMX(a, t, dst)
775 #define MMABS(a, z) MMABS_MMX(a, z)
776 DCT_SAD_FUNC(mmx)
777 #undef MMABS
778 #undef HSUM
779 
780 #define HSUM(a, t, dst) HSUM_MMXEXT(a, t, dst)
781 #define MMABS(a, z) MMABS_MMXEXT(a, z)
782 DCT_SAD_FUNC(mmxext)
783 #undef HSUM
784 #undef DCT_SAD
785 
786 #define DCT_SAD DCT_SAD_SSE2
787 #define HSUM(a, t, dst) HSUM_SSE2(a, t, dst)
788 DCT_SAD_FUNC(sse2)
789 #undef MMABS
790 
791 #if HAVE_SSSE3_INLINE
792 #define MMABS(a, z) MMABS_SSSE3(a, z)
793 DCT_SAD_FUNC(ssse3)
794 #undef MMABS
795 #endif
796 #undef HSUM
797 #undef DCT_SAD
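
/* Editor's note, not part of the original file: every sum_abs_dctelem_*()
 * generated above sums the absolute values of the 64 int16_t coefficients of
 * an 8x8 block; because the SIMD accumulation uses paddusw, the result
 * saturates at 0xFFFF (see the FIXME above).  A scalar sketch: */
#if 0
static int sum_abs_dctelem_scalar_ref(int16_t *block)
{
    int sum = 0, i;

    for (i = 0; i < 64; i++)
        sum += FFABS(block[i]);
    return sum; /* the SIMD versions above clamp this to 0xFFFF */
}
#endif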
798 
799 
800 DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
801  0x0000000000000000ULL,
802  0x0001000100010001ULL,
803  0x0002000200020002ULL,
804 };
805 
806 DECLARE_ASM_CONST(8, uint64_t, bone) = 0x0101010101010101LL;
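
/* Editor's note, not part of the original file: these constants feed the
 * half-pel averaging in the SAD helpers below.  round_tab[1] is the +1 of the
 * two-sample average (a + b + 1) >> 1 used by sad8_2_mmx(), round_tab[2] is
 * the +2 of the four-sample average (a + b + c + d + 2) >> 2 used by
 * sad8_4_mmx(), and "bone" (0x01 in every byte) is subtracted from one pavgb
 * operand in sad8_4_mmxext() so that two chained round-up averages come close
 * to the same (sum + 2) >> 2 rounding -- close, but not bit-exact. */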
807 
808 static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
809 {
810  x86_reg len = -(stride * h);
811  __asm__ volatile (
812  ".p2align 4 \n\t"
813  "1: \n\t"
814  "movq (%1, %%"REG_a"), %%mm0 \n\t"
815  "movq (%2, %%"REG_a"), %%mm2 \n\t"
816  "movq (%2, %%"REG_a"), %%mm4 \n\t"
817  "add %3, %%"REG_a" \n\t"
818  "psubusb %%mm0, %%mm2 \n\t"
819  "psubusb %%mm4, %%mm0 \n\t"
820  "movq (%1, %%"REG_a"), %%mm1 \n\t"
821  "movq (%2, %%"REG_a"), %%mm3 \n\t"
822  "movq (%2, %%"REG_a"), %%mm5 \n\t"
823  "psubusb %%mm1, %%mm3 \n\t"
824  "psubusb %%mm5, %%mm1 \n\t"
825  "por %%mm2, %%mm0 \n\t"
826  "por %%mm1, %%mm3 \n\t"
827  "movq %%mm0, %%mm1 \n\t"
828  "movq %%mm3, %%mm2 \n\t"
829  "punpcklbw %%mm7, %%mm0 \n\t"
830  "punpckhbw %%mm7, %%mm1 \n\t"
831  "punpcklbw %%mm7, %%mm3 \n\t"
832  "punpckhbw %%mm7, %%mm2 \n\t"
833  "paddw %%mm1, %%mm0 \n\t"
834  "paddw %%mm3, %%mm2 \n\t"
835  "paddw %%mm2, %%mm0 \n\t"
836  "paddw %%mm0, %%mm6 \n\t"
837  "add %3, %%"REG_a" \n\t"
838  " js 1b \n\t"
839  : "+a" (len)
840  : "r" (blk1 - len), "r" (blk2 - len), "r" ((x86_reg) stride));
841 }
842 
843 static inline void sad8_1_mmxext(uint8_t *blk1, uint8_t *blk2,
844  int stride, int h)
845 {
846  __asm__ volatile (
847  ".p2align 4 \n\t"
848  "1: \n\t"
849  "movq (%1), %%mm0 \n\t"
850  "movq (%1, %3), %%mm1 \n\t"
851  "psadbw (%2), %%mm0 \n\t"
852  "psadbw (%2, %3), %%mm1 \n\t"
853  "paddw %%mm0, %%mm6 \n\t"
854  "paddw %%mm1, %%mm6 \n\t"
855  "lea (%1,%3,2), %1 \n\t"
856  "lea (%2,%3,2), %2 \n\t"
857  "sub $2, %0 \n\t"
858  " jg 1b \n\t"
859  : "+r" (h), "+r" (blk1), "+r" (blk2)
860  : "r" ((x86_reg) stride));
861 }
862 
863 static int sad16_sse2(MpegEncContext *v, uint8_t *blk2, uint8_t *blk1,
864  int stride, int h)
865 {
866  int ret;
867  __asm__ volatile (
868  "pxor %%xmm2, %%xmm2 \n\t"
869  ".p2align 4 \n\t"
870  "1: \n\t"
871  "movdqu (%1), %%xmm0 \n\t"
872  "movdqu (%1, %4), %%xmm1 \n\t"
873  "psadbw (%2), %%xmm0 \n\t"
874  "psadbw (%2, %4), %%xmm1 \n\t"
875  "paddw %%xmm0, %%xmm2 \n\t"
876  "paddw %%xmm1, %%xmm2 \n\t"
877  "lea (%1,%4,2), %1 \n\t"
878  "lea (%2,%4,2), %2 \n\t"
879  "sub $2, %0 \n\t"
880  " jg 1b \n\t"
881  "movhlps %%xmm2, %%xmm0 \n\t"
882  "paddw %%xmm0, %%xmm2 \n\t"
883  "movd %%xmm2, %3 \n\t"
884  : "+r" (h), "+r" (blk1), "+r" (blk2), "=r" (ret)
885  : "r" ((x86_reg) stride));
886  return ret;
887 }
888 
889 static inline void sad8_x2a_mmxext(uint8_t *blk1, uint8_t *blk2,
890  int stride, int h)
891 {
892  __asm__ volatile (
893  ".p2align 4 \n\t"
894  "1: \n\t"
895  "movq (%1), %%mm0 \n\t"
896  "movq (%1, %3), %%mm1 \n\t"
897  "pavgb 1(%1), %%mm0 \n\t"
898  "pavgb 1(%1, %3), %%mm1 \n\t"
899  "psadbw (%2), %%mm0 \n\t"
900  "psadbw (%2, %3), %%mm1 \n\t"
901  "paddw %%mm0, %%mm6 \n\t"
902  "paddw %%mm1, %%mm6 \n\t"
903  "lea (%1,%3,2), %1 \n\t"
904  "lea (%2,%3,2), %2 \n\t"
905  "sub $2, %0 \n\t"
906  " jg 1b \n\t"
907  : "+r" (h), "+r" (blk1), "+r" (blk2)
908  : "r" ((x86_reg) stride));
909 }
910 
911 static inline void sad8_y2a_mmxext(uint8_t *blk1, uint8_t *blk2,
912  int stride, int h)
913 {
914  __asm__ volatile (
915  "movq (%1), %%mm0 \n\t"
916  "add %3, %1 \n\t"
917  ".p2align 4 \n\t"
918  "1: \n\t"
919  "movq (%1), %%mm1 \n\t"
920  "movq (%1, %3), %%mm2 \n\t"
921  "pavgb %%mm1, %%mm0 \n\t"
922  "pavgb %%mm2, %%mm1 \n\t"
923  "psadbw (%2), %%mm0 \n\t"
924  "psadbw (%2, %3), %%mm1 \n\t"
925  "paddw %%mm0, %%mm6 \n\t"
926  "paddw %%mm1, %%mm6 \n\t"
927  "movq %%mm2, %%mm0 \n\t"
928  "lea (%1,%3,2), %1 \n\t"
929  "lea (%2,%3,2), %2 \n\t"
930  "sub $2, %0 \n\t"
931  " jg 1b \n\t"
932  : "+r" (h), "+r" (blk1), "+r" (blk2)
933  : "r" ((x86_reg) stride));
934 }
935 
936 static inline void sad8_4_mmxext(uint8_t *blk1, uint8_t *blk2,
937  int stride, int h)
938 {
939  __asm__ volatile (
940  "movq "MANGLE(bone)", %%mm5 \n\t"
941  "movq (%1), %%mm0 \n\t"
942  "pavgb 1(%1), %%mm0 \n\t"
943  "add %3, %1 \n\t"
944  ".p2align 4 \n\t"
945  "1: \n\t"
946  "movq (%1), %%mm1 \n\t"
947  "movq (%1,%3), %%mm2 \n\t"
948  "pavgb 1(%1), %%mm1 \n\t"
949  "pavgb 1(%1,%3), %%mm2 \n\t"
950  "psubusb %%mm5, %%mm1 \n\t"
951  "pavgb %%mm1, %%mm0 \n\t"
952  "pavgb %%mm2, %%mm1 \n\t"
953  "psadbw (%2), %%mm0 \n\t"
954  "psadbw (%2,%3), %%mm1 \n\t"
955  "paddw %%mm0, %%mm6 \n\t"
956  "paddw %%mm1, %%mm6 \n\t"
957  "movq %%mm2, %%mm0 \n\t"
958  "lea (%1,%3,2), %1 \n\t"
959  "lea (%2,%3,2), %2 \n\t"
960  "sub $2, %0 \n\t"
961  " jg 1b \n\t"
962  : "+r" (h), "+r" (blk1), "+r" (blk2)
963  : "r" ((x86_reg) stride));
964 }
965 
966 static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
967  int stride, int h)
968 {
969  x86_reg len = -(stride * h);
970  __asm__ volatile (
971  ".p2align 4 \n\t"
972  "1: \n\t"
973  "movq (%1, %%"REG_a"), %%mm0 \n\t"
974  "movq (%2, %%"REG_a"), %%mm1 \n\t"
975  "movq (%1, %%"REG_a"), %%mm2 \n\t"
976  "movq (%2, %%"REG_a"), %%mm3 \n\t"
977  "punpcklbw %%mm7, %%mm0 \n\t"
978  "punpcklbw %%mm7, %%mm1 \n\t"
979  "punpckhbw %%mm7, %%mm2 \n\t"
980  "punpckhbw %%mm7, %%mm3 \n\t"
981  "paddw %%mm0, %%mm1 \n\t"
982  "paddw %%mm2, %%mm3 \n\t"
983  "movq (%3, %%"REG_a"), %%mm4 \n\t"
984  "movq (%3, %%"REG_a"), %%mm2 \n\t"
985  "paddw %%mm5, %%mm1 \n\t"
986  "paddw %%mm5, %%mm3 \n\t"
987  "psrlw $1, %%mm1 \n\t"
988  "psrlw $1, %%mm3 \n\t"
989  "packuswb %%mm3, %%mm1 \n\t"
990  "psubusb %%mm1, %%mm4 \n\t"
991  "psubusb %%mm2, %%mm1 \n\t"
992  "por %%mm4, %%mm1 \n\t"
993  "movq %%mm1, %%mm0 \n\t"
994  "punpcklbw %%mm7, %%mm0 \n\t"
995  "punpckhbw %%mm7, %%mm1 \n\t"
996  "paddw %%mm1, %%mm0 \n\t"
997  "paddw %%mm0, %%mm6 \n\t"
998  "add %4, %%"REG_a" \n\t"
999  " js 1b \n\t"
1000  : "+a" (len)
1001  : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
1002  "r" ((x86_reg) stride));
1003 }
1004 
1005 static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
1006 {
1007  x86_reg len = -(stride * h);
1008  __asm__ volatile (
1009  "movq (%1, %%"REG_a"), %%mm0 \n\t"
1010  "movq 1(%1, %%"REG_a"), %%mm2 \n\t"
1011  "movq %%mm0, %%mm1 \n\t"
1012  "movq %%mm2, %%mm3 \n\t"
1013  "punpcklbw %%mm7, %%mm0 \n\t"
1014  "punpckhbw %%mm7, %%mm1 \n\t"
1015  "punpcklbw %%mm7, %%mm2 \n\t"
1016  "punpckhbw %%mm7, %%mm3 \n\t"
1017  "paddw %%mm2, %%mm0 \n\t"
1018  "paddw %%mm3, %%mm1 \n\t"
1019  ".p2align 4 \n\t"
1020  "1: \n\t"
1021  "movq (%2, %%"REG_a"), %%mm2 \n\t"
1022  "movq 1(%2, %%"REG_a"), %%mm4 \n\t"
1023  "movq %%mm2, %%mm3 \n\t"
1024  "movq %%mm4, %%mm5 \n\t"
1025  "punpcklbw %%mm7, %%mm2 \n\t"
1026  "punpckhbw %%mm7, %%mm3 \n\t"
1027  "punpcklbw %%mm7, %%mm4 \n\t"
1028  "punpckhbw %%mm7, %%mm5 \n\t"
1029  "paddw %%mm4, %%mm2 \n\t"
1030  "paddw %%mm5, %%mm3 \n\t"
1031  "movq 16+"MANGLE(round_tab)", %%mm5 \n\t"
1032  "paddw %%mm2, %%mm0 \n\t"
1033  "paddw %%mm3, %%mm1 \n\t"
1034  "paddw %%mm5, %%mm0 \n\t"
1035  "paddw %%mm5, %%mm1 \n\t"
1036  "movq (%3, %%"REG_a"), %%mm4 \n\t"
1037  "movq (%3, %%"REG_a"), %%mm5 \n\t"
1038  "psrlw $2, %%mm0 \n\t"
1039  "psrlw $2, %%mm1 \n\t"
1040  "packuswb %%mm1, %%mm0 \n\t"
1041  "psubusb %%mm0, %%mm4 \n\t"
1042  "psubusb %%mm5, %%mm0 \n\t"
1043  "por %%mm4, %%mm0 \n\t"
1044  "movq %%mm0, %%mm4 \n\t"
1045  "punpcklbw %%mm7, %%mm0 \n\t"
1046  "punpckhbw %%mm7, %%mm4 \n\t"
1047  "paddw %%mm0, %%mm6 \n\t"
1048  "paddw %%mm4, %%mm6 \n\t"
1049  "movq %%mm2, %%mm0 \n\t"
1050  "movq %%mm3, %%mm1 \n\t"
1051  "add %4, %%"REG_a" \n\t"
1052  " js 1b \n\t"
1053  : "+a" (len)
1054  : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
1055  "r" ((x86_reg) stride));
1056 }
1057 
1058 static inline int sum_mmx(void)
1059 {
1060  int ret;
1061  __asm__ volatile (
1062  "movq %%mm6, %%mm0 \n\t"
1063  "psrlq $32, %%mm6 \n\t"
1064  "paddw %%mm0, %%mm6 \n\t"
1065  "movq %%mm6, %%mm0 \n\t"
1066  "psrlq $16, %%mm6 \n\t"
1067  "paddw %%mm0, %%mm6 \n\t"
1068  "movd %%mm6, %0 \n\t"
1069  : "=r" (ret));
1070  return ret & 0xFFFF;
1071 }
1072 
1073 static inline int sum_mmxext(void)
1074 {
1075  int ret;
1076  __asm__ volatile (
1077  "movd %%mm6, %0 \n\t"
1078  : "=r" (ret));
1079  return ret;
1080 }
1081 
1082 static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
1083 {
1084  sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
1085 }
1086 
1087 static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
1088 {
1089  sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
1090 }
1091 
1092 #define PIX_SAD(suf) \
1093 static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2, \
1094  uint8_t *blk1, int stride, int h) \
1095 { \
1096  assert(h == 8); \
1097  __asm__ volatile ( \
1098  "pxor %%mm7, %%mm7 \n\t" \
1099  "pxor %%mm6, %%mm6 \n\t" \
1100  :); \
1101  \
1102  sad8_1_ ## suf(blk1, blk2, stride, 8); \
1103  \
1104  return sum_ ## suf(); \
1105 } \
1106  \
1107 static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
1108  uint8_t *blk1, int stride, int h) \
1109 { \
1110  assert(h == 8); \
1111  __asm__ volatile ( \
1112  "pxor %%mm7, %%mm7 \n\t" \
1113  "pxor %%mm6, %%mm6 \n\t" \
1114  "movq %0, %%mm5 \n\t" \
1115  :: "m" (round_tab[1])); \
1116  \
1117  sad8_x2a_ ## suf(blk1, blk2, stride, 8); \
1118  \
1119  return sum_ ## suf(); \
1120 } \
1121  \
1122 static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
1123  uint8_t *blk1, int stride, int h) \
1124 { \
1125  assert(h == 8); \
1126  __asm__ volatile ( \
1127  "pxor %%mm7, %%mm7 \n\t" \
1128  "pxor %%mm6, %%mm6 \n\t" \
1129  "movq %0, %%mm5 \n\t" \
1130  :: "m" (round_tab[1])); \
1131  \
1132  sad8_y2a_ ## suf(blk1, blk2, stride, 8); \
1133  \
1134  return sum_ ## suf(); \
1135 } \
1136  \
1137 static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
1138  uint8_t *blk1, int stride, int h) \
1139 { \
1140  assert(h == 8); \
1141  __asm__ volatile ( \
1142  "pxor %%mm7, %%mm7 \n\t" \
1143  "pxor %%mm6, %%mm6 \n\t" \
1144  ::); \
1145  \
1146  sad8_4_ ## suf(blk1, blk2, stride, 8); \
1147  \
1148  return sum_ ## suf(); \
1149 } \
1150  \
1151 static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2, \
1152  uint8_t *blk1, int stride, int h) \
1153 { \
1154  __asm__ volatile ( \
1155  "pxor %%mm7, %%mm7 \n\t" \
1156  "pxor %%mm6, %%mm6 \n\t" \
1157  :); \
1158  \
1159  sad8_1_ ## suf(blk1, blk2, stride, h); \
1160  sad8_1_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
1161  \
1162  return sum_ ## suf(); \
1163 } \
1164  \
1165 static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
1166  uint8_t *blk1, int stride, int h) \
1167 { \
1168  __asm__ volatile ( \
1169  "pxor %%mm7, %%mm7 \n\t" \
1170  "pxor %%mm6, %%mm6 \n\t" \
1171  "movq %0, %%mm5 \n\t" \
1172  :: "m" (round_tab[1])); \
1173  \
1174  sad8_x2a_ ## suf(blk1, blk2, stride, h); \
1175  sad8_x2a_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
1176  \
1177  return sum_ ## suf(); \
1178 } \
1179  \
1180 static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
1181  uint8_t *blk1, int stride, int h) \
1182 { \
1183  __asm__ volatile ( \
1184  "pxor %%mm7, %%mm7 \n\t" \
1185  "pxor %%mm6, %%mm6 \n\t" \
1186  "movq %0, %%mm5 \n\t" \
1187  :: "m" (round_tab[1])); \
1188  \
1189  sad8_y2a_ ## suf(blk1, blk2, stride, h); \
1190  sad8_y2a_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
1191  \
1192  return sum_ ## suf(); \
1193 } \
1194  \
1195 static int sad16_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
1196  uint8_t *blk1, int stride, int h) \
1197 { \
1198  __asm__ volatile ( \
1199  "pxor %%mm7, %%mm7 \n\t" \
1200  "pxor %%mm6, %%mm6 \n\t" \
1201  ::); \
1202  \
1203  sad8_4_ ## suf(blk1, blk2, stride, h); \
1204  sad8_4_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
1205  \
1206  return sum_ ## suf(); \
1207 } \
1208 
1209 PIX_SAD(mmx)
1210 PIX_SAD(mmxext)
1211 
1212 #endif /* HAVE_INLINE_ASM */
1213 
1214 int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
1215  int line_size, int h);
1216 
1217 #define hadamard_func(cpu) \
1218  int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1, \
1219  uint8_t *src2, int stride, int h); \
1220  int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1, \
1221  uint8_t *src2, int stride, int h);
1222 
1223 hadamard_func(mmx)
1224 hadamard_func(mmxext)
1225 hadamard_func(sse2)
1226 hadamard_func(ssse3)
1227 
1228 av_cold void ff_me_cmp_init_x86(MECmpContext *c, AVCodecContext *avctx)
1229 {
1230  int cpu_flags = av_get_cpu_flags();
1231 
1232 #if HAVE_INLINE_ASM
1233  if (INLINE_MMX(cpu_flags)) {
1234  c->sum_abs_dctelem = sum_abs_dctelem_mmx;
1235 
1236  c->pix_abs[0][0] = sad16_mmx;
1237  c->pix_abs[0][1] = sad16_x2_mmx;
1238  c->pix_abs[0][2] = sad16_y2_mmx;
1239  c->pix_abs[0][3] = sad16_xy2_mmx;
1240  c->pix_abs[1][0] = sad8_mmx;
1241  c->pix_abs[1][1] = sad8_x2_mmx;
1242  c->pix_abs[1][2] = sad8_y2_mmx;
1243  c->pix_abs[1][3] = sad8_xy2_mmx;
1244 
1245  c->sad[0] = sad16_mmx;
1246  c->sad[1] = sad8_mmx;
1247 
1248  c->sse[0] = sse16_mmx;
1249  c->sse[1] = sse8_mmx;
1250  c->vsad[4] = vsad_intra16_mmx;
1251 
1252  c->nsse[0] = nsse16_mmx;
1253  c->nsse[1] = nsse8_mmx;
1254 
1255  if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
1256  c->vsad[0] = vsad16_mmx;
1257  }
1258  }
1259 
1260  if (INLINE_MMXEXT(cpu_flags)) {
1261  c->sum_abs_dctelem = sum_abs_dctelem_mmxext;
1262 
1263  c->vsad[4] = vsad_intra16_mmxext;
1264 
1265  c->pix_abs[0][0] = sad16_mmxext;
1266  c->pix_abs[1][0] = sad8_mmxext;
1267 
1268  c->sad[0] = sad16_mmxext;
1269  c->sad[1] = sad8_mmxext;
1270 
1271  if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
1272  c->pix_abs[0][1] = sad16_x2_mmxext;
1273  c->pix_abs[0][2] = sad16_y2_mmxext;
1274  c->pix_abs[0][3] = sad16_xy2_mmxext;
1275  c->pix_abs[1][1] = sad8_x2_mmxext;
1276  c->pix_abs[1][2] = sad8_y2_mmxext;
1277  c->pix_abs[1][3] = sad8_xy2_mmxext;
1278 
1279  c->vsad[0] = vsad16_mmxext;
1280  }
1281  }
1282 
1283  if (INLINE_SSE2(cpu_flags)) {
1284  c->sum_abs_dctelem = sum_abs_dctelem_sse2;
1285  }
1286 
1287  if (INLINE_SSE2(cpu_flags) && !(cpu_flags & AV_CPU_FLAG_3DNOW)) {
1288  c->sad[0] = sad16_sse2;
1289  }
1290 
1291 #if HAVE_SSSE3_INLINE
1292  if (INLINE_SSSE3(cpu_flags)) {
1293  c->sum_abs_dctelem = sum_abs_dctelem_ssse3;
1294  }
1295 #endif
1296 #endif /* HAVE_INLINE_ASM */
1297 
1298  if (EXTERNAL_MMX(cpu_flags)) {
1299  c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
1300  c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;
1301  }
1302 
1303  if (EXTERNAL_MMXEXT(cpu_flags)) {
1304  c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
1305  c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
1306  }
1307 
1308  if (EXTERNAL_SSE2(cpu_flags)) {
1309  c->sse[0] = ff_sse16_sse2;
1310 
1311 #if HAVE_ALIGNED_STACK
1312  c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
1313  c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
1314 #endif
1315  }
1316 
1317  if (EXTERNAL_SSSE3(cpu_flags) && HAVE_ALIGNED_STACK) {
1318  c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
1319  c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
1320  }
1321 }
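
/* Editor's sketch, not part of the original file: the generic ff_me_cmp_init()
 * in libavcodec/me_cmp.c installs the C fallbacks and then calls this x86
 * initializer, so callers never pick a CPU-specific routine directly -- they
 * go through the MECmpContext function pointers.  A minimal, hypothetical
 * caller might look like this: */
#if 0
static int sad_of_16x16_block(MECmpContext *mecc, MpegEncContext *s,
                              uint8_t *blk1, uint8_t *blk2, int stride)
{
    /* every me_cmp_func shares the (ctx, blk1, blk2, line_size, h) shape
     * used throughout this file; index 0 selects the 16x16 variant */
    return mecc->sad[0](s, blk1, blk2, stride, 16);
}
#endif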