[xiph-commits] r14336 - in trunk: theora/lib/dec/x86 theora-exp/lib/x86
tterribe at svn.xiph.org
Sun Dec 30 18:22:28 PST 2007
Author: tterribe
Date: 2007-12-30 18:22:21 -0800 (Sun, 30 Dec 2007)
New Revision: 14336
Modified:
trunk/theora-exp/lib/x86/mmxfrag.c
trunk/theora/lib/dec/x86/mmxfrag.c
Log:
MMX optimizations by Nils Pipenbrinck.
See http://lists.xiph.org/pipermail/theora-dev/2007-December/003510.html
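For reference while reading the diffs below, here is a plain-C sketch of what the three reconstruction routines compute for one 8x8 fragment. It is illustrative only: the function and helper names are mine, not from the patch, and the ogg_int16_t typedef normally comes from the Ogg headers.

typedef short ogg_int16_t;

static unsigned char oc_clamp255(int _x){
  return (unsigned char)(_x<0?0:_x>255?255:_x);
}

/*Intra: bias the residue by 128 and saturate to [0,255].*/
static void frag_recon_intra_c(unsigned char *_dst,int _dst_ystride,
 const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++)_dst[j]=oc_clamp255(_residue[i*8+j]+128);
    _dst+=_dst_ystride;
  }
}

/*Inter: add the residue to the motion-compensated predictor.*/
static void frag_recon_inter_c(unsigned char *_dst,int _dst_ystride,
 const unsigned char *_src,int _src_ystride,const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++)_dst[j]=oc_clamp255(_src[j]+_residue[i*8+j]);
    _dst+=_dst_ystride;
    _src+=_src_ystride;
  }
}

/*Inter2: truncating average of two predictors, then add the residue.*/
static void frag_recon_inter2_c(unsigned char *_dst,int _dst_ystride,
 const unsigned char *_src1,int _src1_ystride,const unsigned char *_src2,
 int _src2_ystride,const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++){
      _dst[j]=oc_clamp255(((_src1[j]+_src2[j])>>1)+_residue[i*8+j]);
    }
    _dst+=_dst_ystride;
    _src1+=_src1_ystride;
    _src2+=_src2_ystride;
  }
}

The MMX versions produce the same result for in-range residues, eight pixels per movq, using paddsw for the saturating adds and packuswb for the final clamp to bytes.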
Modified: trunk/theora/lib/dec/x86/mmxfrag.c
===================================================================
--- trunk/theora/lib/dec/x86/mmxfrag.c 2007-12-30 17:42:25 UTC (rev 14335)
+++ trunk/theora/lib/dec/x86/mmxfrag.c 2007-12-31 02:22:21 UTC (rev 14336)
@@ -5,7 +5,7 @@
* GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
* IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
* *
- * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2007 *
+ * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2003 *
* by the Xiph.Org Foundation http://www.xiph.org/ *
* *
********************************************************************
@@ -16,45 +16,127 @@
********************************************************************/
/*MMX acceleration of fragment reconstruction for motion compensation.
- Originally written by Rudolf Marek.*/
+ Originally written by Rudolf Marek.
+ Additional optimization by Nils Pipenbrinck.
+ Note: Loops are unrolled for best performance.
+ The iteration each instruction belongs to is marked in the comments as #i.*/
#include "x86int.h"
#if defined(USE_ASM)
void oc_frag_recon_intra_mmx(unsigned char *_dst,int _dst_ystride,
const ogg_int16_t *_residue){
- int i;
- for(i=8;i-->0;){
- __asm__ __volatile__(
- /*Set mm0 to 0xFFFFFFFFFFFFFFFF.*/
- "pcmpeqw %%mm0,%%mm0\n\t"
- /*First four input values*/
- "movq (%[residue]),%%mm2\n\t"
- /*Set mm0 to 0x8000800080008000.*/
- "psllw $15,%%mm0\n\t"
- /*Next four input values.*/
- "movq 8(%[residue]),%%mm3\n\t"
- /*Set mm0 to 0x0080008000800080.*/
- "psrlw $8,%%mm0\n\t"
- /*_residue+=16*/
- "lea 0x10(%[residue]),%[residue]\n\t"
- /*Set mm1=mm0.*/
- "movq %%mm0,%%mm1\n\t"
- /*Add 128 and saturate to 16 bits.*/
- "paddsw %%mm2,%%mm0\n\t"
- /*Add 128 and saturate to 16 bits.*/
- "paddsw %%mm3,%%mm1\n\t"
- /*Pack saturate with next(high) four values.*/
- "packuswb %%mm1,%%mm0\n\t"
- /*Writeback.*/
- "movq %%mm0,(%[dst])\n\t"
- /*_dst+=_dst_ystride*/
- "lea (%[dst],%[dst_ystride]),%[dst]\n\t"
- :[dst]"+r"(_dst),[residue]"+r"(_residue)
- :[dst_ystride]"r"((long)_dst_ystride)
- :"memory"
- );
- }
+ __asm__ __volatile__(
+ /*Set mm0 to 0xFFFFFFFFFFFFFFFF.*/
+ "pcmpeqw %%mm0,%%mm0\n\t"
+ /*#0 Load low residue.*/
+ "movq 0*8(%[residue]),%%mm1\n\t"
+ /*#0 Load high residue.*/
+ "movq 1*8(%[residue]),%%mm2\n\t"
+ /*Set mm0 to 0x8000800080008000.*/
+ "psllw $15,%%mm0\n\t"
+ /*#1 Load low residue.*/
+ "movq 2*8(%[residue]),%%mm3\n\t"
+ /*#1 Load high residue.*/
+ "movq 3*8(%[residue]),%%mm4\n\t"
+ /*Set mm0 to 0x0080008000800080.*/
+ "psrlw $8,%%mm0\n\t"
+ /*#2 Load low residue.*/
+ "movq 4*8(%[residue]),%%mm5\n\t"
+ /*#2 Load high residue.*/
+ "movq 5*8(%[residue]),%%mm6\n\t"
+ /*#0 Bias low residue.*/
+ "paddsw %%mm0,%%mm1\n\t"
+ /*#0 Bias high residue.*/
+ "paddsw %%mm0,%%mm2\n\t"
+ /*#0 Pack to byte.*/
+ "packuswb %%mm2,%%mm1\n\t"
+ /*#1 Bias low residue.*/
+ "paddsw %%mm0,%%mm3\n\t"
+ /*#1 Bias high residue.*/
+ "paddsw %%mm0,%%mm4\n\t"
+ /*#1 Pack to byte.*/
+ "packuswb %%mm4,%%mm3\n\t"
+ /*#2 Bias low residue.*/
+ "paddsw %%mm0,%%mm5\n\t"
+ /*#2 Bias high residue.*/
+ "paddsw %%mm0,%%mm6\n\t"
+ /*#2 Pack to byte.*/
+ "packuswb %%mm6,%%mm5\n\t"
+ /*#0 Write row.*/
+ "movq %%mm1,(%[dst])\n\t"
+ /*#1 Write row.*/
+ "movq %%mm3,(%[dst],%[dst_ystride])\n\t"
+ /*#2 Write row.*/
+ "movq %%mm5,(%[dst],%[dst_ystride],2)\n\t"
+ /*#3 Load low residue.*/
+ "movq 6*8(%[residue]),%%mm1\n\t"
+ /*#3 Load high residue.*/
+ "movq 7*8(%[residue]),%%mm2\n\t"
+ /*#4 Load low residue.*/
+ "movq 8*8(%[residue]),%%mm3\n\t"
+ /*#4 Load high residue.*/
+ "movq 9*8(%[residue]),%%mm4\n\t"
+ /*#5 Load low residue.*/
+ "movq 10*8(%[residue]),%%mm5\n\t"
+ /*#5 Load high residue.*/
+ "movq 11*8(%[residue]),%%mm6\n\t"
+ /*#3 Bias low residue.*/
+ "paddsw %%mm0,%%mm1\n\t"
+ /*#3 Bias high residue.*/
+ "paddsw %%mm0,%%mm2\n\t"
+ /*#3 Pack to byte.*/
+ "packuswb %%mm2,%%mm1\n\t"
+ /*#4 Bias low residue.*/
+ "paddsw %%mm0,%%mm3\n\t"
+ /*#4 Bias high residue.*/
+ "paddsw %%mm0,%%mm4\n\t"
+ /*#4 Pack to byte.*/
+ "packuswb %%mm4,%%mm3\n\t"
+ /*#5 Bias low residue.*/
+ "paddsw %%mm0,%%mm5\n\t"
+ /*#5 Bias high residue.*/
+ "paddsw %%mm0,%%mm6\n\t"
+ /*#5 Pack to byte.*/
+ "packuswb %%mm6,%%mm5\n\t"
+ /*#3 Write row.*/
+ "movq %%mm1,(%[dst],%[dst_ystride3])\n\t"
+ /*#4 Write row.*/
+ "movq %%mm3,(%[dst4])\n\t"
+ /*#5 Write row.*/
+ "movq %%mm5,(%[dst4],%[dst_ystride])\n\t"
+ /*#6 Load low residue.*/
+ "movq 12*8(%[residue]),%%mm1\n\t"
+ /*#6 Load high residue.*/
+ "movq 13*8(%[residue]),%%mm2\n\t"
+ /*#7 Load low residue.*/
+ "movq 14*8(%[residue]),%%mm3\n\t"
+ /*#7 Load high residue.*/
+ "movq 15*8(%[residue]),%%mm4\n\t"
+ /*#6 Bias low residue.*/
+ "paddsw %%mm0,%%mm1\n\t"
+ /*#6 Bias high residue.*/
+ "paddsw %%mm0,%%mm2\n\t"
+ /*#6 Pack to byte.*/
+ "packuswb %%mm2,%%mm1\n\t"
+ /*#7 Bias low residue.*/
+ "paddsw %%mm0,%%mm3\n\t"
+ /*#7 Bias high residue.*/
+ "paddsw %%mm0,%%mm4\n\t"
+ /*#7 Pack to byte.*/
+ "packuswb %%mm4,%%mm3\n\t"
+ /*#6 Write row.*/
+ "movq %%mm1,(%[dst4],%[dst_ystride],2)\n\t"
+ /*#7 Write row.*/
+ "movq %%mm3,(%[dst4],%[dst_ystride3])\n\t"
+ :
+ :[residue]"r"(_residue),
+ [dst]"r"(_dst),
+ [dst4]"r"(_dst+(_dst_ystride<<2)),
+ [dst_ystride]"r"((long)_dst_ystride),
+ [dst_ystride3]"r"((long)_dst_ystride*3)
+ :"memory"
+ );
}
void oc_frag_recon_inter_mmx(unsigned char *_dst,int _dst_ystride,
@@ -62,31 +144,47 @@
const unsigned char *_src,int _src_ystride,const ogg_int16_t *_residue){
int i;
/*Zero mm0.*/
__asm__ __volatile__("pxor %%mm0,%%mm0\n\t"::);
- for(i=8;i-->0;){
+ for(i=4;i-->0;){
__asm__ __volatile__(
- /*Load mm2 with _src*/
- "movq (%[src]),%%mm2\n\t"
- /*Copy mm2 to mm3.*/
- "movq %%mm2,%%mm3\n\t"
- /*Expand high part of _src to 16 bits.*/
+ /*#0 Load source.*/
+ "movq (%[src]),%%mm3\n\t"
+ /*#1 Load source.*/
+ "movq (%[src],%[src_ystride]),%%mm7\n\t"
+ /*#0 Get copy of src.*/
+ "movq %%mm3,%%mm4\n\t"
+ /*#0 Expand high source.*/
+ "punpckhbw %%mm0,%%mm4\n\t"
+ /*#0 Expand low source.*/
+ "punpcklbw %%mm0,%%mm3\n\t"
+ /*#0 Add residue high.*/
+ "paddsw 8(%[residue]),%%mm4\n\t"
+ /*#1 Get copy of src.*/
+ "movq %%mm7,%%mm2\n\t"
+ /*#0 Add residue low.*/
+ "paddsw (%[residue]), %%mm3\n\t"
+ /*#1 Expand high source.*/
"punpckhbw %%mm0,%%mm2\n\t"
- /*Expand low part of _src to 16 bits.*/
- "punpcklbw %%mm0,%%mm3\n\t"
- /*Add low part with low part of residue.*/
- "paddsw (%[residue]),%%mm3\n\t"
- /*High with high.*/
- "paddsw 8(%[residue]),%%mm2\n\t"
- /*Pack and saturate to mm3.*/
- "packuswb %%mm2,%%mm3\n\t"
- /*_src+=_src_ystride*/
- "lea (%[src],%[src_ystride]),%[src]\n\t"
- /*_residue+=16*/
- "lea 0x10(%[residue]),%[residue]\n\t"
- /*Put mm3 to dest.*/
+ /*#0 Pack final row pixels.*/
+ "packuswb %%mm4,%%mm3\n\t"
+ /*#1 Expand low source.*/
+ "punpcklbw %%mm0,%%mm7\n\t"
+ /*#1 Add residue low.*/
+ "paddsw 16(%[residue]),%%mm7\n\t"
+ /*#1 Add residue high.*/
+ "paddsw 24(%[residue]),%%mm2\n\t"
+ /*Advance residue.*/
+ "lea 32(%[residue]),%[residue]\n\t"
+ /*#1 Pack final row pixels.*/
+ "packuswb %%mm2,%%mm7\n\t"
+ /*Advance src.*/
+ "lea (%[src],%[src_ystride],2),%[src]\n\t"
+ /*#0 Write row.*/
"movq %%mm3,(%[dst])\n\t"
- /*_dst+=_dst_ystride*/
- "lea (%[dst],%[dst_ystride]),%[dst]\n\t"
- :[dst]"+r"(_dst),[src]"+r"(_src),[residue]"+r"(_residue)
+ /*#1 Write row.*/
+ "movq %%mm7,(%[dst],%[dst_ystride])\n\t"
+ /*Advance dst.*/
+ "lea (%[dst],%[dst_ystride],2),%[dst]\n\t"
+ :[residue]"+r"(_residue),[dst]"+r"(_dst),[src]"+r"(_src)
:[dst_ystride]"r"((long)_dst_ystride),
[src_ystride]"r"((long)_src_ystride)
:"memory"
@@ -94,147 +192,98 @@
}
}
-#if defined(__amd64__)||defined(__x86_64__)
-
void oc_frag_recon_inter2_mmx(unsigned char *_dst,int _dst_ystride,
const unsigned char *_src1,int _src1_ystride,const unsigned char *_src2,
int _src2_ystride,const ogg_int16_t *_residue){
int i;
- __asm__ __volatile__(
- /*Zero mm0.*/
- "pxor %%mm0,%%mm0\n\t"
- /*Load mm2 with _src1.*/
- "movq (%[src1]),%%mm2\n\t"
- :[src1]"+r"(_src1)
- :
- );
- for(i=8;i-->0;){
+ /*NOTE: This assumes that
+ _dst_ystride==_src1_ystride&&_dst_ystride==_src2_ystride.
+ This is currently always the case, but a slower fallback version will need
+ to be written if it ever is not.*/
+ /*Zero mm7.*/
+ __asm__ __volatile__("pxor %%mm7,%%mm7\n\t"::);
+ for(i=4;i-->0;){
__asm__ __volatile__(
- /*Packed _src2.*/
- "movq (%[src2]),%%mm4\n\t"
- /*Copy packed src1 to mm3.*/
- "movq %%mm2,%%mm3\n\t"
- /*Copy packed src2 to mm5.*/
- "movq %%mm4,%%mm5\n\t"
- /*Expand low part of src1 to mm2.*/
- "punpcklbw %%mm0,%%mm2\n\t"
- /*Expand low part of src2 to mm4.*/
- "punpcklbw %%mm0,%%mm4\n\t"
- /*_src1+=_src1_ystride*/
- "lea (%[src1],%[src1_ystride]),%[src1]\n\t"
- /*Expand high part of src1 to mm3.*/
- "punpckhbw %%mm0,%%mm3\n\t"
- /*Expand high part of src2 to mm5.*/
- "punpckhbw %%mm0,%%mm5\n\t"
- /*Add low parts of src1 and src2.*/
- "paddsw %%mm2,%%mm4\n\t"
- /*Add high parts of src1 and src2.*/
- "paddsw %%mm3,%%mm5\n\t"
- /*_src2+=_src2_ystride.*/
- "lea (%[src2],%[src2_ystride]),%[src2]\n\t"
- /*Load mm2 with _src1.*/
- "movq (%[src1]),%%mm2\n\t"
- /*Shift logical right by 1.*/
- "psrlw $1,%%mm4\n\t"
- /*Shift logical right by 1.*/
- "psrlw $1,%%mm5\n\t"
- /*Add low parts of residue.*/
- "paddsw (%[residue]),%%mm4\n\t"
- /*Add high parts of residue.*/
- "paddsw 8(%[residue]),%%mm5\n\t"
- /*Pack saturate high to low.*/
- "packuswb %%mm5,%%mm4\n\t"
- /*_residue+=16.*/
- "lea 0x10(%[residue]),%[residue]\n\t"
- /*Write to dst.*/
- "movq %%mm4,(%[dst])\n\t"
- /*_dst+=_dst_ystride*/
- "lea (%[dst],%[dst_ystride]),%[dst]\n\t"
+ "movq (%[src1]),%%mm0\n\t"
+ /*#0 Load src1.*/
+ "movq (%[src2]),%%mm2\n\t"
+ /*#0 Load src2.*/
+ "movq %%mm0,%%mm1\n\t"
+ /*#0 Copy src1.*/
+ "movq %%mm2,%%mm3\n\t"
+ /*#0 Copy src2.*/
+ "movq (%[src1],%[ystride]),%%mm4\n\t"
+ /*#1 Load src1.*/
+ "punpcklbw %%mm7,%%mm0\n\t"
+ /*#0 Unpack lower src1.*/
+ "movq (%[src2],%[ystride]),%%mm5\n\t"
+ /*#1 Load src2.*/
+ "punpckhbw %%mm7,%%mm1\n\t"
+ /*#0 Unpack higher src1.*/
+ "punpcklbw %%mm7,%%mm2\n\t"
+ /*#0 Unpack lower src2.*/
+ "punpckhbw %%mm7,%%mm3\n\t"
+ /*#0 Unpack higher src2.*/
+ "lea (%[src1],%[ystride],2),%[src1]\n\t"
+ /*Advance src1 ptr.*/
+ "lea (%[src2],%[ystride],2),%[src2]\n\t"
+ /*Advance src2 ptr.*/
+ "paddsw %%mm2,%%mm0\n\t"
+ /*#0 Lower src1+src2.*/
+ "paddsw %%mm3,%%mm1\n\t"
+ /*#0 Higher src1+src2.*/
+ "movq %%mm4,%%mm2\n\t"
+ /*#1 Copy src1.*/
+ "psraw $1,%%mm0\n\t"
+ /*#0 Build lo average.*/
+ "movq %%mm5,%%mm3\n\t"
+ /*#1 Copy src2.*/
+ "punpcklbw %%mm7,%%mm4\n\t"
+ /*#1 Unpack lower src1.*/
+ "psraw $1,%%mm1\n\t"
+ /*#0 Build hi average.*/
+ "punpckhbw %%mm7,%%mm2\n\t"
+ /*#1 Unpack higher src1.*/
+ "paddsw (%[residue]),%%mm0\n\t"
+ /*#0 low+=residue.*/
+ "punpcklbw %%mm7,%%mm5\n\t"
+ /*#1 Unpack lower src2.*/
+ "paddsw 8(%[residue]),%%mm1\n\t"
+ /*#0 high+=residue.*/
+ "punpckhbw %%mm7,%%mm3\n\t"
+ /*#1 Unpack higher src2.*/
+ "paddsw %%mm4,%%mm5\n\t"
+ /*#1 Lower src1+src2.*/
+ "packuswb %%mm1,%%mm0\n\t"
+ /*#0 Pack and saturate.*/
+ "paddsw %%mm2,%%mm3\n\t"
+ /*#1 Higher src1+src2.*/
+ "movq %%mm0,(%[dst])\n\t"
+ /*#0 Write row.*/
+ "psraw $1,%%mm5\n\t"
+ /*#1 Build lo average.*/
+ "psraw $1,%%mm3\n\t"
+ /*#1 Build hi average.*/
+ "paddsw 16(%[residue]),%%mm5\n\t"
+ /*#1 low+=residue.*/
+ "paddsw 24(%[residue]),%%mm3\n\t"
+ /*#1 high+=residue.*/
+ "packuswb %%mm3,%%mm5\n\t"
+ /*#1 Pack and saturate.*/
+ "movq %%mm5,(%[dst],%[ystride])\n\t"
+ /*#1 Write row ptr.*/
+ "add $32,%[residue]\n\t"
+ /*Advance residue ptr.*/
+ "lea (%[dst],%[ystride],2),%[dst]\n\t"
+ /*Advance dest ptr.*/
:[dst]"+r"(_dst),[residue]"+r"(_residue),
[src1]"+r"(_src1),[src2]"+r"(_src2)
- :[dst_ystride]"r"((long)_dst_ystride),
- [src1_ystride]"r"((long)_src1_ystride),
- [src2_ystride]"r"((long)_src2_ystride)
+ :[ystride]"r"((long)_dst_ystride)
:"memory"
);
}
}
-#else
-
-void oc_frag_recon_inter2_mmx(unsigned char *_dst,int _dst_ystride,
- const unsigned char *_src1,int _src1_ystride,const unsigned char *_src2,
- int _src2_ystride,const ogg_int16_t *_residue){
- long a;
- int i;
- __asm__ __volatile__(
- /*Zero mm0.*/
- "pxor %%mm0,%%mm0\n\t"
- /*Load mm2 with _src1.*/
- "movq (%[src1]),%%mm2\n\t"
- :[src1]"+r"(_src1)
- :
- );
- for(i=8;i-->0;){
- __asm__ __volatile__(
- /*Packed _src2.*/
- "movq (%[src2]),%%mm4\n\t"
- /*Copy packed src1 to mm3.*/
- "movq %%mm2,%%mm3\n\t"
- /*Copy packed src2 to mm5.*/
- "movq %%mm4,%%mm5\n\t"
- /*eax=_src1_ystride*/
- "mov %[src1_ystride],%[a]\n\t"
- /*Expand low part of src1 to mm2.*/
- "punpcklbw %%mm0,%%mm2\n\t"
- /*Expand low part of src2 to mm4.*/
- "punpcklbw %%mm0,%%mm4\n\t"
- /*_src1+=_src1_ystride*/
- "lea (%[src1],%[a]),%[src1]\n\t"
- /*Expand high part of src1 to mm3.*/
- "punpckhbw %%mm0,%%mm3\n\t"
- /*Expand high part of src2 to mm5.*/
- "punpckhbw %%mm0,%%mm5\n\t"
- /*eax=_src2_ystride*/
- "mov %[src2_ystride],%[a]\n\t"
- /*Add low parts of src1 and src2.*/
- "paddsw %%mm2,%%mm4\n\t"
- /*Add high parts of src1 and src2.*/
- "paddsw %%mm3,%%mm5\n\t"
- /*_src2+=_src2_ystride.*/
- "lea (%[src2],%[a]),%[src2]\n\t"
- /*Load mm2 with _src1.*/
- "movq (%[src1]),%%mm2\n\t"
- /*Shift logical right by 1.*/
- "psrlw $1,%%mm4\n\t"
- /*Shift logical right by 1.*/
- "psrlw $1,%%mm5\n\t"
- /*Add low parts of residue.*/
- "paddsw (%[residue]),%%mm4\n\t"
- /*Add high parts of residue.*/
- "paddsw 8(%[residue]),%%mm5\n\t"
- /*eax=_dst_ystride.*/
- "mov %[dst_ystride],%[a]\n\t"
- /*Pack saturate high to low.*/
- "packuswb %%mm5,%%mm4\n\t"
- /*_residue+=16.*/
- "lea 0x10(%[residue]),%[residue]\n\t"
- /*Write to dst.*/
- "movq %%mm4,(%[dst])\n\t"
- /*_dst+=_dst_ystride*/
- "lea (%[dst],%[a]),%[dst]\n\t"
- :[a]"=&a"(a),[dst]"+r"(_dst),[residue]"+r"(_residue),
- [src1]"+r"(_src1),[src2]"+r"(_src2)
- :[dst_ystride]"m"((long)_dst_ystride),
- [src1_ystride]"m"((long)_src1_ystride),
- [src2_ystride]"m"((long)_src2_ystride)
- :"memory"
- );
- }
-}
-
-#endif
-
void oc_restore_fpu_mmx(void){
__asm__ __volatile__("emms\n\t");
}
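A note on the fully unrolled oc_frag_recon_intra_mmx above: it reaches all eight output rows without a loop or any pointer updates by combining two base pointers with x86 scaled-index addressing. A minimal C sketch of the address generation (the names here are mine; the comments show the matching asm operands):

static void intra_row_addresses(unsigned char *_dst,int _dst_ystride,
 unsigned char *_rows[8]){
  unsigned char *dst4;
  long           ystride;
  long           ystride3;
  ystride=(long)_dst_ystride;
  /*Both derived values are computed once, outside the asm block.*/
  ystride3=ystride*3;        /*Passed in as [dst_ystride3].*/
  dst4=_dst+(ystride<<2);    /*Passed in as [dst4].*/
  _rows[0]=_dst;             /*(%[dst])*/
  _rows[1]=_dst+ystride;     /*(%[dst],%[dst_ystride])*/
  _rows[2]=_dst+ystride*2;   /*(%[dst],%[dst_ystride],2)*/
  _rows[3]=_dst+ystride3;    /*(%[dst],%[dst_ystride3])*/
  _rows[4]=dst4;             /*(%[dst4])*/
  _rows[5]=dst4+ystride;     /*(%[dst4],%[dst_ystride])*/
  _rows[6]=dst4+ystride*2;   /*(%[dst4],%[dst_ystride],2)*/
  _rows[7]=dst4+ystride3;    /*(%[dst4],%[dst_ystride3])*/
}

x86 addressing modes can only scale an index by 1, 2, 4, or 8, so ystride*3 cannot be formed inside the movq itself; precomputing it and dst4 lets the eight stores proceed with no lea or add between them.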
Modified: trunk/theora-exp/lib/x86/mmxfrag.c
===================================================================
--- trunk/theora-exp/lib/x86/mmxfrag.c 2007-12-30 17:42:25 UTC (rev 14335)
+++ trunk/theora-exp/lib/x86/mmxfrag.c 2007-12-31 02:22:21 UTC (rev 14336)
@@ -11,45 +11,127 @@
********************************************************************
*/
/*MMX acceleration of fragment reconstruction for motion compensation.
- Originally written by Rudolf Marek.*/
+ Originally written by Rudolf Marek.
+ Additional optimization by Nils Pipenbrinck.
+ Note: Loops are unrolled for best performance.
+ The iteration each instruction belongs to is marked in the comments as #i.*/
#include "x86int.h"
#if defined(OC_X86ASM)
void oc_frag_recon_intra_mmx(unsigned char *_dst,int _dst_ystride,
const ogg_int16_t *_residue){
- int i;
- for(i=8;i-->0;){
- __asm__ __volatile__(
- /*Set mm0 to 0xFFFFFFFFFFFFFFFF.*/
- "pcmpeqw %%mm0,%%mm0\n\t"
- /*First four input values*/
- "movq (%[residue]),%%mm2\n\t"
- /*Set mm0 to 0x8000800080008000.*/
- "psllw $15,%%mm0\n\t"
- /*Next four input values.*/
- "movq 8(%[residue]),%%mm3\n\t"
- /*Set mm0 to 0x0080008000800080.*/
- "psrlw $8,%%mm0\n\t"
- /*_residue+=16*/
- "lea 0x10(%[residue]),%[residue]\n\t"
- /*Set mm1=mm0.*/
- "movq %%mm0,%%mm1\n\t"
- /*Add 128 and saturate to 16 bits.*/
- "paddsw %%mm2,%%mm0\n\t"
- /*Add 128 and saturate to 16 bits.*/
- "paddsw %%mm3,%%mm1\n\t"
- /*Pack saturate with next(high) four values.*/
- "packuswb %%mm1,%%mm0\n\t"
- /*Writeback.*/
- "movq %%mm0,(%[dst])\n\t"
- /*_dst+=_dst_ystride*/
- "lea (%[dst],%[dst_ystride]),%[dst]\n\t"
- :[dst]"+r"(_dst),[residue]"+r"(_residue)
- :[dst_ystride]"r"((long)_dst_ystride)
- :"memory"
- );
- }
+ __asm__ __volatile__(
+ /*Set mm0 to 0xFFFFFFFFFFFFFFFF.*/
+ "pcmpeqw %%mm0,%%mm0\n\t"
+ /*#0 Load low residue.*/
+ "movq 0*8(%[residue]),%%mm1\n\t"
+ /*#0 Load high residue.*/
+ "movq 1*8(%[residue]),%%mm2\n\t"
+ /*Set mm0 to 0x8000800080008000.*/
+ "psllw $15,%%mm0\n\t"
+ /*#1 Load low residue.*/
+ "movq 2*8(%[residue]),%%mm3\n\t"
+ /*#1 Load high residue.*/
+ "movq 3*8(%[residue]),%%mm4\n\t"
+ /*Set mm0 to 0x0080008000800080.*/
+ "psrlw $8,%%mm0\n\t"
+ /*#2 Load low residue.*/
+ "movq 4*8(%[residue]),%%mm5\n\t"
+ /*#2 Load high residue.*/
+ "movq 5*8(%[residue]),%%mm6\n\t"
+ /*#0 Bias low residue.*/
+ "paddsw %%mm0,%%mm1\n\t"
+ /*#0 Bias high residue.*/
+ "paddsw %%mm0,%%mm2\n\t"
+ /*#0 Pack to byte.*/
+ "packuswb %%mm2,%%mm1\n\t"
+ /*#1 Bias low residue.*/
+ "paddsw %%mm0,%%mm3\n\t"
+ /*#1 Bias high residue.*/
+ "paddsw %%mm0,%%mm4\n\t"
+ /*#1 Pack to byte.*/
+ "packuswb %%mm4,%%mm3\n\t"
+ /*#2 Bias low residue.*/
+ "paddsw %%mm0,%%mm5\n\t"
+ /*#2 Bias high residue.*/
+ "paddsw %%mm0,%%mm6\n\t"
+ /*#2 Pack to byte.*/
+ "packuswb %%mm6,%%mm5\n\t"
+ /*#0 Write row.*/
+ "movq %%mm1,(%[dst])\n\t"
+ /*#1 Write row.*/
+ "movq %%mm3,(%[dst],%[dst_ystride])\n\t"
+ /*#2 Write row.*/
+ "movq %%mm5,(%[dst],%[dst_ystride],2)\n\t"
+ /*#3 Load low residue.*/
+ "movq 6*8(%[residue]),%%mm1\n\t"
+ /*#3 Load high residue.*/
+ "movq 7*8(%[residue]),%%mm2\n\t"
+ /*#4 Load low residue.*/
+ "movq 8*8(%[residue]),%%mm3\n\t"
+ /*#4 Load high residue.*/
+ "movq 9*8(%[residue]),%%mm4\n\t"
+ /*#5 Load low residue.*/
+ "movq 10*8(%[residue]),%%mm5\n\t"
+ /*#5 Load high residue.*/
+ "movq 11*8(%[residue]),%%mm6\n\t"
+ /*#3 Bias low residue.*/
+ "paddsw %%mm0,%%mm1\n\t"
+ /*#3 Bias high residue.*/
+ "paddsw %%mm0,%%mm2\n\t"
+ /*#3 Pack to byte.*/
+ "packuswb %%mm2,%%mm1\n\t"
+ /*#4 Bias low residue.*/
+ "paddsw %%mm0,%%mm3\n\t"
+ /*#4 Bias high residue.*/
+ "paddsw %%mm0,%%mm4\n\t"
+ /*#4 Pack to byte.*/
+ "packuswb %%mm4,%%mm3\n\t"
+ /*#5 Bias low residue.*/
+ "paddsw %%mm0,%%mm5\n\t"
+ /*#5 Bias high residue.*/
+ "paddsw %%mm0,%%mm6\n\t"
+ /*#5 Pack to byte.*/
+ "packuswb %%mm6,%%mm5\n\t"
+ /*#3 Write row.*/
+ "movq %%mm1,(%[dst],%[dst_ystride3])\n\t"
+ /*#4 Write row.*/
+ "movq %%mm3,(%[dst4])\n\t"
+ /*#5 Write row.*/
+ "movq %%mm5,(%[dst4],%[dst_ystride])\n\t"
+ /*#6 Load low residue.*/
+ "movq 12*8(%[residue]),%%mm1\n\t"
+ /*#6 Load high residue.*/
+ "movq 13*8(%[residue]),%%mm2\n\t"
+ /*#7 Load low residue.*/
+ "movq 14*8(%[residue]),%%mm3\n\t"
+ /*#7 Load high residue.*/
+ "movq 15*8(%[residue]),%%mm4\n\t"
+ /*#6 Bias low residue.*/
+ "paddsw %%mm0,%%mm1\n\t"
+ /*#6 Bias high residue.*/
+ "paddsw %%mm0,%%mm2\n\t"
+ /*#6 Pack to byte.*/
+ "packuswb %%mm2,%%mm1\n\t"
+ /*#7 Bias low residue.*/
+ "paddsw %%mm0,%%mm3\n\t"
+ /*#7 Bias high residue.*/
+ "paddsw %%mm0,%%mm4\n\t"
+ /*#7 Pack to byte.*/
+ "packuswb %%mm4,%%mm3\n\t"
+ /*#6 Write row.*/
+ "movq %%mm1,(%[dst4],%[dst_ystride],2)\n\t"
+ /*#7 Write row.*/
+ "movq %%mm3,(%[dst4],%[dst_ystride3])\n\t"
+ :
+ :[residue]"r"(_residue),
+ [dst]"r"(_dst),
+ [dst4]"r"(_dst+(_dst_ystride<<2)),
+ [dst_ystride]"r"((long)_dst_ystride),
+ [dst_ystride3]"r"((long)_dst_ystride*3)
+ :"memory"
+ );
}
void oc_frag_recon_inter_mmx(unsigned char *_dst,int _dst_ystride,
@@ -57,31 +139,47 @@
const unsigned char *_src,int _src_ystride,const ogg_int16_t *_residue){
int i;
/*Zero mm0.*/
__asm__ __volatile__("pxor %%mm0,%%mm0\n\t"::);
- for(i=8;i-->0;){
+ for(i=4;i-->0;){
__asm__ __volatile__(
- /*Load mm2 with _src*/
- "movq (%[src]),%%mm2\n\t"
- /*Copy mm2 to mm3.*/
- "movq %%mm2,%%mm3\n\t"
- /*Expand high part of _src to 16 bits.*/
+ /*#0 Load source.*/
+ "movq (%[src]),%%mm3\n\t"
+ /*#1 Load source.*/
+ "movq (%[src],%[src_ystride]),%%mm7\n\t"
+ /*#0 Get copy of src.*/
+ "movq %%mm3,%%mm4\n\t"
+ /*#0 Expand high source.*/
+ "punpckhbw %%mm0,%%mm4\n\t"
+ /*#0 Expand low source.*/
+ "punpcklbw %%mm0,%%mm3\n\t"
+ /*#0 Add residue high.*/
+ "paddsw 8(%[residue]),%%mm4\n\t"
+ /*#1 Get copy of src.*/
+ "movq %%mm7,%%mm2\n\t"
+ /*#0 Add residue low.*/
+ "paddsw (%[residue]), %%mm3\n\t"
+ /*#1 Expand high source.*/
"punpckhbw %%mm0,%%mm2\n\t"
- /*Expand low part of _src to 16 bits.*/
- "punpcklbw %%mm0,%%mm3\n\t"
- /*Add low part with low part of residue.*/
- "paddsw (%[residue]),%%mm3\n\t"
- /*High with high.*/
- "paddsw 8(%[residue]),%%mm2\n\t"
- /*Pack and saturate to mm3.*/
- "packuswb %%mm2,%%mm3\n\t"
- /*_src+=_src_ystride*/
- "lea (%[src],%[src_ystride]),%[src]\n\t"
- /*_residue+=16*/
- "lea 0x10(%[residue]),%[residue]\n\t"
- /*Put mm3 to dest.*/
+ /*#0 Pack final row pixels.*/
+ "packuswb %%mm4,%%mm3\n\t"
+ /*#1 Expand low source.*/
+ "punpcklbw %%mm0,%%mm7\n\t"
+ /*#1 Add residue low.*/
+ "paddsw 16(%[residue]),%%mm7\n\t"
+ /*#1 Add residue high.*/
+ "paddsw 24(%[residue]),%%mm2\n\t"
+ /*Advance residue.*/
+ "lea 32(%[residue]),%[residue]\n\t"
+ /*#1 Pack final row pixels.*/
+ "packuswb %%mm2,%%mm7\n\t"
+ /*Advance src.*/
+ "lea (%[src],%[src_ystride],2),%[src]\n\t"
+ /*#0 Write row.*/
"movq %%mm3,(%[dst])\n\t"
- /*_dst+=_dst_ystride*/
- "lea (%[dst],%[dst_ystride]),%[dst]\n\t"
- :[dst]"+r"(_dst),[src]"+r"(_src),[residue]"+r"(_residue)
+ /*#1 Write row.*/
+ "movq %%mm7,(%[dst],%[dst_ystride])\n\t"
+ /*Advance dst.*/
+ "lea (%[dst],%[dst_ystride],2),%[dst]\n\t"
+ :[residue]"+r"(_residue),[dst]"+r"(_dst),[src]"+r"(_src)
:[dst_ystride]"r"((long)_dst_ystride),
[src_ystride]"r"((long)_src_ystride)
:"memory"
@@ -89,147 +187,98 @@
}
}
-#if defined(__amd64__)||defined(__x86_64__)
-
void oc_frag_recon_inter2_mmx(unsigned char *_dst,int _dst_ystride,
const unsigned char *_src1,int _src1_ystride,const unsigned char *_src2,
int _src2_ystride,const ogg_int16_t *_residue){
int i;
- __asm__ __volatile__(
- /*Zero mm0.*/
- "pxor %%mm0,%%mm0\n\t"
- /*Load mm2 with _src1.*/
- "movq (%[src1]),%%mm2\n\t"
- :[src1]"+r"(_src1)
- :
- );
- for(i=8;i-->0;){
+ /*NOTE: This assumes that
+ _dst_ystride==_src1_ystride&&_dst_ystride==_src2_ystride.
+ This is currently always the case, but a slower fallback version will need
+ to be written if it ever is not.*/
+ /*Zero mm7.*/
+ __asm__ __volatile__("pxor %%mm7,%%mm7\n\t"::);
+ for(i=4;i-->0;){
__asm__ __volatile__(
- /*Packed _src2.*/
- "movq (%[src2]),%%mm4\n\t"
- /*Copy packed src1 to mm3.*/
- "movq %%mm2,%%mm3\n\t"
- /*Copy packed src2 to mm5.*/
- "movq %%mm4,%%mm5\n\t"
- /*Expand low part of src1 to mm2.*/
- "punpcklbw %%mm0,%%mm2\n\t"
- /*Expand low part of src2 to mm4.*/
- "punpcklbw %%mm0,%%mm4\n\t"
- /*_src1+=_src1_ystride*/
- "lea (%[src1],%[src1_ystride]),%[src1]\n\t"
- /*Expand high part of src1 to mm3.*/
- "punpckhbw %%mm0,%%mm3\n\t"
- /*Expand high part of src2 to mm5.*/
- "punpckhbw %%mm0,%%mm5\n\t"
- /*Add low parts of src1 and src2.*/
- "paddsw %%mm2,%%mm4\n\t"
- /*Add high parts of src1 and src2.*/
- "paddsw %%mm3,%%mm5\n\t"
- /*_src2+=_src2_ystride.*/
- "lea (%[src2],%[src2_ystride]),%[src2]\n\t"
- /*Load mm2 with _src1.*/
- "movq (%[src1]),%%mm2\n\t"
- /*Shift logical right by 1.*/
- "psrlw $1,%%mm4\n\t"
- /*Shift logical right by 1.*/
- "psrlw $1,%%mm5\n\t"
- /*Add low parts of residue.*/
- "paddsw (%[residue]),%%mm4\n\t"
- /*Add high parts of residue.*/
- "paddsw 8(%[residue]),%%mm5\n\t"
- /*Pack saturate high to low.*/
- "packuswb %%mm5,%%mm4\n\t"
- /*_residue+=16.*/
- "lea 0x10(%[residue]),%[residue]\n\t"
- /*Write to dst.*/
- "movq %%mm4,(%[dst])\n\t"
- /*_dst+=_dst_ystride*/
- "lea (%[dst],%[dst_ystride]),%[dst]\n\t"
+ "movq (%[src1]),%%mm0\n\t"
+ /*#0 Load src1.*/
+ "movq (%[src2]),%%mm2\n\t"
+ /*#0 Load src2.*/
+ "movq %%mm0,%%mm1\n\t"
+ /*#0 Copy src1.*/
+ "movq %%mm2,%%mm3\n\t"
+ /*#0 Copy src2.*/
+ "movq (%[src1],%[ystride]),%%mm4\n\t"
+ /*#1 Load src1.*/
+ "punpcklbw %%mm7,%%mm0\n\t"
+ /*#0 Unpack lower src1.*/
+ "movq (%[src2],%[ystride]),%%mm5\n\t"
+ /*#1 Load src2.*/
+ "punpckhbw %%mm7,%%mm1\n\t"
+ /*#0 Unpack higher src1.*/
+ "punpcklbw %%mm7,%%mm2\n\t"
+ /*#0 Unpack lower src2.*/
+ "punpckhbw %%mm7,%%mm3\n\t"
+ /*#0 Unpack higher src2.*/
+ "lea (%[src1],%[ystride],2),%[src1]\n\t"
+ /*Advance src1 ptr.*/
+ "lea (%[src2],%[ystride],2),%[src2]\n\t"
+ /*Advance src2 ptr.*/
+ "paddsw %%mm2,%%mm0\n\t"
+ /*#0 Lower src1+src2.*/
+ "paddsw %%mm3,%%mm1\n\t"
+ /*#0 Higher src1+src2.*/
+ "movq %%mm4,%%mm2\n\t"
+ /*#1 Copy src1.*/
+ "psraw $1,%%mm0\n\t"
+ /*#0 Build lo average.*/
+ "movq %%mm5,%%mm3\n\t"
+ /*#1 Copy src2.*/
+ "punpcklbw %%mm7,%%mm4\n\t"
+ /*#1 Unpack lower src1.*/
+ "psraw $1,%%mm1\n\t"
+ /*#0 Build hi average.*/
+ "punpckhbw %%mm7,%%mm2\n\t"
+ /*#1 Unpack higher src1.*/
+ "paddsw (%[residue]),%%mm0\n\t"
+ /*#0 low+=residue.*/
+ "punpcklbw %%mm7,%%mm5\n\t"
+ /*#1 Unpack lower src2.*/
+ "paddsw 8(%[residue]),%%mm1\n\t"
+ /*#0 high+=residue.*/
+ "punpckhbw %%mm7,%%mm3\n\t"
+ /*#1 Unpack higher src2.*/
+ "paddsw %%mm4,%%mm5\n\t"
+ /*#1 Lower src1+src2.*/
+ "packuswb %%mm1,%%mm0\n\t"
+ /*#0 Pack and saturate.*/
+ "paddsw %%mm2,%%mm3\n\t"
+ /*#1 Higher src1+src2.*/
+ "movq %%mm0,(%[dst])\n\t"
+ /*#0 Write row.*/
+ "psraw $1,%%mm5\n\t"
+ /*#1 Build lo average.*/
+ "psraw $1,%%mm3\n\t"
+ /*#1 Build hi average.*/
+ "paddsw 16(%[residue]),%%mm5\n\t"
+ /*#1 low+=residue.*/
+ "paddsw 24(%[residue]),%%mm3\n\t"
+ /*#1 high+=residue.*/
+ "packuswb %%mm3,%%mm5\n\t"
+ /*#1 Pack and saturate.*/
+ "movq %%mm5,(%[dst],%[ystride])\n\t"
+ /*#1 Write row ptr.*/
+ "add $32,%[residue]\n\t"
+ /*Advance residue ptr.*/
+ "lea (%[dst],%[ystride],2),%[dst]\n\t"
+ /*Advance dest ptr.*/
:[dst]"+r"(_dst),[residue]"+r"(_residue),
[src1]"+r"(_src1),[src2]"+r"(_src2)
- :[dst_ystride]"r"((long)_dst_ystride),
- [src1_ystride]"r"((long)_src1_ystride),
- [src2_ystride]"r"((long)_src2_ystride)
+ :[ystride]"r"((long)_dst_ystride)
:"memory"
);
}
}
-#else
-
-void oc_frag_recon_inter2_mmx(unsigned char *_dst,int _dst_ystride,
- const unsigned char *_src1,int _src1_ystride,const unsigned char *_src2,
- int _src2_ystride,const ogg_int16_t *_residue){
- long a;
- int i;
- __asm__ __volatile__(
- /*Zero mm0.*/
- "pxor %%mm0,%%mm0\n\t"
- /*Load mm2 with _src1.*/
- "movq (%[src1]),%%mm2\n\t"
- :[src1]"+r"(_src1)
- :
- );
- for(i=8;i-->0;){
- __asm__ __volatile__(
- /*Packed _src2.*/
- "movq (%[src2]),%%mm4\n\t"
- /*Copy packed src1 to mm3.*/
- "movq %%mm2,%%mm3\n\t"
- /*Copy packed src2 to mm5.*/
- "movq %%mm4,%%mm5\n\t"
- /*eax=_src1_ystride*/
- "mov %[src1_ystride],%[a]\n\t"
- /*Expand low part of src1 to mm2.*/
- "punpcklbw %%mm0,%%mm2\n\t"
- /*Expand low part of src2 to mm4.*/
- "punpcklbw %%mm0,%%mm4\n\t"
- /*_src1+=_src1_ystride*/
- "lea (%[src1],%[a]),%[src1]\n\t"
- /*Expand high part of src1 to mm3.*/
- "punpckhbw %%mm0,%%mm3\n\t"
- /*Expand high part of src2 to mm5.*/
- "punpckhbw %%mm0,%%mm5\n\t"
- /*eax=_src2_ystride*/
- "mov %[src2_ystride],%[a]\n\t"
- /*Add low parts of src1 and src2.*/
- "paddsw %%mm2,%%mm4\n\t"
- /*Add high parts of src1 and src2.*/
- "paddsw %%mm3,%%mm5\n\t"
- /*_src2+=_src2_ystride.*/
- "lea (%[src2],%[a]),%[src2]\n\t"
- /*Load mm2 with _src1.*/
- "movq (%[src1]),%%mm2\n\t"
- /*Shift logical right by 1.*/
- "psrlw $1,%%mm4\n\t"
- /*Shift logical right by 1.*/
- "psrlw $1,%%mm5\n\t"
- /*Add low parts of residue.*/
- "paddsw (%[residue]),%%mm4\n\t"
- /*Add high parts of residue.*/
- "paddsw 8(%[residue]),%%mm5\n\t"
- /*eax=_dst_ystride.*/
- "mov %[dst_ystride],%[a]\n\t"
- /*Pack saturate high to low.*/
- "packuswb %%mm5,%%mm4\n\t"
- /*_residue+=16.*/
- "lea 0x10(%[residue]),%[residue]\n\t"
- /*Write to dst.*/
- "movq %%mm4,(%[dst])\n\t"
- /*_dst+=_dst_ystride*/
- "lea (%[dst],%[a]),%[dst]\n\t"
- :[a]"=&a"(a),[dst]"+r"(_dst),[residue]"+r"(_residue),
- [src1]"+r"(_src1),[src2]"+r"(_src2)
- :[dst_ystride]"m"((long)_dst_ystride),
- [src1_ystride]"m"((long)_src1_ystride),
- [src2_ystride]"m"((long)_src2_ystride)
- :"memory"
- );
- }
-}
-
-#endif
-
void oc_restore_fpu_mmx(void){
__asm__ __volatile__("emms\n\t");
}
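Finally, a hedged usage sketch of why oc_restore_fpu_mmx matters: the MMX registers alias the x87 floating-point register stack, so EMMS must execute after the last MMX routine and before any floating-point code runs. The prototypes below match the definitions in the patch; the main function is purely illustrative and assumes you link against the patched mmxfrag.c.

#include <stdio.h>

typedef short ogg_int16_t;

void oc_frag_recon_intra_mmx(unsigned char *_dst,int _dst_ystride,
 const ogg_int16_t *_residue);
void oc_restore_fpu_mmx(void);

int main(void){
  unsigned char dst[8*8];
  ogg_int16_t   residue[8*8]={0};
  oc_frag_recon_intra_mmx(dst,8,residue);
  /*EMMS: without this, later x87 floating-point results are undefined.*/
  oc_restore_fpu_mmx();
  printf("first pixel: %d (expect 128)\n",dst[0]);
  return 0;
}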