This is the mail archive of the gdb-patches@sources.redhat.com mailing list for the GDB project.



[patch] AltiVec support for PSIM.




Hi folks.


The subject pretty much says it all.  All the testcases are
also known to run correctly on LinuxPPC (PowerMac G4), and the
debugger support works with Elena's recent AltiVec work.


OK?


.mrg.


2001-12-29  matthew green  <mrg@redhat.com>

	* sim/configure.in (extra_subdirs): Add testsuite for ppc.
	* sim/configure: Regenerate.

	* sim/ppc/idecode_expression.h (ALTIVEC_SET_CR6): New macro.
	(ALTIVEC_SET_SAT): Likewise.
	* sim/ppc/main.c (zalloc): Fix typo in error message.
	* sim/ppc/ppc-cache-rules (VS, vS, VS_BITMASK): New cache entries.
	(VA, vA, vA_BITMASK, VB, vB, vB_BITMASK, VC, vC, vC_BITMASK): Likewise.
	* sim/ppc/ppc-instructions (PPC_INSN_INT_VR): New model macros.
	(PPC_INSN_VR, PPC_INSN_VR_CR, PPC_INSN_VR_VSCR, PPC_INSN_FROM_VSCR,
	PPC_INSN_TO_VSCR):  Likewise.
	(model_trace_altivec_busy_p, model_trace_altivec_make_busy): New model
	functions.
	(struct _model_busy, struct _model_data): New vr_busy and vscr_busy
	elements.
	(model_trace_release): Add vr_busy and vscr_busy support.
	(model_new_cycle): Likewise.
	(model_make): Likewise.
	(ppc_insn_int_vr, ppc_insn_vr, ppc_insn_vr_cr): New model functions.
	(ppc_insn_vr_vscr, ppc_insn_from_vscr, ppc_insn_to_vscr): Likewise.
	(altivec_signed_saturate_8, altivec_signed_saturate_16): Likewise.
	(altivec_signed_saturate_32, altivec_unsigned_saturate_8): Likewise.
	(altivec_unsigned_saturate_16, altivec_unsigned_saturate_32): Likewise.
	(lvebx, lvehx, lvewx, lvsl, lvsr, lvx, lvxl): New AltiVec instructions.
	(mfvrsave, mfvscr, mtvrsave, mtvscr, stvebx, stvehx, stvewx): Likewise.
	(stvx, stvxl, vaddcuw, vaddfp, vaddsbs, vaddshs, vaddsws): Likewise.
	(vaddubm, vaddubs, vadduhm, vadduhs, vadduwm, vadduws, vand): Likewise.
	(vandc, vavgsb, vavgsh, vavgsw, vavgub, vavguh, vavguw): Likewise.
	(vcfsx, vcfux, vcmpbfp, vcmpeqfp, vcmpequb, vcmpequh): Likewise.
	(vcmpequw, vcmpgefp, vcmpgtfp, vcmpgtsb, vcmpgtsh, vcmpgtsw): Likewise.
	(vcmpgtub, vcmpgtuh, vcmpgtuw, vctsxs, vctuxs, vexptefp): Likewise.
	(vlogefp, vmaddfp, vmaxfp, vmaxsb, vmaxsh, vmaxsw, vmaxub): Likewise.
	(vmaxuh, vmaxuw, vmhaddshs, vmhraddshs, vminfp, vminsb): Likewise.
	(vminsh, vminsw, vminub, vminuh, vminuw, vmladduhm, vmrghb): Likewise.
	(vmrghh, vmrghw, vmrglb, vmrglh, vmrglw, vmsummbm, vmsumshm): Likewise.
	(vmsumshs, vmsumubm, vmsumuhm, vmsumuhs, vmulesb, vmulesh): Likewise.
	(vmuleub, vmuleuh, vmulosb, vmulosh, vmuloub, vmulouh): Likewise.
	(vnmsubfp, vnor, vor, vperm, vpkpx, vpkshss, vpkshus): Likewise.
	(vpkswss, vpkswus, vpkuhum, vpkuhus, vpkuwum, vpkuwus, vrefp): Likewise.
	(vrfim, vrfin, vrfip, vrfiz, vrlb, vrlh, vrlw, vrsqrtefp): Likewise.
	(vsel, vsl, vslb, vsldoi, vslh, vslo, vslw, vspltb, vsplth): Likewise.
	(vspltisb, vspltish, vspltisw, vspltw, vsr, vsrab, vsrah): Likewise.
	(vsraw, vsrb, vsrh, vsro, vsrw, vsubcuw, vsubfp, vsubsbs): Likewise.
	(vsubshs, vsubsws, vsububm, vsububs, vsubuhm, vsubuhs): Likewise.
	(vsubuwm, vsubuws, vsum2sws, vsum4sbs, vsum4shs, vsum4ubs): Likewise.
	(vsumsws, vupkhpx, vupkhsb, vupkhsh, vupklpx, vupklsb): Likewise.
	(vupklsh, vxor): Likewise.
	* sim/ppc/ppc-spr-table (VRSAVE): New SPR number 256.
	* sim/ppc/psim.c (psim_read_register): Add vreg and 16-bit support.
	* sim/ppc/psim.c (psim_write_register): Likewise.
	* sim/ppc/registers.c (register_description): Add vr and vscr support.
	* sim/ppc/registers.h (vreg): New datatype for AltiVec registers.
	(vscreg): New datatype for AltiVec Vector Status and Control Register.
	(_registers): Add the VSCR and 32 AltiVec registers.
	(registers_types): Add reg_vr and reg_vscr.
	(VR, VSCR): New macros for the VR and VSCR registers.
	* sim/ppc/sim-endian.h (AV_BINDEX, AV_HINDEX): New macros.

	* sim/testsuite/sim/ppc/psim.exp: New file.
	* sim/testsuite/sim/ppc/testutils.inc: New file.
	* sim/testsuite/sim/ppc/*.s: New testsuite.
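
For reviewers skimming the diff: the new code accesses the vector registers
as (*vS).b[i], (*vS).h[i] and (*vS).w[i], i.e. the vreg type added to
registers.h is a 16-byte value viewable as bytes, half words or words.  A
rough standalone sketch of that layout (using stdint names in place of the
sim's unsigned8/unsigned16/unsigned32 typedefs, so this is illustrative
rather than the exact declaration from the patch):

#include <stdint.h>

/* Sketch of the AltiVec register layout the diff relies on.  */
typedef union {
  uint8_t  b[16];   /* sixteen byte elements      -- (*vS).b[i] */
  uint16_t h[8];    /* eight half-word elements   -- (*vS).h[i] */
  uint32_t w[4];    /* four word elements         -- (*vS).w[i] */
} vreg;

typedef uint32_t vscreg;  /* Vector Status and Control Register */
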



Index: sim/configure.in
===================================================================
RCS file: /cvs/src/src/sim/configure.in,v
retrieving revision 1.6
diff -p -r1.6 configure.in
*** configure.in	2001/10/20 00:16:44	1.6
--- configure.in	2001/12/27 05:53:11
*************** case "${target}" in
*** 94,100 ****
  	# unless asked to.
  	sim_target=ppc
  	only_if_gcc=yes
! 	#extra_subdirs="${extra_subdirs}"
  	;;
    tic80-*-*)
  	sim_target=tic80
--- 94,100 ----
  	# unless asked to.
  	sim_target=ppc
  	only_if_gcc=yes
! 	extra_subdirs="${extra_subdirs} testsuite"
  	;;
    tic80-*-*)
  	sim_target=tic80
Index: sim/ppc/idecode_expression.h
===================================================================
RCS file: /cvs/src/src/sim/ppc/idecode_expression.h,v
retrieving revision 1.1.1.1
diff -p -r1.1.1.1 idecode_expression.h
*** idecode_expression.h	1999/04/16 01:35:10	1.1.1.1
--- idecode_expression.h	2001/12/27 05:53:14
*************** do { \
*** 408,410 ****
--- 408,437 ----
  do { \
    FPSCR = (FPSCR & ~fpscr_fprf) | (VAL); \
  } while (0)
+ 
+ /* AltiVec macro helpers.  */
+ 
+ #define ALTIVEC_SET_CR6(vS, checkone) \
+ do { \
+   if (checkone && ((*vS).w[0] == 0xffffffff && \
+ 		   (*vS).w[1] == 0xffffffff && \
+ 		   (*vS).w[2] == 0xffffffff && \
+ 		   (*vS).w[3] == 0xffffffff)) \
+     CR_SET(6, 1 << 3); \
+   else if ((*vS).w[0] == 0 && \
+            (*vS).w[1] == 0 && \
+            (*vS).w[2] == 0 && \
+            (*vS).w[3] == 0) \
+     CR_SET(6, 1 << 1); \
+   else \
+     CR_SET(6, 0); \
+ } while (0)
+ 
+ #define	VSCR_SAT	0x00000001
+ #define	VSCR_NJ		0x00010000
+ 
+ #define ALTIVEC_SET_SAT(sat) \
+ do { \
+   if (sat) \
+     VSCR |= VSCR_SAT; \
+ } while (0)
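
To make the new CR6 convention concrete: for the dot forms of the vector
compares, ALTIVEC_SET_CR6 sets CR field 6 to 8 when every element compared
true (only when checkone is nonzero), to 2 when every element compared
false, and to 0 otherwise.  Below is a minimal standalone rendering of that
decision, using plain arrays and printf instead of the simulator's vreg and
CR_SET machinery (names here are illustrative only):

#include <stdint.h>
#include <stdio.h>

static unsigned cr6_value(const uint32_t w[4], int checkone)
{
  int all_ones  = (w[0] == 0xffffffff && w[1] == 0xffffffff
                   && w[2] == 0xffffffff && w[3] == 0xffffffff);
  int all_zeros = (w[0] == 0 && w[1] == 0 && w[2] == 0 && w[3] == 0);

  if (checkone && all_ones)
    return 1 << 3;            /* every element compared true */
  else if (all_zeros)
    return 1 << 1;            /* every element compared false */
  return 0;                   /* mixed result */
}

int main(void)
{
  uint32_t t[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff };
  uint32_t f[4] = { 0, 0, 0, 0 };
  uint32_t m[4] = { 0xffffffff, 0, 0, 0 };
  printf("%u %u %u\n", cr6_value(t, 1), cr6_value(f, 1), cr6_value(m, 1));
  return 0;  /* prints "8 2 0" */
}
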
Index: sim/ppc/main.c
===================================================================
RCS file: /cvs/src/src/sim/ppc/main.c,v
retrieving revision 1.2
diff -p -r1.2 main.c
*** main.c	2001/12/15 05:08:44	1.2
--- main.c	2001/12/27 05:53:14
*************** zalloc(long size)
*** 248,254 ****
  {
    void *memory = malloc(size);
    if (memory == NULL)
!     error("zmalloc failed\n");
    memset(memory, 0, size);
    return memory;
  }
--- 248,254 ----
  {
    void *memory = malloc(size);
    if (memory == NULL)
!     error("zalloc failed\n");
    memset(memory, 0, size);
    return memory;
  }
Index: sim/ppc/ppc-cache-rules
===================================================================
RCS file: /cvs/src/src/sim/ppc/ppc-cache-rules,v
retrieving revision 1.1.1.1
diff -p -r1.1.1.1 ppc-cache-rules
*** ppc-cache-rules	1999/04/16 01:35:10	1.1.1.1
--- ppc-cache-rules	2001/12/27 05:53:14
*************** cache:LI:EXTS_LI_0b00:unsigned_word:((((
*** 63,65 ****
--- 63,77 ----
  cache:D:EXTS_D:unsigned_word:((signed_word)(signed16)(instruction))
  cache:DS:EXTS_DS_0b00:unsigned_word:(((signed_word)(signed16)instruction) & ~0x3)
  #compute:SPR:SPR_is_256:int:(SPR == 256)
+ cache:VS:VS::
+ cache:VS:vS:vreg *:(cpu_registers(processor)->vr + VS)
+ cache:VS:VS_BITMASK:unsigned32:(1 << VS)
+ cache:VA:VA::
+ cache:VA:vA:vreg *:(cpu_registers(processor)->vr + VA)
+ cache:VA:VA_BITMASK:unsigned32:(1 << VA)
+ cache:VB:VB::
+ cache:VB:vB:vreg *:(cpu_registers(processor)->vr + VB)
+ cache:VB:VB_BITMASK:unsigned32:(1 << VB)
+ cache:VC:VC::
+ cache:VC:vC:vreg *:(cpu_registers(processor)->vr + VC)
+ cache:VC:VC_BITMASK:unsigned32:(1 << VC)
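
These cache entries give each AltiVec instruction both a pointer to the
operand register (vS, vA, vB, vC) and a one-bit-per-register mask
(VS_BITMASK and friends).  The masks feed the new scheduling code in
ppc-instructions, which stalls an instruction while any of its source or
destination vector registers is still being written by an in-flight
instruction.  A toy standalone version of that busy-mask check (names here
are illustrative, not from the patch; the real code also advances the
pipeline with model_new_cycle() and counts stalls):

#include <stdint.h>
#include <stdio.h>

struct toy_model { uint32_t vr_busy; };   /* one bit per vector register */

static int vr_conflict(const struct toy_model *m,
                       uint32_t out_vmask, uint32_t in_vmask)
{
  return (m->vr_busy & (out_vmask | in_vmask)) != 0;  /* stall while set */
}

int main(void)
{
  struct toy_model m = { 0 };
  uint32_t v2 = 1u << 2, v3 = 1u << 3;    /* masks for v2 and v3 */
  m.vr_busy |= v2;                        /* an earlier insn writes v2 */
  printf("%d %d\n", vr_conflict(&m, v3, v2), vr_conflict(&m, v3, v3));
  return 0;                               /* prints "1 0" */
}
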
Index: sim/ppc/ppc-instructions
===================================================================
RCS file: /cvs/src/src/sim/ppc/ppc-instructions,v
retrieving revision 1.3
diff -p -r1.3 ppc-instructions
*** ppc-instructions	2000/10/24 16:16:43	1.3
--- ppc-instructions	2001/12/27 05:53:17
***************
*** 110,115 ****
--- 110,151 ----
  		    ppc_insn_mtcr(MY_INDEX, cpu_model(processor), INT_MASK, FXM); \
  		} while (0)
  
+ 	#define PPC_INSN_INT_VR(OUT_MASK, IN_MASK, OUT_VMASK, IN_VMASK) \
+ 		do { \
+ 		  if (CURRENT_MODEL_ISSUE > 0) \
+ 		    ppc_insn_int_vr(MY_INDEX, cpu_model(processor), OUT_MASK, IN_MASK, OUT_VMASK, IN_VMASK); \
+ 		} while (0)
+ 
+ 	#define PPC_INSN_VR(OUT_VMASK, IN_VMASK) \
+ 		do { \
+ 		  if (CURRENT_MODEL_ISSUE > 0) \
+ 		    ppc_insn_vr(MY_INDEX, cpu_model(processor), OUT_VMASK, IN_VMASK); \
+ 		} while (0)
+ 
+ 	#define PPC_INSN_VR_CR(OUT_VMASK, IN_VMASK, CR_MASK) \
+ 		do { \
+ 		  if (CURRENT_MODEL_ISSUE > 0) \
+ 		    ppc_insn_vr_cr(MY_INDEX, cpu_model(processor), OUT_VMASK, IN_VMASK, CR_MASK); \
+ 		} while (0)
+ 
+ 	#define PPC_INSN_VR_VSCR(OUT_VMASK, IN_VMASK) \
+ 		do { \
+ 		  if (CURRENT_MODEL_ISSUE > 0) \
+ 		    ppc_insn_vr_vscr(MY_INDEX, cpu_model(processor), OUT_VMASK, IN_VMASK); \
+ 		} while (0)
+ 
+ 	#define PPC_INSN_FROM_VSCR(VR_MASK) \
+ 		do { \
+ 		  if (CURRENT_MODEL_ISSUE > 0) \
+ 		    ppc_insn_from_vscr(MY_INDEX, cpu_model(processor), VR_MASK); \
+ 		} while (0)
+ 
+ 	#define PPC_INSN_TO_VSCR(VR_MASK) \
+ 		do { \
+ 		  if (CURRENT_MODEL_ISSUE > 0) \
+ 		    ppc_insn_to_vscr(MY_INDEX, cpu_model(processor), VR_MASK); \
+ 		} while (0)
+ 
  ::model-data:::
  	typedef enum _ppc_function_unit {
  	  PPC_UNIT_BAD,				/* unknown function unit */
***************
*** 150,156 ****
--- 186,194 ----
  	  unsigned32 int_busy;				/* int registers that are busy */
  	  unsigned32 fp_busy;				/* floating point registers that are busy */
  	  unsigned32 cr_fpscr_busy;			/* CR/FPSCR registers that are busy */
+ 	  unsigned32 vr_busy;				/* AltiVec registers that are busy */
  	  signed16 spr_busy;				/* SPR register that is busy or PPC_NO_SPR */
+ 	  signed16 vscr_busy;				/* AltiVec status register busy */
  	  signed16 issue;				/* # of cycles until unit can accept another insn */
  	  signed16 done;				/* # of cycles until insn is done */
  	  signed16 nr_writebacks;			/* # of registers this unit writes back */
***************
*** 179,186 ****
--- 217,226 ----
  	  int max_nr_writebacks;			/* max # of writeback slots available */
  	  unsigned32 int_busy;				/* int registers that are busy */
  	  unsigned32 fp_busy;				/* floating point registers that are busy */
+ 	  unsigned32 vr_busy;				/* AltiVec registers that are busy */
  	  unsigned32 cr_fpscr_busy;			/* CR/FPSCR registers that are busy */
  	  unsigned8 spr_busy[nr_of_sprs];		/* SPR registers that are busy */
+ 	  unsigned8 vscr_busy;				/* AltiVec status register busy */
  	  unsigned8 busy[nr_ppc_function_units];	/* whether a function is busy or not */
  	};
  
*************** void::model-static::model_trace_release:
*** 273,278 ****
--- 313,327 ----
  	}
  	if (busy->spr_busy != PPC_NO_SPR)
  	  TRACE(trace_model, ("Register %s is now available.\n", spr_name(busy->spr_busy)));
+ 	if (busy->vr_busy) {
+ 	  for(i = 0; i < 32; i++) {
+ 	    if (((1 << i) & busy->vr_busy) != 0) {
+ 	      TRACE(trace_model, ("Register v%d is now available.\n", i));
+ 	    }
+ 	  }
+ 	}
+ 	if (busy->vscr_busy)
+ 	  TRACE(trace_model, ("VSCR register is now available.\n"));
  
  # Trace making registers busy
  void::model-static::model_trace_make_busy:model_data *model_ptr, unsigned32 int_mask, unsigned32 fp_mask, unsigned32 cr_mask
*************** void::model-static::model_trace_busy_p:m
*** 330,335 ****
--- 379,416 ----
  	}
  	if (spr_busy != PPC_NO_SPR && model_ptr->spr_busy[spr_busy])
  	  TRACE(trace_model, ("Waiting for register %s.\n", spr_name(spr_busy)));
+ 
+ # Trace waiting for AltiVec registers to become available
+ void::model-static::model_trace_altivec_busy_p:model_data *model_ptr, unsigned32 vr_busy
+ 	int i;
+ 	if (vr_busy) {
+ 	  vr_busy &= model_ptr->vr_busy;
+ 	  for(i = 0; i < 32; i++) {
+ 	    if (((1 << i) & vr_busy) != 0) {
+ 	      TRACE(trace_model, ("Waiting for register v%d.\n", i));
+ 	    }
+ 	  }
+ 	}
+ 	if (model_ptr->vscr_busy)
+ 	  TRACE(trace_model, ("Waiting for VSCR\n"));
+ 
+ # Trace making AltiVec registers busy
+ void::model-static::model_trace_altivec_make_busy:model_data *model_ptr, unsigned32 vr_mask, unsigned32 cr_mask
+ 	int i;
+ 	if (vr_mask) {
+ 	  for(i = 0; i < 32; i++) {
+ 	    if (((1 << i) & vr_mask) != 0) {
+ 	      TRACE(trace_model, ("Register v%d is now busy.\n", i));
+ 	    }
+ 	  }
+ 	}
+ 	if (cr_mask) {
+ 	  for(i = 0; i < 8; i++) {
+ 	    if (((1 << i) & cr_mask) != 0) {
+ 	      TRACE(trace_model, ("Register cr%d is now busy.\n", i));
+ 	    }
+ 	  }
+ 	}
  
  # Advance state to next cycle, releasing any registers allocated
  void::model-internal::model_new_cycle:model_data *model_ptr
*************** void::model-internal::model_new_cycle:mo
*** 351,356 ****
--- 432,439 ----
  	      model_ptr->cr_fpscr_busy &= ~cur_busy->cr_fpscr_busy;
  	      if (cur_busy->spr_busy != PPC_NO_SPR)
  		model_ptr->spr_busy[cur_busy->spr_busy] = 0;
+ 	      model_ptr->vr_busy &= ~cur_busy->vr_busy;
+ 	      model_ptr->vscr_busy &= ~cur_busy->vscr_busy;
  
  	      if (WITH_TRACE && ppc_trace[trace_model])
  		model_trace_release(model_ptr, cur_busy);
*************** model_busy *::model-internal::model_make
*** 407,412 ****
--- 490,497 ----
  	  busy->fp_busy = 0;
  	  busy->cr_fpscr_busy = 0;
  	  busy->nr_writebacks = 0;
+ 	  busy->vr_busy = 0;
+ 	  busy->vscr_busy = 0;
  	}
  
  	busy->unit = unit;
*************** void::model-function::ppc_insn_mtcr:itab
*** 727,732 ****
--- 812,985 ----
  	busy_ptr->nr_writebacks = 1;
  	if (WITH_TRACE && ppc_trace[trace_model])
  	  model_trace_make_busy(model_ptr, 0, 0, cr_mask);
+ 
+ # Schedule an AltiVec instruction that takes integer and vector input registers and produces integer and vector output registers
+ void::model-function::ppc_insn_int_vr:itable_index index, model_data *model_ptr, const unsigned32 out_mask, const unsigned32 in_mask, const unsigned32 out_vmask, const unsigned32 in_vmask
+ 	const unsigned32 int_mask = out_mask | in_mask;
+ 	const unsigned32 vr_mask = out_vmask | in_vmask;
+ 	model_busy *busy_ptr;
+ 
+ 	if ((model_ptr->int_busy & int_mask) != 0 || (model_ptr->vr_busy & vr_mask)) {
+ 	  model_new_cycle(model_ptr);			/* don't count first dependency as a stall */
+ 
+ 	  while ((model_ptr->int_busy & int_mask) != 0 || (model_ptr->vr_busy & vr_mask)) {
+ 	    if (WITH_TRACE && ppc_trace[trace_model]) {
+ 	      model_trace_busy_p(model_ptr, int_mask, 0, 0, PPC_NO_SPR);
+ 	      model_trace_altivec_busy_p(model_ptr, vr_mask);
+ 	    }
+ 
+ 	    model_ptr->nr_stalls_data++;
+ 	    model_new_cycle(model_ptr);
+ 	  }
+ 	}
+ 
+ 	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+ 	model_ptr->int_busy |= out_mask;
+ 	busy_ptr->int_busy |= out_mask;
+ 	model_ptr->vr_busy |= out_vmask;
+ 	busy_ptr->vr_busy |= out_vmask;
+ 
+ 	if (out_mask)
+ 	  busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_mask)) ? 1 : 2;
+ 
+ 	if (out_vmask)
+ 	  busy_ptr->nr_writebacks += (PPC_ONE_BIT_SET_P(out_vmask)) ? 1 : 2;
+ 
+ 	if (WITH_TRACE && ppc_trace[trace_model]) {
+ 	  model_trace_make_busy(model_ptr, out_mask, 0, 0);
+ 	  model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
+ 	}
+ 
+ # Schedule an AltiVec instruction that takes vector input registers and produces vector output registers
+ void::model-function::ppc_insn_vr:itable_index index, model_data *model_ptr, const unsigned32 out_vmask, const unsigned32 in_vmask
+ 	const unsigned32 vr_mask = out_vmask | in_vmask;
+ 	model_busy *busy_ptr;
+ 
+ 	if (model_ptr->vr_busy & vr_mask) {
+ 	  model_new_cycle(model_ptr);			/* don't count first dependency as a stall */
+ 
+ 	  while (model_ptr->vr_busy & vr_mask) {
+ 	    if (WITH_TRACE && ppc_trace[trace_model]) {
+ 	      model_trace_altivec_busy_p(model_ptr, vr_mask);
+ 	    }
+ 
+ 	    model_ptr->nr_stalls_data++;
+ 	    model_new_cycle(model_ptr);
+ 	  }
+ 	}
+ 
+ 	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+ 	model_ptr->vr_busy |= out_vmask;
+ 	busy_ptr->vr_busy |= out_vmask;
+ 	if (out_vmask)
+ 	  busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_vmask)) ? 1 : 2;
+ 
+ 	if (WITH_TRACE && ppc_trace[trace_model]) {
+ 	  model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
+ 	}
+ 
+ # Schedule an AltiVec instruction that takes vector input registers and produces vector output registers, touches CR
+ void::model-function::ppc_insn_vr_cr:itable_index index, model_data *model_ptr, const unsigned32 out_vmask, const unsigned32 in_vmask, const unsigned32 cr_mask
+ 	const unsigned32 vr_mask = out_vmask | in_vmask;
+ 	model_busy *busy_ptr;
+ 
+ 	if ((model_ptr->vr_busy & vr_mask) || (model_ptr->cr_fpscr_busy & cr_mask)) {
+ 	  model_new_cycle(model_ptr);			/* don't count first dependency as a stall */
+ 
+ 	  while ((model_ptr->vr_busy & vr_mask) || (model_ptr->cr_fpscr_busy & cr_mask)) {
+ 	    if (WITH_TRACE && ppc_trace[trace_model]) {
+ 	      model_trace_busy_p(model_ptr, 0, 0, cr_mask, PPC_NO_SPR);
+ 	      model_trace_altivec_busy_p(model_ptr, vr_mask);
+ 	    }
+ 
+ 	    model_ptr->nr_stalls_data++;
+ 	    model_new_cycle(model_ptr);
+ 	  }
+ 	}
+ 
+ 	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+ 	model_ptr->cr_fpscr_busy |= cr_mask;
+ 	busy_ptr->cr_fpscr_busy |= cr_mask;
+ 	model_ptr->vr_busy |= out_vmask;
+ 	busy_ptr->vr_busy |= out_vmask;
+ 
+ 	if (out_vmask)
+ 	  busy_ptr->nr_writebacks = (PPC_ONE_BIT_SET_P(out_vmask)) ? 1 : 2;
+ 
+ 	if (cr_mask)
+ 	  busy_ptr->nr_writebacks++;
+ 
+ 	if (WITH_TRACE && ppc_trace[trace_model])
+ 	  model_trace_altivec_make_busy(model_ptr, vr_mask, cr_mask);
+ 
+ # Schedule an AltiVec instruction that takes vector input registers and produces vector output registers, touches VSCR
+ void::model-function::ppc_insn_vr_vscr:itable_index index, model_data *model_ptr, const unsigned32 out_vmask, const unsigned32 in_vmask
+ 	const unsigned32 vr_mask = out_vmask | in_vmask;
+ 	model_busy *busy_ptr;
+ 
+ 	if ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
+ 	  model_new_cycle(model_ptr);			/* don't count first dependency as a stall */
+ 
+ 	  while ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
+ 	    if (WITH_TRACE && ppc_trace[trace_model])
+ 	      model_trace_altivec_busy_p(model_ptr, vr_mask);
+ 
+ 	    model_ptr->nr_stalls_data++;
+ 	    model_new_cycle(model_ptr);
+ 	  }
+ 	}
+ 
+ 	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+ 	model_ptr->vr_busy |= out_vmask;
+ 	busy_ptr->vr_busy |= out_vmask;
+ 	model_ptr->vscr_busy = 1;
+ 	busy_ptr->vscr_busy = 1;
+ 
+ 	if (out_vmask)
+ 	  busy_ptr->nr_writebacks = 1 + ((PPC_ONE_BIT_SET_P(out_vmask)) ? 1 : 2);
+ 
+ 	if (WITH_TRACE && ppc_trace[trace_model])
+ 	  model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
+ 
+ # Schedule an MFVSCR instruction that takes the VSCR as input and produces an AltiVec output register
+ void::model-function::ppc_insn_from_vscr:itable_index index, model_data *model_ptr, const unsigned32 vr_mask
+ 	model_busy *busy_ptr;
+ 
+ 	while ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
+ 	  if (WITH_TRACE && ppc_trace[trace_model])
+ 	    model_trace_altivec_busy_p(model_ptr, vr_mask);
+ 
+ 	  model_ptr->nr_stalls_data++;
+ 	  model_new_cycle(model_ptr);
+ 	}
+ 	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+ 	model_ptr->vr_busy |= vr_mask;
+ 	busy_ptr->vr_busy |= vr_mask;
+ 
+ 	if (vr_mask)
+ 	  busy_ptr->nr_writebacks = 1;
+ 
+ 	model_ptr->vr_busy |= vr_mask;
+ 	if (WITH_TRACE && ppc_trace[trace_model])
+ 	  model_trace_altivec_make_busy(model_ptr, vr_mask, 0);
+ 
+ # Schedule an MTVSCR instruction that takes one AltiVec input register and produces a VSCR output
+ void::model-function::ppc_insn_to_vscr:itable_index index, model_data *model_ptr, const unsigned32 vr_mask
+ 	model_busy *busy_ptr;
+ 
+ 	while ((model_ptr->vr_busy & vr_mask) != 0 || model_ptr->vscr_busy != 0) {
+ 	  if (WITH_TRACE && ppc_trace[trace_model])
+ 	    model_trace_altivec_busy_p(model_ptr, vr_mask);
+ 
+ 	  model_ptr->nr_stalls_data++;
+ 	  model_new_cycle(model_ptr);
+ 	}
+ 	busy_ptr = model_wait_for_unit(index, model_ptr, &model_ptr->timing[index]);
+ 	busy_ptr->vscr_busy = 1;
+ 	model_ptr->vscr_busy = 1;
+ 	busy_ptr->nr_writebacks = 1;
+ 
+ 	TRACE(trace_model,("Making VSCR busy.\n"));
  
  model_data *::model-function::model_create:cpu *processor
  	model_data *model_ptr = ZALLOC(model_data);
*************** void::model-function::model_branch_predi
*** 927,933 ****
--- 1180,1272 ----
  ::internal::floating_point_unavailable
  	floating_point_unavailable_interrupt(processor, cia);
  
+ 
+ # The following are AltiVec saturate operations
  
+ signed8::model-function::altivec_signed_saturate_8:signed16 val, int *sat
+ 	  signed8 rv;
+ 	  if (val > 127) {
+ 	    rv = 127;
+ 	    *sat = 1;
+ 	  } else if (val < -128) {
+ 	    rv = -128;
+ 	    *sat = 1;
+ 	  } else {
+ 	    rv = val;
+ 	    *sat = 0;
+ 	  }
+ 	  return rv;
+ 
+ signed16::model-function::altivec_signed_saturate_16:signed32 val, int *sat
+ 	  signed16 rv;
+ 	  if (val > 32767) {
+ 	    rv = 32767;
+ 	    *sat = 1;
+ 	  } else if (val < -32768) {
+ 	    rv = -32768;
+ 	    *sat = 1;
+ 	  } else {
+ 	    rv = val;
+ 	    *sat = 0;
+ 	  }
+ 	  return rv;
+ 
+ signed32::model-function::altivec_signed_saturate_32:signed64 val, int *sat
+ 	  signed32 rv;
+ 	  if (val > 2147483647) {
+ 	    rv = 2147483647;
+ 	    *sat = 1;
+ 	  } else if (val < -2147483648LL) {
+ 	    rv = -2147483648LL;
+ 	    *sat = 1;
+ 	  } else {
+ 	    rv = val;
+ 	    *sat = 0;
+ 	  }
+ 	  return rv;
+ 
+ unsigned8::model-function::altivec_unsigned_saturate_8:signed16 val, int *sat
+ 	  unsigned8 rv;
+ 	  if (val > 255) {
+ 	    rv = 255;
+ 	    *sat = 1;
+ 	  } else if (val < 0) {
+ 	    rv = 0;
+ 	    *sat = 1;
+ 	  } else {
+ 	    rv = val;
+ 	    *sat = 0;
+ 	  }
+ 	  return rv;
+ 
+ unsigned16::model-function::altivec_unsigned_saturate_16:signed32 val, int *sat
+ 	  unsigned16 rv;
+ 	  if (val > 65535) {
+ 	    rv = 65535;
+ 	    *sat = 1;
+ 	  } else if (val < 0) {
+ 	    rv = 0;
+ 	    *sat = 1;
+ 	  } else {
+ 	    rv = val;
+ 	    *sat = 0;
+ 	  }
+ 	  return rv;
+ 
+ unsigned32::model-function::altivec_unsigned_saturate_32:signed64 val, int *sat
+ 	  unsigned32 rv;
+ 	  if (val > 4294967295LL) {
+ 	    rv = 4294967295LL;
+ 	    *sat = 1;
+ 	  } else if (val < 0) {
+ 	    rv = 0;
+ 	    *sat = 1;
+ 	  } else {
+ 	    rv = val;
+ 	    *sat = 0;
+ 	  }
+ 	  return rv;
+ 
  #
  # Floating point support functions
  #
*************** void::function::invalid_zero_divide_oper
*** 4925,4927 ****
--- 5264,7256 ----
  0.31,6.RT,11.RA,16.RB,21.310,31./:X:earwax::External Control In Word Indexed
  
  0.31,6.RS,11.RA,16.RB,21.438,31./:X:earwax::External Control Out Word Indexed
+ 
+ 
+ #
+ # Motorola AltiVec instructions.
+ #
+ 
+ #
+ # Load instructions, 6-14 ... 6-22.
+ #
+ 
+ 0.31,6.VS,11.RA,16.RB,21.7,31.0:X::lvebx %VD, %RA, %RB:Load Vector Element Byte Indexed
+ 	unsigned_word b;
+ 	unsigned_word EA;
+ 	unsigned_word eb;
+ 	if (RA_is_0) b = 0;
+ 	else         b = *rA;
+ 	EA = b + *rB;
+ 	eb = EA & 0xf;
+ 	(*vS).b[AV_BINDEX(eb)] = MEM(unsigned, EA, 1);
+ 	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+ 
+ 0.31,6.VS,11.RA,16.RB,21.39,31.0:X::lvehx %VD, %RA, %RB:Load Vector Element Half Word Indexed
+ 	unsigned_word b;
+ 	unsigned_word EA;
+ 	unsigned_word eb;
+ 	if (RA_is_0) b = 0;
+ 	else         b = *rA;
+ 	EA = (b + *rB) & ~1;
+ 	eb = EA & 0xf;
+ 	(*vS).h[AV_HINDEX(eb/2)] = MEM(unsigned, EA, 2);
+ 	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+ 
+ 0.31,6.VS,11.RA,16.RB,21.71,31.0:X::lvewx %VD, %RA, %RB:Load Vector Element Word Indexed
+ 	unsigned_word b;
+ 	unsigned_word EA;
+ 	unsigned_word eb;
+ 	if (RA_is_0) b = 0;
+ 	else         b = *rA;
+ 	EA = (b + *rB) & ~3;
+ 	eb = EA & 0xf;
+ 	(*vS).w[eb/4] = MEM(unsigned, EA, 4);
+ 	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+ 
+ 
+ 0.31,6.VS,11.RA,16.RB,21.6,31.0:X::lvsl %VD, %RA, %RB:Load Vector for Shift Left
+ 	unsigned_word b;
+ 	unsigned_word addr;
+ 	int i, j;
+ 	if (RA_is_0) b = 0;
+ 	else         b = *rA;
+ 	addr = b + *rB;
+ 	j = addr & 0xf;
+ 	for (i = 15; i >= 0; i--)
+ 	  (*vS).b[i] = j++;
+ 	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+ 
+ 0.31,6.VS,11.RA,16.RB,21.38,31.0:X::lvsr %VD, %RA, %RB:Load Vector for Shift Right
+ 	unsigned_word b;
+ 	unsigned_word addr;
+ 	int i, j;
+ 	if (RA_is_0) b = 0;
+ 	else         b = *rA;
+ 	addr = b + *rB;
+ 	j = 0x10 - (addr & 0xf);
+ 	for (i = 15; i >= 0; i--)
+ 	  (*vS).b[i] = j++;
+ 	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+ 
+ 
+ 0.31,6.VS,11.RA,16.RB,21.103,31.0:X::lvx %VD, %RA, %RB:Load Vector Indexed
+ 	unsigned_word b;
+ 	unsigned_word EA;
+ 	if (RA_is_0) b = 0;
+ 	else         b = *rA;
+ 	EA = (b + *rB) & ~0xf;
+ 	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN) {
+ 	  (*vS).w[0] = MEM(unsigned, EA + 0, 4);
+ 	  (*vS).w[1] = MEM(unsigned, EA + 4, 4);
+ 	  (*vS).w[2] = MEM(unsigned, EA + 8, 4);
+ 	  (*vS).w[3] = MEM(unsigned, EA + 12, 4);
+ 	} else {
+ 	  (*vS).w[0] = MEM(unsigned, EA + 12, 4);
+ 	  (*vS).w[1] = MEM(unsigned, EA + 8, 4);
+ 	  (*vS).w[2] = MEM(unsigned, EA + 4, 4);
+ 	  (*vS).w[3] = MEM(unsigned, EA + 0, 4);
+ 	}
+ 	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+ 
+ 0.31,6.VS,11.RA,16.RB,21.359,31.0:X::lvxl %VD, %RA, %RB:Load Vector Indexed LRU
+ 	unsigned_word b;
+ 	unsigned_word EA;
+ 	if (RA_is_0) b = 0;
+ 	else         b = *rA;
+ 	EA = (b + *rB) & ~0xf;
+ 	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN) {
+ 	  (*vS).w[0] = MEM(unsigned, EA + 0, 4);
+ 	  (*vS).w[1] = MEM(unsigned, EA + 4, 4);
+ 	  (*vS).w[2] = MEM(unsigned, EA + 8, 4);
+ 	  (*vS).w[3] = MEM(unsigned, EA + 12, 4);
+ 	} else {
+ 	  (*vS).w[0] = MEM(unsigned, EA + 8, 4);
+ 	  (*vS).w[1] = MEM(unsigned, EA + 12, 4);
+ 	  (*vS).w[2] = MEM(unsigned, EA + 0, 4);
+ 	  (*vS).w[3] = MEM(unsigned, EA + 4, 4);
+ 	}
+ 	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+ 
+ #
+ # Move to/from VSCR instructions, 6-23 & 6-24.
+ #
+ 
+ 0.4,6.VS,11.0,16.0,21.1540:VX::mfvscr %VS:Move from Vector Status and Control Register
+ 	(*vS).w[0] = 0;
+ 	(*vS).w[1] = 0;
+ 	(*vS).w[2] = 0;
+ 	(*vS).w[3] = VSCR;
+ 	PPC_INSN_FROM_VSCR(VS_BITMASK);
+ 
+ 0.4,6.0,11.0,16.VB,21.1604:VX::mtvscr %VB:Move to Vector Status and Control Register
+ 	VSCR = (*vB).w[3];
+ 	PPC_INSN_TO_VSCR(VB_BITMASK);
+ 
+ #
+ # Store instructions, 6-25 ... 6-29.
+ #
+ 
+ 0.31,6.VS,11.RA,16.RB,21.135,31.0:X::stvebx %VD, %RA, %RB:Store Vector Element Byte Indexed
+ 	unsigned_word b;
+ 	unsigned_word EA;
+ 	unsigned_word eb;
+ 	if (RA_is_0) b = 0;
+ 	else         b = *rA;
+ 	EA = b + *rB;
+ 	eb = EA & 0xf;
+ 	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+ 	  STORE(EA, 1, (*vS).b[eb]);
+ 	else
+ 	  STORE(EA, 1, (*vS).b[15-eb]);
+ 	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+ 
+ 0.31,6.VS,11.RA,16.RB,21.167,31.0:X::stvehx %VD, %RA, %RB:Store Vector Element Half Word Indexed
+ 	unsigned_word b;
+ 	unsigned_word EA;
+ 	unsigned_word eb;
+ 	if (RA_is_0) b = 0;
+ 	else         b = *rA;
+ 	EA = (b + *rB) & ~1;
+ 	eb = EA & 0xf;
+ 	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+ 	  STORE(EA, 2, (*vS).h[eb/2]);
+ 	else
+ 	  STORE(EA, 2, (*vS).h[7-(eb/2)]);
+ 	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+ 
+ 0.31,6.VS,11.RA,16.RB,21.199,31.0:X::stvewx %VD, %RA, %RB:Store Vector Element Word Indexed
+ 	unsigned_word b;
+ 	unsigned_word EA;
+ 	unsigned_word eb;
+ 	if (RA_is_0) b = 0;
+ 	else         b = *rA;
+ 	EA = (b + *rB) & ~3;
+ 	eb = EA & 0xf;
+ 	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+ 	  STORE(EA, 4, (*vS).w[eb/4]);
+ 	else
+ 	  STORE(EA, 4, (*vS).w[3-(eb/4)]);
+ 	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+ 
+ 0.31,6.VS,11.RA,16.RB,21.231,31.0:X::stvx %VD, %RA, %RB:Store Vector Indexed
+ 	unsigned_word b;
+ 	unsigned_word EA;
+ 	if (RA_is_0) b = 0;
+ 	else         b = *rA;
+ 	EA = (b + *rB) & ~0xf;
+ 	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN) {
+ 	  STORE(EA + 0, 4, (*vS).w[0]);
+ 	  STORE(EA + 4, 4, (*vS).w[1]);
+ 	  STORE(EA + 8, 4, (*vS).w[2]);
+ 	  STORE(EA + 12, 4, (*vS).w[3]);
+ 	} else {
+ 	  STORE(EA + 8, 4, (*vS).w[0]);
+ 	  STORE(EA + 12, 4, (*vS).w[1]);
+ 	  STORE(EA + 0, 4, (*vS).w[2]);
+ 	  STORE(EA + 4, 4, (*vS).w[3]);
+ 	}
+ 	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+ 
+ 0.31,6.VS,11.RA,16.RB,21.487,31.0:X::stvxl %VD, %RA, %RB:Store Vector Indexed LRU
+ 	unsigned_word b;
+ 	unsigned_word EA;
+ 	if (RA_is_0) b = 0;
+ 	else         b = *rA;
+ 	EA = (b + *rB) & ~0xf;
+ 	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN) {
+ 	  STORE(EA + 0, 4, (*vS).w[0]);
+ 	  STORE(EA + 4, 4, (*vS).w[1]);
+ 	  STORE(EA + 8, 4, (*vS).w[2]);
+ 	  STORE(EA + 12, 4, (*vS).w[3]);
+ 	} else {
+ 	  STORE(EA + 8, 4, (*vS).w[0]);
+ 	  STORE(EA + 12, 4, (*vS).w[1]);
+ 	  STORE(EA + 0, 4, (*vS).w[2]);
+ 	  STORE(EA + 4, 4, (*vS).w[3]);
+ 	}
+ 	PPC_INSN_INT_VR(0, RA_BITMASK | RB_BITMASK, VS_BITMASK, 0);
+ 
+ #
+ # Vector Add instructions, 6-30 ... 6-40.
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.384:VX::vaddcuw %VD, %VA, %VB:Vector Add Carryout Unsigned Word
+ 	unsigned64 temp;
+ 	int i;
+ 	for (i = 0; i < 4; i++) {
+ 	  temp = (unsigned64)(*vA).w[i] + (unsigned64)(*vB).w[i];
+ 	  (*vS).w[i] = temp >> 32;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.10:VX::vaddfp %VD, %VA, %VB:Vector Add Floating Point
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu a, b, d;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&a, (*vA).w[i]);
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  sim_fpu_add (&d, &a, &b);
+ 	  sim_fpu_to32 (&f, &d);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 	
+ 0.4,6.VS,11.VA,16.VB,21.768:VX::vaddsbs %VD, %VA, %VB:Vector Add Signed Byte Saturate
+ 	int i, sat = 0, tempsat;
+ 	signed16 temp;
+ 	for (i = 0; i < 16; i++) {
+ 	  temp = (signed16)(signed8)(*vA).b[i] + (signed16)(signed8)(*vB).b[i];
+ 	  (*vS).b[i] = altivec_signed_saturate_8(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.832:VX::vaddshs %VD, %VA, %VB:Vector Add Signed Half Word Saturate
+ 	int i, sat = 0, tempsat;
+ 	signed32 temp, a, b;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (signed32)(signed16)(*vA).h[i];
+ 	  b = (signed32)(signed16)(*vB).h[i];
+ 	  temp = a + b;
+ 	  (*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.896:VX::vaddsws %VD, %VA, %VB:Vector Add Signed Word Saturate
+ 	int i, sat = 0, tempsat;
+ 	signed64 temp;
+ 	for (i = 0; i < 4; i++) {
+ 	  temp = (signed64)(signed32)(*vA).w[i] + (signed64)(signed32)(*vB).w[i];
+ 	  (*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.0:VX::vaddubm %VD, %VA, %VB:Vector Add Unsigned Byte Modulo
+ 	int i;
+ 	for (i = 0; i < 16; i++)
+ 	  (*vS).b[i] = ((*vA).b[i] + (*vB).b[i]) & 0xff;
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.512:VX::vaddubs %VD, %VA, %VB:Vector Add Unsigned Byte Saturate
+ 	int i, sat, tempsat;
+ 	signed16 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 16; i++) {
+ 	  temp = (signed16)(unsigned8)(*vA).b[i] + (signed16)(unsigned8)(*vB).b[i];
+ 	  (*vS).b[i] = altivec_unsigned_saturate_8(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.64:VX::vadduhm %VD, %VA, %VB:Vector Add Unsigned Half Word Modulo
+ 	int i;
+ 	for (i = 0; i < 8; i++)
+ 	  (*vS).h[i] = ((*vA).h[i] + (*vB).h[i]) & 0xffff;
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.576:VX::vadduhs %VD, %VA, %VB:Vector Add Unsigned Half Word Saturate
+ 	int i, sat = 0, tempsat;
+ 	signed32 temp;
+ 	for (i = 0; i < 8; i++) {
+ 	  temp = (signed32)(unsigned16)(*vA).h[i] + (signed32)(unsigned16)(*vB).h[i];
+ 	  (*vS).h[i] = altivec_unsigned_saturate_16(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.128:VX::vadduwm %VD, %VA, %VB:Vector Add Unsigned Word Modulo
+ 	int i;
+ 	for (i = 0; i < 4; i++)
+ 	  (*vS).w[i] = (*vA).w[i] + (*vB).w[i];
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.640:VX::vadduws %VD, %VA, %VB:Vector Add Unsigned Word Saturate
+ 	int i, sat = 0, tempsat;
+ 	signed64 temp;
+ 	for (i = 0; i < 4; i++) {
+ 	  temp = (signed64)(unsigned32)(*vA).w[i] + (signed64)(unsigned32)(*vB).w[i];
+ 	  (*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ #
+ # Vector AND instructions, 6-41, 6-42
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1028:VX::vand %VD, %VA, %VB:Vector Logical AND
+ 	int i;
+ 	for (i = 0; i < 4; i++)
+ 	  (*vS).w[i] = (*vA).w[i] & (*vB).w[i];
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1092:VX::vandc %VD, %VA, %VB:Vector Logical AND with Complement
+ 	int i;
+ 	for (i = 0; i < 4; i++)
+ 	  (*vS).w[i] = (*vA).w[i] & ~((*vB).w[i]);
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 
+ #
+ # Vector Average instructions, 6-43, 6-48
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1282:VX::vavgsb %VD, %VA, %VB:Vector Average Signed Byte
+ 	int i;
+ 	signed16 temp, a, b;
+ 	for (i = 0; i < 16; i++) {
+ 	  a = (signed16)(signed8)(*vA).b[i];
+ 	  b = (signed16)(signed8)(*vB).b[i];
+ 	  temp = a + b + 1;
+ 	  (*vS).b[i] = (temp >> 1) & 0xff;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1346:VX::vavgsh %VD, %VA, %VB:Vector Average Signed Half Word
+ 	int i;
+ 	signed32 temp, a, b;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (signed32)(signed16)(*vA).h[i];
+ 	  b = (signed32)(signed16)(*vB).h[i];
+ 	  temp = a + b + 1;
+ 	  (*vS).h[i] = (temp >> 1) & 0xffff;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1410:VX::vavgsw %VD, %VA, %VB:Vector Average Signed Word
+ 	int i;
+ 	signed64 temp, a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (signed64)(signed32)(*vA).w[i];
+ 	  b = (signed64)(signed32)(*vB).w[i];
+ 	  temp = a + b + 1;
+ 	  (*vS).w[i] = (temp >> 1) & 0xffffffff;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1026:VX::vavgub %VD, %VA, %VB:Vector Average Unsigned Byte
+ 	int i;
+ 	unsigned16 temp, a, b;
+ 	for (i = 0; i < 16; i++) {
+ 	  a = (*vA).b[i];
+ 	  b = (*vB).b[i];
+ 	  temp = a + b + 1;
+ 	  (*vS).b[i] = (temp >> 1) & 0xff;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1090:VX::vavguh %VD, %VA, %VB:Vector Average Unsigned Half Word
+ 	int i;
+ 	unsigned32 temp, a, b;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).h[i];
+ 	  b = (*vB).h[i];
+ 	  temp = a + b + 1;
+ 	  (*vS).h[i] = (temp >> 1) & 0xffff;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1154:VX::vavguw %VD, %VA, %VB:Vector Average Unsigned Word
+ 	int i;
+ 	unsigned64 temp, a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (*vA).w[i];
+ 	  b = (*vB).w[i];
+ 	  temp = a + b + 1;
+ 	  (*vS).w[i] = (temp >> 1) & 0xffffffff;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ #
+ # Vector Fixed Point Convert instructions, 6-49, 6-50
+ #
+ 
+ 0.4,6.VS,11.UIMM,16.VB,21.842:VX::vcfsx %VD, %VB, %UIMM:Vector Convert From Signed Fixed-Point Word
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu b, div, d;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  sim_fpu_u32to (&div, 2 << UIMM, sim_fpu_round_default);
+ 	  sim_fpu_div (&d, &b, &div);
+ 	  sim_fpu_to32 (&f, &d);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.UIMM,16.VB,21.778:VX::vcfux %VD, %VB, %UIMM:Vector Convert From Unsigned Fixed-Point Word
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu b, d, div;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  sim_fpu_u32to (&div, 2 << UIMM, sim_fpu_round_default);
+ 	  sim_fpu_div (&d, &b, &div);
+ 	  sim_fpu_to32u (&f, &d, sim_fpu_round_default);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ #
+ # Vector Compare instructions, 6-51 ... 6-64
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.966:VXR::vcmpbfpx %VD, %VA, %VB:Vector Compare Bounds Floating Point
+ 	int i, le, ge;
+ 	sim_fpu a, b, d;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&a, (*vA).w[i]);
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  le = sim_fpu_is_le(&a, &b);
+ 	  ge = sim_fpu_is_ge(&a, &b);
+ 	  (*vS).w[i] = (le ? 0 : 1 << 31) | (ge ? 0 : 1 << 30);
+ 	}
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 0);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.198:VXR::vcmpeqfpx %VD, %VA, %VB:Vector Compare Equal-to Floating Point
+ 	int i;
+ 	sim_fpu a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&a, (*vA).w[i]);
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  if (sim_fpu_is_eq(&a, &b))
+ 	    (*vS).w[i] = 0xffffffff;
+ 	  else
+ 	    (*vS).w[i] = 0;
+ 	}
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 1);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.6:VXR::vcmpequbx %VD, %VA, %VB:Vector Compare Equal-to Unsigned Byte
+ 	int i;
+ 	for (i = 0; i < 16; i++)
+ 	  if ((*vA).b[i] == (*vB).b[i])
+ 	    (*vS).b[i] = 0xff;
+ 	  else
+ 	    (*vS).b[i] = 0;
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 1);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.70:VXR::vcmpequhx %VD, %VA, %VB:Vector Compare Equal-to Unsigned Half Word
+ 	int i;
+ 	for (i = 0; i < 8; i++)
+ 	  if ((*vA).h[i] == (*vB).h[i])
+ 	    (*vS).h[i] = 0xffff;
+ 	  else
+ 	    (*vS).h[i] = 0;
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 1);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.134:VXR::vcmpequwx %VD, %VA, %VB:Vector Compare Equal-to Unsigned Word
+ 	int i;
+ 	for (i = 0; i < 4; i++)
+ 	  if ((*vA).w[i] == (*vB).w[i])
+ 	    (*vS).w[i] = 0xffffffff;
+ 	  else
+ 	    (*vS).w[i] = 0;
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 1);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.454:VXR::vcmpgefpx %VD, %VA, %VB:Vector Compare Greater-Than-or-Equal-to Floating Point
+ 	int i;
+ 	sim_fpu a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&a, (*vA).w[i]);
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  if (sim_fpu_is_ge(&a, &b))
+ 	    (*vS).w[i] = 0xffffffff;
+ 	  else
+ 	    (*vS).w[i] = 0;
+ 	}
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 1);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.710:VXR::vcmpgtfpx %VD, %VA, %VB:Vector Compare Greater-Than Floating Point
+ 	int i;
+ 	sim_fpu a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&a, (*vA).w[i]);
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  if (sim_fpu_is_gt(&a, &b))
+ 	    (*vS).w[i] = 0xffffffff;
+ 	  else
+ 	    (*vS).w[i] = 0;
+ 	}
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 1);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.774:VXR::vcmpgtsbx %VD, %VA, %VB:Vector Compare Greater-Than Signed Byte
+ 	int i;
+ 	signed8 a, b;
+ 	for (i = 0; i < 16; i++) {
+ 	  a = (*vA).b[i];
+ 	  b = (*vB).b[i];
+ 	  if (a > b)
+ 	    (*vS).b[i] = 0xff;
+ 	  else
+ 	    (*vS).b[i] = 0;
+ 	}
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 1);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.838:VXR::vcmpgtshx %VD, %VA, %VB:Vector Compare Greater-Than Signed Half Word
+ 	int i;
+ 	signed16 a, b;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).h[i];
+ 	  b = (*vB).h[i];
+ 	  if (a > b)
+ 	    (*vS).h[i] = 0xffff;
+ 	  else
+ 	    (*vS).h[i] = 0;
+ 	}
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 1);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.902:VXR::vcmpgtswx %VD, %VA, %VB:Vector Compare Greater-Than Signed Word
+ 	int i;
+ 	signed32 a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (*vA).w[i];
+ 	  b = (*vB).w[i];
+ 	  if (a > b)
+ 	    (*vS).w[i] = 0xffffffff;
+ 	  else
+ 	    (*vS).w[i] = 0;
+ 	}
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 1);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.518:VXR::vcmpgtubx %VD, %VA, %VB:Vector Compare Greater-Than Unsigned Byte
+ 	int i;
+ 	unsigned8 a, b;
+ 	for (i = 0; i < 16; i++) {
+ 	  a = (*vA).b[i];
+ 	  b = (*vB).b[i];
+ 	  if (a > b)
+ 	    (*vS).b[i] = 0xff;
+ 	  else
+ 	    (*vS).b[i] = 0;
+ 	}
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 1);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.582:VXR::vcmpgtuhx %VD, %VA, %VB:Vector Compare Greater-Than Unsigned Half Word
+ 	int i;
+ 	unsigned16 a, b;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).h[i];
+ 	  b = (*vB).h[i];
+ 	  if (a > b)
+ 	    (*vS).h[i] = 0xffff;
+ 	  else
+ 	    (*vS).h[i] = 0;
+ 	}
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 1);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.RC,22.646:VXR::vcmpgtuwx %VD, %VA, %VB:Vector Compare Greater-Than Unsigned Word
+ 	int i;
+ 	unsigned32 a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (*vA).w[i];
+ 	  b = (*vB).w[i];
+ 	  if (a > b)
+ 	    (*vS).w[i] = 0xffffffff;
+ 	  else
+ 	    (*vS).w[i] = 0;
+ 	}
+ 	if (RC)
+ 	  ALTIVEC_SET_CR6(vS, 1);
+ 	PPC_INSN_VR_CR(VS_BITMASK, VA_BITMASK | VB_BITMASK, RC ? 0x000000f0 : 0);
+ 
+ #
+ # Vector Convert instructions, 6-65, 6-66.
+ #
+ 
+ 0.4,6.VS,11.UIMM,16.VB,21.970:VX::vctsxs %VD, %VB, %UIMM:Vector Convert to Signed Fixed-Point Word Saturate
+ 	int i, sat, tempsat;
+ 	signed64 temp;
+ 	sim_fpu a, b, m;
+ 	sat = 0;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  sim_fpu_u32to (&m, 2 << UIMM, sim_fpu_round_default);
+ 	  sim_fpu_mul (&a, &b, &m);
+ 	  sim_fpu_to64i (&temp, &a, sim_fpu_round_default);
+ 	  (*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.UIMM,16.VB,21.906:VX::vctuxs %VD, %VB, %UIMM:Vector Convert to Unsigned Fixed-Point Word Saturate
+ 	int i, sat, tempsat;
+ 	signed64 temp;
+ 	sim_fpu a, b, m;
+ 	sat = 0;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  sim_fpu_u32to (&m, 2 << UIMM, sim_fpu_round_default);
+ 	  sim_fpu_mul (&a, &b, &m);
+ 	  sim_fpu_to64u (&temp, &a, sim_fpu_round_default);
+ 	  (*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VB_BITMASK);
+ 
+ #
+ # Vector Estimate instructions, 6-67 ... 6-70.
+ #
+ 
+ 0.4,6.VS,11.0,16.VB,21.394:VX::vexptefp %VD, %VB:Vector 2 Raised to the Exponent Estimate Floating Point
+ 	int i;
+ 	unsigned32 f;
+ 	signed32 bi;
+ 	sim_fpu b, d;
+ 	for (i = 0; i < 4; i++) {
+ 	  /*HACK!*/
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  sim_fpu_to32i (&bi, &b, sim_fpu_round_default);
+ 	  bi = 1 << bi;
+ 	  sim_fpu_32to (&d, bi);
+ 	  sim_fpu_to32 (&f, &d);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.0,16.VB,21.458:VX::vlogefp %VD, %VB:Vector Log2 Estimate Floating Point
+ 	int i;
+ 	unsigned32 c, u, f;
+ 	sim_fpu b, cfpu, d;
+ 	for (i = 0; i < 4; i++) {
+ 	  /*HACK!*/
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  sim_fpu_to32u (&u, &b, sim_fpu_round_default);
+ 	  for (c = 0; (u /= 2) > 1; c++)
+ 	    ;
+ 	  sim_fpu_32to (&cfpu, c);
+ 	  sim_fpu_add (&d, &b, &cfpu);
+ 	  sim_fpu_to32 (&f, &d);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VB_BITMASK);
+ 
+ #
+ # Vector Multiply Add instruction, 6-71
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.46:VAX::vmaddfp %VD, %VA, %VB, %VC:Vector Multiply Add Floating Point
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu a, b, c, d, e;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&a, (*vA).w[i]);
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  sim_fpu_32to (&c, (*vC).w[i]);
+ 	  sim_fpu_mul (&e, &a, &c);
+ 	  sim_fpu_add (&d, &e, &b);
+ 	  sim_fpu_to32 (&f, &d);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ 
+ #
+ # Vector Maximum instructions, 6-72 ... 6-78.
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1034:VX::vmaxfp %VD, %VA, %VB:Vector Maximum Floating Point
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu a, b, d;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&a, (*vA).w[i]);
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  sim_fpu_max (&d, &a, &b);
+ 	  sim_fpu_to32 (&f, &d);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.258:VX::vmaxsb %VD, %VA, %VB:Vector Maximum Signed Byte
+ 	int i;
+ 	signed8 a, b;
+ 	for (i = 0; i < 16; i++) {
+ 	  a = (*vA).b[i];
+ 	  b = (*vB).b[i];
+ 	  if (a > b)
+ 	    (*vS).b[i] = a;
+ 	  else
+ 	    (*vS).b[i] = b;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.322:VX::vmaxsh %VD, %VA, %VB:Vector Maximum Signed Half Word
+ 	int i;
+ 	signed16 a, b;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).h[i];
+ 	  b = (*vB).h[i];
+ 	  if (a > b)
+ 	    (*vS).h[i] = a;
+ 	  else
+ 	    (*vS).h[i] = b;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.386:VX::vmaxsw %VD, %VA, %VB:Vector Maximum Signed Word
+ 	int i;
+ 	signed32 a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (*vA).w[i];
+ 	  b = (*vB).w[i];
+ 	  if (a > b)
+ 	    (*vS).w[i] = a;
+ 	  else
+ 	    (*vS).w[i] = b;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.2:VX::vmaxub %VD, %VA, %VB:Vector Maximum Unsigned Byte
+ 	int i;
+ 	unsigned8 a, b;
+ 	for (i = 0; i < 16; i++) {
+ 	  a = (*vA).b[i];
+ 	  b = (*vB).b[i];
+ 	  if (a > b)
+ 	    (*vS).b[i] = a;
+ 	  else
+ 	    (*vS).b[i] = b;
+ 	};
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.66:VX::vmaxuh %VD, %VA, %VB:Vector Maximum Unsigned Half Word
+ 	int i;
+ 	unsigned16 a, b;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).h[i];
+ 	  b = (*vB).h[i];
+ 	  if (a > b)
+ 	    (*vS).h[i] = a;
+ 	  else
+ 	    (*vS).h[i] = b;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.130:VX::vmaxuw %VD, %VA, %VB:Vector Maximum Unsigned Word
+ 	int i;
+ 	unsigned32 a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (*vA).w[i];
+ 	  b = (*vB).w[i];
+ 	  if (a > b)
+ 	    (*vS).w[i] = a;
+ 	  else
+ 	    (*vS).w[i] = b;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 
+ #
+ # Vector Multiply High instructions, 6-79, 6-80.
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.32:VAX::vmhaddshs %VD, %VA, %VB, %VC:Vector Multiply High and Add Signed Half Word Saturate
+ 	int i, sat = 0, tempsat;
+ 	signed16 a, b;
+ 	signed32 prod, temp, c;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).h[i];
+ 	  b = (*vB).h[i];
+ 	  c = (signed32)(signed16)(*vC).h[i];
+ 	  prod = (signed32)a * (signed32)b;
+ 	  temp = (prod >> 15) + c;
+ 	  (*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.33:VAX::vmhraddshs %VD, %VA, %VB, %VC:Vector Multiply High Round and Add Signed Half Word Saturate
+ 	int i, sat = 0, tempsat;
+ 	signed16 a, b;
+ 	signed32 prod, temp, c;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).h[i];
+ 	  b = (*vB).h[i];
+ 	  c = (signed32)(signed16)(*vC).h[i];
+ 	  prod = (signed32)a * (signed32)b;
+ 	  prod += 0x4000;
+ 	  temp = (prod >> 15) + c;
+ 	  (*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ 
+ #
+ # Vector Minimum instructions, 6-81 ... 6-87
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1098:VX::vminfp %VD, %VA, %VB:Vector Minimum Floating Point
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu a, b, d;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&a, (*vA).w[i]);
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  sim_fpu_min (&d, &a, &b);
+ 	  sim_fpu_to32 (&f, &d);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.770:VX::vminsb %VD, %VA, %VB:Vector Minimum Signed Byte
+ 	int i;
+ 	signed8 a, b;
+ 	for (i = 0; i < 16; i++) {
+ 	  a = (*vA).b[i];
+ 	  b = (*vB).b[i];
+ 	  if (a < b)
+ 	    (*vS).b[i] = a;
+ 	  else
+ 	    (*vS).b[i] = b;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.834:VX::vminsh %VD, %VA, %VB:Vector Minimum Signed Half Word
+ 	int i;
+ 	signed16 a, b;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).h[i];
+ 	  b = (*vB).h[i];
+ 	  if (a < b)
+ 	    (*vS).h[i] = a;
+ 	  else
+ 	    (*vS).h[i] = b;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.898:VX::vminsw %VD, %VA, %VB:Vector Minimum Signed Word
+ 	int i;
+ 	signed32 a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (*vA).w[i];
+ 	  b = (*vB).w[i];
+ 	  if (a < b)
+ 	    (*vS).w[i] = a;
+ 	  else
+ 	    (*vS).w[i] = b;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.514:VX::vminub %VD, %VA, %VB:Vector Minimum Unsigned Byte
+ 	int i;
+ 	unsigned8 a, b;
+ 	for (i = 0; i < 16; i++) {
+ 	  a = (*vA).b[i];
+ 	  b = (*vB).b[i];
+ 	  if (a < b)
+ 	    (*vS).b[i] = a;
+ 	  else
+ 	    (*vS).b[i] = b;
+ 	};
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.578:VX::vminuh %VD, %VA, %VB:Vector Minimum Unsigned Half Word
+ 	int i;
+ 	unsigned16 a, b;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).h[i];
+ 	  b = (*vB).h[i];
+ 	  if (a < b)
+ 	    (*vS).h[i] = a;
+ 	  else
+ 	    (*vS).h[i] = b;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.642:VX::vminuw %VD, %VA, %VB:Vector Minimum Unsigned Word
+ 	int i;
+ 	unsigned32 a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (*vA).w[i];
+ 	  b = (*vB).w[i];
+ 	  if (a < b)
+ 	    (*vS).w[i] = a;
+ 	  else
+ 	    (*vS).w[i] = b;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 
+ #
+ # Vector Multiply Low instruction, 6-88
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.34:VAX::vmladduhm %VD, %VA, %VB, %VC:Vector Multiply Low and Add Unsigned Half Word Modulo
+ 	int i;
+ 	unsigned16 a, b, c;
+ 	unsigned32 prod;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).h[i];
+ 	  b = (*vB).h[i];
+ 	  c = (*vC).h[i];
+ 	  prod = (unsigned32)a * (unsigned32)b;
+ 	  (*vS).h[i] = (prod + c) & 0xffff;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ 
+ #
+ # Vector Merge instructions, 6-89 ... 6-94
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.12:VX::vmrghb %VD, %VA, %VB:Vector Merge High Byte
+ 	int i;
+ 	for (i = 0; i < 16; i += 2) {
+ 	  (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX(i/2)];
+ 	  (*vS).b[AV_BINDEX(i+1)] = (*vB).b[AV_BINDEX(i/2)]; 
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.76:VX::vmrghh %VD, %VA, %VB:Vector Merge High Half Word
+ 	int i;
+ 	for (i = 0; i < 8; i += 2) {
+ 	  (*vS).h[AV_HINDEX(i)] = (*vA).h[AV_HINDEX(i/2)];
+ 	  (*vS).h[AV_HINDEX(i+1)] = (*vB).h[AV_HINDEX(i/2)]; 
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.140:VX::vmrghw %VD, %VA, %VB:Vector Merge High Word
+ 	int i;
+ 	for (i = 0; i < 4; i += 2) {
+ 	  (*vS).w[i] = (*vA).w[i/2];
+ 	  (*vS).w[i+1] = (*vB).w[i/2]; 
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.268:VX::vmrglb %VD, %VA, %VB:Vector Merge Low Byte
+ 	int i;
+ 	for (i = 0; i < 16; i += 2) {
+ 	  (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX((i/2) + 8)];
+ 	  (*vS).b[AV_BINDEX(i+1)] = (*vB).b[AV_BINDEX((i/2) + 8)]; 
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.332:VX::vmrglh %VD, %VA, %VB:Vector Merge Low Half Word
+ 	int i;
+ 	for (i = 0; i < 8; i += 2) {
+ 	  (*vS).h[AV_HINDEX(i)] = (*vA).h[AV_HINDEX((i/2) + 4)];
+ 	  (*vS).h[AV_HINDEX(i+1)] = (*vB).h[AV_HINDEX((i/2) + 4)]; 
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.396:VX::vmrglw %VD, %VA, %VB:Vector Merge Low Word
+ 	int i;
+ 	for (i = 0; i < 4; i += 2) {
+ 	  (*vS).w[i] = (*vA).w[(i/2) + 2];
+ 	  (*vS).w[i+1] = (*vB).w[(i/2) + 2]; 
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 
+ #
+ # Vector Multiply Sum instructions, 6-95 ... 6-100
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.37:VAX::vmsummbm %VD, %VA, %VB, %VC:Vector Multiply Sum Mixed-Sign Byte Modulo
+ 	int i, j;
+ 	signed32 temp;
+ 	signed16 prod, a;
+ 	unsigned16 b;
+ 	for (i = 0; i < 4; i++) {
+ 	  temp = (*vC).w[i];
+ 	  for (j = 0; j < 4; j++) {
+ 	    a = (signed16)(signed8)(*vA).b[i*4+j]; 
+ 	    b = (*vB).b[i*4+j];
+ 	    prod = a * b;
+ 	    temp += (signed32)prod;
+ 	  }
+ 	  (*vS).w[i] = temp;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.40:VAX::vmsumshm %VD, %VA, %VB, %VC:Vector Multiply Sum Signed Half Word Modulo
+ 	int i, j;
+ 	signed32 temp, prod, a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  temp = (*vC).w[i];
+ 	  for (j = 0; j < 2; j++) {
+ 	    a = (signed32)(signed16)(*vA).h[i*2+j]; 
+ 	    b = (signed32)(signed16)(*vB).h[i*2+j];
+ 	    prod = a * b;
+ 	    temp += prod;
+ 	  }
+ 	  (*vS).w[i] = temp;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.41:VAX::vmsumshs %VD, %VA, %VB, %VC:Vector Multiply Sum Signed Half Word Saturate
+ 	int i, j, sat, tempsat;
+ 	signed64 temp;
+ 	signed32 prod, a, b;
+ 	sat = 0;
+ 	for (i = 0; i < 4; i++) {
+ 	  temp = (signed64)(signed32)(*vC).w[i];
+ 	  for (j = 0; j < 2; j++) {
+ 	    a = (signed32)(signed16)(*vA).h[i*2+j]; 
+ 	    b = (signed32)(signed16)(*vB).h[i*2+j];
+ 	    prod = a * b;
+ 	    temp += (signed64)prod;
+ 	  }
+ 	  (*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.36:VAX::vmsumubm %VD, %VA, %VB, %VC:Vector Multiply Sum Unsigned Byte Modulo
+ 	int i, j;
+ 	unsigned32 temp;
+ 	unsigned16 prod, a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  temp = (*vC).w[i];
+ 	  for (j = 0; j < 4; j++) {
+ 	    a = (*vA).b[i*4+j]; 
+ 	    b = (*vB).b[i*4+j];
+ 	    prod = a * b;
+ 	    temp += prod;
+ 	  }
+ 	  (*vS).w[i] = temp;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.38:VAX::vmsumuhm %VD, %VA, %VB, %VC:Vector Multiply Sum Unsigned Half Word Modulo
+ 	int i, j;
+ 	unsigned32 temp, prod, a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  temp = (*vC).w[i];
+ 	  for (j = 0; j < 2; j++) {
+ 	    a = (*vA).h[i*2+j]; 
+ 	    b = (*vB).h[i*2+j];
+ 	    prod = a * b;
+ 	    temp += prod;
+ 	  }
+ 	  (*vS).w[i] = temp;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.39:VAX::vmsumuhs %VD, %VA, %VB, %VC:Vector Multiply Sum Unsigned Half Word Saturate
+ 	int i, j, sat, tempsat;
+ 	unsigned32 temp, prod, a, b;
+ 	sat = 0;
+ 	for (i = 0; i < 4; i++) {
+ 	  temp = (*vC).w[i];
+ 	  for (j = 0; j < 2; j++) {
+ 	    a = (*vA).h[i*2+j]; 
+ 	    b = (*vB).h[i*2+j];
+ 	    prod = a * b;
+ 	    temp += prod;
+ 	  }
+ 	  (*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ 
+ #
+ # Vector Multiply Even/Odd instructions, 6-101 ... 6-108
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.776:VX::vmulesb %VD, %VA, %VB:Vector Multiply Even Signed Byte
+ 	int i;
+ 	signed8 a, b;
+ 	signed16 prod;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).b[AV_BINDEX(i*2)]; 
+ 	  b = (*vB).b[AV_BINDEX(i*2)];
+ 	  prod = a * b;
+ 	  (*vS).h[AV_HINDEX(i)] = prod;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.840:VX::vmulesh %VD, %VA, %VB:Vector Multiply Even Signed Half Word
+ 	int i;
+ 	signed16 a, b;
+ 	signed32 prod;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (*vA).h[AV_HINDEX(i*2)]; 
+ 	  b = (*vB).h[AV_HINDEX(i*2)];
+ 	  prod = a * b;
+ 	  (*vS).w[i] = prod;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.520:VX::vmuleub %VD, %VA, %VB:Vector Multiply Even Unsigned Byte
+ 	int i;
+ 	unsigned8 a, b;
+ 	unsigned16 prod;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).b[AV_BINDEX(i*2)]; 
+ 	  b = (*vB).b[AV_BINDEX(i*2)];
+ 	  prod = a * b;
+ 	  (*vS).h[AV_HINDEX(i)] = prod;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.584:VX::vmuleuh %VD, %VA, %VB:Vector Multiply Even Unsigned Half Word
+ 	int i;
+ 	unsigned16 a, b;
+ 	unsigned32 prod;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (*vA).h[AV_HINDEX(i*2)]; 
+ 	  b = (*vB).h[AV_HINDEX(i*2)];
+ 	  prod = a * b;
+ 	  (*vS).w[i] = prod;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.264:VX::vmulosb %VD, %VA, %VB:Vector Multiply Odd Signed Byte
+ 	int i;
+ 	signed8 a, b;
+ 	signed16 prod;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).b[AV_BINDEX((i*2)+1)]; 
+ 	  b = (*vB).b[AV_BINDEX((i*2)+1)];
+ 	  prod = a * b;
+ 	  (*vS).h[AV_HINDEX(i)] = prod;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.328:VX::vmulosh %VD, %VA, %VB:Vector Multiply Odd Signed Half Word
+ 	int i;
+ 	signed16 a, b;
+ 	signed32 prod;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (*vA).h[AV_HINDEX((i*2)+1)]; 
+ 	  b = (*vB).h[AV_HINDEX((i*2)+1)];
+ 	  prod = a * b;
+ 	  (*vS).w[i] = prod;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.8:VX::vmuloub %VD, %VA, %VB:Vector Multiply Odd Unsigned Byte
+ 	int i;
+ 	unsigned8 a, b;
+ 	unsigned16 prod;
+ 	for (i = 0; i < 8; i++) {
+ 	  a = (*vA).b[AV_BINDEX((i*2)+1)]; 
+ 	  b = (*vB).b[AV_BINDEX((i*2)+1)];
+ 	  prod = a * b;
+ 	  (*vS).h[AV_HINDEX(i)] = prod;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.72:VX::vmulouh %VD, %VA, %VB:Vector Multiply Odd Unsigned Half Word
+ 	int i;
+ 	unsigned16 a, b;
+ 	unsigned32 prod;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (*vA).h[AV_HINDEX((i*2)+1)]; 
+ 	  b = (*vB).h[AV_HINDEX((i*2)+1)];
+ 	  prod = a * b;
+ 	  (*vS).w[i] = prod;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 
+ #
+ # Vector Negative Multiply-Subtract instruction, 6-109
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.47:VX::vnmsubfp %VD, %VA, %VB, %VC:Vector Negative Multiply-Subtract Floating Point
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu a, b, c, d, i1, i2;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&a, (*vA).w[i]);
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  sim_fpu_32to (&c, (*vC).w[i]);
+ 	  sim_fpu_mul (&i1, &a, &c);
+ 	  sim_fpu_sub (&i2, &i1, &b);
+ 	  sim_fpu_neg (&d, &i2);
+ 	  sim_fpu_to32 (&f, &d);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ 
+ #
+ # Vector Logical OR instructions, 6-110, 6-111, 6-177
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1284:VX::vnor %VD, %VA, %VB:Vector Logical NOR
+ 	int i;
+ 	for (i = 0; i < 4; i++)
+ 	  (*vS).w[i] = ~((*vA).w[i] | (*vB).w[i]);
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1156:VX::vor %VD, %VA, %VB:Vector Logical OR
+ 	int i;
+ 	for (i = 0; i < 4; i++)
+ 	  (*vS).w[i] = (*vA).w[i] | (*vB).w[i];
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1220:VX::vxor %VD, %VA, %VB:Vector Logical XOR
+ 	int i;
+ 	for (i = 0; i < 4; i++)
+ 	  (*vS).w[i] = (*vA).w[i] ^ (*vB).w[i];
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 
+ #
+ # Vector Permute instruction, 6-112
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.43:VX::vperm %VD, %VA, %VB, %VC:Vector Permute
+ 	int i, who;
+ 	for (i = 0; i < 16; i++) {
+ 	  who = (*vC).b[AV_BINDEX(i)] & 0x1f;
+ 	  if (who & 0x10)
+ 	    (*vS).b[AV_BINDEX(i)] = (*vB).b[AV_BINDEX(who & 0xf)];
+ 	  else
+ 	    (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX(who & 0xf)];
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ 
+ #
+ # Vector Pack instructions, 6-113 ... 6-121
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.782:VX::vpkpx %VD, %VA, %VB:Vector Pack Pixel32
+ 	int i;
+ 	for (i = 0; i < 4; i++) {
+ 	  (*vS).h[AV_HINDEX(i+4)] = ((((*vB).w[i]) >> 9) & 0xfc00)
+ 	               | ((((*vB).w[i]) >> 6) & 0x03e0)
+ 	               | ((((*vB).w[i]) >> 3) & 0x001f);
+ 	  (*vS).h[AV_HINDEX(i)] = ((((*vA).w[i]) >> 9) & 0xfc00)
+ 	             | ((((*vA).w[i]) >> 6) & 0x03e0)
+ 	             | ((((*vA).w[i]) >> 3) & 0x001f);
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.398:VX::vpkshss %VD, %VA, %VB:Vector Pack Signed Half Word Signed Saturate
+ 	int i, sat, tempsat;
+ 	signed16 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 16; i++) {
+ 	  if (i < 8)
+ 	    temp = (*vA).h[AV_HINDEX(i)];
+ 	  else
+ 	    temp = (*vB).h[AV_HINDEX(i-8)];
+ 	  (*vS).b[AV_BINDEX(i)] = altivec_signed_saturate_8(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.270:VX::vpkshus %VD, %VA, %VB:Vector Pack Signed Half Word Unsigned Saturate
+ 	int i, sat, tempsat;
+ 	signed16 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 16; i++) {
+ 	  if (i < 8)
+ 	    temp = (*vA).h[AV_HINDEX(i)];
+ 	  else
+ 	    temp = (*vB).h[AV_HINDEX(i-8)];
+ 	  (*vS).b[AV_BINDEX(i)] = altivec_unsigned_saturate_8(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.462:VX::vpkswss %VD, %VA, %VB:Vector Pack Signed Word Signed Saturate
+ 	int i, sat, tempsat;
+ 	signed32 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 8; i++) {
+ 	  if (i < 4)
+ 	    temp = (*vA).w[i];
+ 	  else
+ 	    temp = (*vB).w[i-4];
+ 	  (*vS).h[AV_HINDEX(i)] = altivec_signed_saturate_16(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.334:VX::vpkswus %VD, %VA, %VB:Vector Pack Signed Word Unsigned Saturate
+ 	int i, sat, tempsat;
+ 	signed32 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 8; i++) {
+ 	  if (i < 4)
+ 	    temp = (*vA).w[i];
+ 	  else
+ 	    temp = (*vB).w[i-4];
+ 	  (*vS).h[AV_HINDEX(i)] = altivec_unsigned_saturate_16(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.14:VX::vpkuhum %VD, %VA, %VB:Vector Pack Unsigned Half Word Unsigned Modulo
+ 	int i;
+ 	for (i = 0; i < 16; i++)
+ 	  if (i < 8)
+ 	    (*vS).b[AV_BINDEX(i)] = (*vA).h[AV_HINDEX(i)];
+ 	  else
+ 	    (*vS).b[AV_BINDEX(i)] = (*vB).h[AV_HINDEX(i-8)];
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.142:VX::vpkuhus %VD, %VA, %VB:Vector Pack Unsigned Half Word Unsigned Saturate
+ 	int i, sat, tempsat;
+ 	signed16 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 16; i++) {
+ 	  if (i < 8)
+ 	    temp = (*vA).h[AV_HINDEX(i)];
+ 	  else
+ 	    temp = (*vB).h[AV_HINDEX(i-8)];
+ 	  /* force positive in signed16, ok as we'll toss the bit away anyway */
+ 	  temp &= ~0x8000;
+ 	  (*vS).b[AV_BINDEX(i)] = altivec_unsigned_saturate_8(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.78:VX::vpkuwum %VD, %VA, %VB:Vector Pack Unsigned Word Unsigned Modulo
+ 	int i;
+ 	for (i = 0; i < 8; i++)
+ 	  if (i < 4)
+ 	    (*vS).h[AV_HINDEX(i)] = (*vA).w[i];
+ 	  else
+ 	    (*vS).h[AV_HINDEX(i)] = (*vB).w[i-4];
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.206:VX::vpkuwus %VD, %VA, %VB:Vector Pack Unsigned Word Unsigned Saturate
+ 	int i, sat, tempsat;
+ 	signed32 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 8; i++) {
+ 	  if (i < 4)
+ 	    temp = (*vA).w[i];
+ 	  else
+ 	    temp = (*vB).w[i-4];
+ 	  /* force positive in signed32, ok as we'll toss the bit away anyway */
+ 	  temp &= ~0x80000000;
+ 	  (*vS).h[AV_HINDEX(i)] = altivec_unsigned_saturate_16(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 
+ #
+ # Vector Reciprocal instructions, 6-122, 6-123, 6-131
+ #
+ 
+ 0.4,6.VS,11.0,16.VB,21.266:VX::vrefp %VD, %VB:Vector Reciprocal Estimate Floating Point
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu op, d;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&op, (*vB).w[i]);
+ 	  sim_fpu_div (&d, &sim_fpu_one, &op);
+ 	  sim_fpu_to32 (&f, &d);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.0,16.VB,21.330:VX::vrsqrtefp %VD, %VB:Vector Reciprocal Square Root Estimate Floating Point
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu op, i1, one, d;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&op, (*vB).w[i]);
+ 	  sim_fpu_sqrt (&i1, &op);
+ 	  sim_fpu_div (&d, &sim_fpu_one, &i1);
+ 	  sim_fpu_to32 (&f, &d);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 
+ #
+ # Vector Round instructions, 6-124 ... 6-127
+ #
+ 
+ 0.4,6.VS,11.0,16.VB,21.714:VX::vrfim %VD, %VB:Vector Round to Floating-Point Integer towards Minus Infinity
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu op;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&op, (*vB).w[i]);
+ 	  sim_fpu_round_32(&op, sim_fpu_round_down, sim_fpu_denorm_default);
+ 	  sim_fpu_to32 (&f, &op);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.0,16.VB,21.522:VX::vrfin %VD, %VB:Vector Round to Floating-Point Integer Nearest
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu op;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&op, (*vB).w[i]);
+ 	  sim_fpu_round_32(&op, sim_fpu_round_near, sim_fpu_denorm_default);
+ 	  sim_fpu_to32 (&f, &op);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.0,16.VB,21.650:VX::vrfip %VD, %VB:Vector Round to Floating-Point Integer towards Plus Infinity
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu op;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&op, (*vB).w[i]);
+ 	  sim_fpu_round_32(&op, sim_fpu_round_up, sim_fpu_denorm_default);
+ 	  sim_fpu_to32 (&f, &op);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.0,16.VB,21.586:VX::vrfiz %VD, %VB:Vector Round to Floating-Point Integer towards Zero
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu op;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&op, (*vB).w[i]);
+ 	  sim_fpu_round_32(&op, sim_fpu_round_zero, sim_fpu_denorm_default);
+ 	  sim_fpu_to32 (&f, &op);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 
+ #
+ # Vector Rotate Left instructions, 6-128 ... 6-130
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.4:VX::vrlb %VD, %VA, %VB:Vector Rotate Left Integer Byte
+ 	int i;
+ 	unsigned16 temp;
+ 	for (i = 0; i < 16; i++) {
+ 	  temp = (unsigned16)(*vA).b[i] << (((*vB).b[i]) & 7);
+ 	  (*vS).b[i] = (temp & 0xff) | ((temp >> 8) & 0xff);
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.68:VX::vrlh %VD, %VA, %VB:Vector Rotate Left Integer Half Word
+ 	int i;
+ 	unsigned32 temp;
+ 	for (i = 0; i < 8; i++) {
+ 	  temp = (unsigned32)(*vA).h[i] << (((*vB).h[i]) & 0xf);
+ 	  (*vS).h[i] = (temp & 0xffff) | ((temp >> 16) & 0xffff);
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.132:VX::vrlw %VD, %VA, %VB:Vector Rotate Left Integer Word
+ 	int i;
+ 	unsigned64 temp;
+ 	for (i = 0; i < 4; i++) {
+ 	  temp = (unsigned64)(*vA).w[i] << (((*vB).w[i]) & 0x1f);
+ 	  (*vS).w[i] = (temp & 0xffffffff) | ((temp >> 32) & 0xffffffff);
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 
+ #
+ # Vector Conditional Select instruction, 6-133
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.VC,26.42:VAX::vsel %VD, %VA, %VB, %VC:Vector Conditional Select
+ 	int i;
+ 	unsigned32 c;
+ 	for (i = 0; i < 4; i++) {
+ 	  c = (*vC).w[i];
+ 	  (*vS).w[i] = ((*vB).w[i] & c) | ((*vA).w[i] & ~c);
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK | VC_BITMASK);
+ 
+ #
+ # Vector Shift Left instructions, 6-134 ... 6-139
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.452:VX::vsl %VD, %VA, %VB:Vector Shift Left
+ 	int sh, i, j, carry, new_carry;
+ 	sh = (*vB).b[0] & 7;	/* don't bother checking everything */
+ 	carry = 0;
+ 	for (j = 3; j >= 0; j--) {
+ 	  if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+ 	    i = j;
+ 	  else
+ 	    i = (j + 2) % 4;
+ 	  new_carry = sh ? ((*vA).w[i] >> (32 - sh)) : 0;
+ 	  (*vS).w[i] = ((*vA).w[i] << sh) | carry;
+ 	  carry = new_carry;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.260:VX::vslb %VD, %VA, %VB:Vector Shift Left Integer Byte
+ 	int i, sh;
+ 	for (i = 0; i < 16; i++) {
+ 	  sh = ((*vB).b[i]) & 7;
+ 	  (*vS).b[i] = (*vA).b[i] << sh;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.0,22.SH,26.44:VX::vsldoi %VD, %VA, %VB, %SH:Vector Shift Left Double by Octet Immediate
+ 	int i, j;
+ 	for (j = 0, i = SH; i < 16; i++)
+ 	  (*vS).b[j++] = (*vA).b[i];
+ 	for (i = 0; i < SH; i++)
+ 	  (*vS).b[j++] = (*vB).b[i];
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.324:VX::vslh %VD, %VA, %VB:Vector Shift Left Half Word
+ 	int i, sh;
+ 	for (i = 0; i < 8; i++) {
+ 	  sh = ((*vB).h[i]) & 0xf;
+ 	  (*vS).h[i] = (*vA).h[i] << sh;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1036:VX::vslo %VD, %VA, %VB:Vector Shift Left by Octet
+ 	int i, sh;
+ 	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+ 	  sh = ((*vB).b[AV_BINDEX(15)] >> 3) & 0xf;
+ 	else
+ 	  sh = ((*vB).b[AV_BINDEX(0)] >> 3) & 0xf;
+ 	for (i = 0; i < 16; i++) {
+ 	  if (15 - i >= sh)
+ 	    (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX(i + sh)];
+ 	  else
+ 	    (*vS).b[AV_BINDEX(i)] = 0;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.388:VX::vslw %VD, %VA, %VB:Vector Shift Left Integer Word
+ 	int i, sh;
+ 	for (i = 0; i < 4; i++) {
+ 	  sh = ((*vB).w[i]) & 0x1f;
+ 	  (*vS).w[i] = (*vA).w[i] << sh;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 
+ #
+ # Vector Splat instructions, 6-140 ... 6-145
+ #
+ 
+ 0.4,6.VS,11.UIMM,16.VB,21.524:VX::vspltb %VD, %VB, %UIMM:Vector Splat Byte
+ 	int i;
+ 	unsigned8 b;
+ 	b = (*vB).b[AV_BINDEX(UIMM & 0xf)];
+ 	for (i = 0; i < 16; i++)
+ 	  (*vS).b[i] = b;
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.UIMM,16.VB,21.588:VX::vsplth %VD, %VB, %UIMM:Vector Splat Half Word
+ 	int i;
+ 	unsigned16 h;
+ 	h = (*vB).h[AV_HINDEX(UIMM & 0x7)];
+ 	for (i = 0; i < 8; i++)
+ 	  (*vS).h[i] = h;
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.SIMM,16.0,21.780:VX::vspltisb %VD, %SIMM:Vector Splat Immediate Signed Byte
+ 	int i;
+ 	signed8 b = SIMM;
+ 	/* manual 5-bit sign extension */
+ 	if (b & 0x10)
+ 	  b |= 0xe0;
+ 	for (i = 0; i < 16; i++)
+ 	  (*vS).b[i] = b;
+ 	PPC_INSN_VR(VS_BITMASK, 0);
+ 
+ 0.4,6.VS,11.SIMM,16.0,21.844:VX::vspltish %VD, %SIMM:Vector Splat Immediate Signed Half Word
+ 	int i;
+ 	signed16 h = SIMM;
+ 	/* manual 5-bit sign extension */
+ 	if (h & 0x10)
+ 	  h |= 0xffe0;
+ 	for (i = 0; i < 8; i++)
+ 	  (*vS).h[i] = h;
+ 	PPC_INSN_VR(VS_BITMASK, 0);
+ 
+ 0.4,6.VS,11.SIMM,16.0,21.908:VX::vspltisw %VD, %SIMM:Vector Splat Immediate Signed Word
+ 	int i;
+ 	signed32 w = SIMM;
+ 	/* manual 5-bit sign extension */
+ 	if (w & 0x10)
+ 	  w |= 0xffffffe0;
+ 	for (i = 0; i < 4; i++)
+ 	  (*vS).w[i] = w;
+ 	PPC_INSN_VR(VS_BITMASK, 0);
+ 
+ 0.4,6.VS,11.UIMM,16.VB,21.652:VX::vspltw %VD, %VB, %UIMM:Vector Splat Word
+ 	int i;
+ 	unsigned32 w;
+ 	w = (*vB).w[UIMM & 0x3];
+ 	for (i = 0; i < 4; i++)
+ 	  (*vS).w[i] = w;
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 
+ #
+ # Vector Shift Right instructions, 6-146 ... 6-154
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.708:VX::vsr %VD, %VA, %VB:Vector Shift Right
+ 	int sh, i, j, carry, new_carry;
+ 	sh = (*vB).b[0] & 7;	/* don't bother checking everything */
+ 	carry = 0;
+ 	for (j = 0; j < 4; j++) {
+ 	  if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+ 	    i = j;
+ 	  else
+ 	    i = (j + 2) % 4;
+ 	  new_carry = sh ? ((*vA).w[i] << (32 - sh)) : 0;
+ 	  (*vS).w[i] = ((*vA).w[i] >> sh) | carry;
+ 	  carry = new_carry;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.772:VX::vsrab %VD, %VA, %VB:Vector Shift Right Algebraic Byte
+ 	int i, sh;
+ 	signed16 a;
+ 	for (i = 0; i < 16; i++) {
+ 	  sh = ((*vB).b[i]) & 7;
+ 	  a = (signed16)(signed8)(*vA).b[i];
+ 	  (*vS).b[i] = (a >> sh) & 0xff;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.836:VX::vsrah %VD, %VA, %VB:Vector Shift Right Algebraic Half Word
+ 	int i, sh;
+ 	signed32 a;
+ 	for (i = 0; i < 8; i++) {
+ 	  sh = ((*vB).h[i]) & 0xf;
+ 	  a = (signed32)(signed16)(*vA).h[i];
+ 	  (*vS).h[i] = (a >> sh) & 0xffff;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.900:VX::vsraw %VD, %VA, %VB:Vector Shift Right Algebraic Word
+ 	int i, sh;
+ 	signed64 a;
+ 	for (i = 0; i < 4; i++) {
+ 	  sh = ((*vB).w[i]) & 0x1f;
+ 	  a = (signed64)(signed32)(*vA).w[i];
+ 	  (*vS).w[i] = (a >> sh) & 0xffffffff;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.516:VX::vsrb %VD, %VA, %VB:Vector Shift Right Byte
+ 	int i, sh;
+ 	for (i = 0; i < 16; i++) {
+ 	  sh = ((*vB).b[i]) & 7;
+ 	  (*vS).b[i] = (*vA).b[i] >> sh;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.580:VX::vsrh %VD, %VA, %VB:Vector Shift Right Half Word
+ 	int i, sh;
+ 	for (i = 0; i < 8; i++) {
+ 	  sh = ((*vB).h[i]) & 0xf;
+ 	  (*vS).h[i] = (*vA).h[i] >> sh;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1100:VX::vsro %VD, %VA, %VB:Vector Shift Right Octet
+ 	int i, sh;
+ 	if (CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN)
+ 	  sh = ((*vB).b[AV_BINDEX(15)] >> 3) & 0xf;
+ 	else
+ 	  sh = ((*vB).b[AV_BINDEX(0)] >> 3) & 0xf;
+ 	for (i = 0; i < 16; i++) {
+ 	  if (i < sh)
+ 	    (*vS).b[AV_BINDEX(i)] = 0;
+ 	  else
+ 	    (*vS).b[AV_BINDEX(i)] = (*vA).b[AV_BINDEX(i - sh)];
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.644:VX::vsrw %VD, %VA, %VB:Vector Shift Right Word
+ 	int i, sh;
+ 	for (i = 0; i < 4; i++) {
+ 	  sh = ((*vB).w[i]) & 0x1f;
+ 	  (*vS).w[i] = (*vA).w[i] >> sh;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 
+ #
+ # Vector Subtract instructions, 6-155 ... 6-165
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1408:VX::vsubcuw %VD, %VA, %VB:Vector Subtract Carryout Unsigned Word
+ 	int i;
+ 	signed64 temp, a, b;
+ 	for (i = 0; i < 4; i++) {
+ 	  a = (signed64)(unsigned32)(*vA).w[i];
+ 	  b = (signed64)(unsigned32)(*vB).w[i];
+ 	  temp = a - b;
+ 	  (*vS).w[i] = ~(temp >> 32) & 1;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.74:VX::vsubfp %VD, %VA, %VB:Vector Subtract Floating Point
+ 	int i;
+ 	unsigned32 f;
+ 	sim_fpu a, b, d;
+ 	for (i = 0; i < 4; i++) {
+ 	  sim_fpu_32to (&a, (*vA).w[i]);
+ 	  sim_fpu_32to (&b, (*vB).w[i]);
+ 	  sim_fpu_sub (&d, &a, &b);
+ 	  sim_fpu_to32 (&f, &d);
+ 	  (*vS).w[i] = f;
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1792:VX::vsubsbs %VD, %VA, %VB:Vector Subtract Signed Byte Saturate
+ 	int i, sat, tempsat;
+ 	signed16 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 16; i++) {
+ 	  temp = (signed16)(signed8)(*vA).b[i] - (signed16)(signed8)(*vB).b[i];
+ 	  (*vS).b[i] = altivec_signed_saturate_8(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1856:VX::vsubshs %VD, %VA, %VB:Vector Subtract Signed Half Word Saturate
+ 	int i, sat, tempsat;
+ 	signed32 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 8; i++) {
+ 	  temp = (signed32)(signed16)(*vA).h[i] - (signed32)(signed16)(*vB).h[i];
+ 	  (*vS).h[i] = altivec_signed_saturate_16(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1920:VX::vsubsws %VD, %VA, %VB:Vector Subtract Signed Word Saturate
+ 	int i, sat, tempsat;
+ 	signed64 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 4; i++) {
+ 	  temp = (signed64)(signed32)(*vA).w[i] - (signed64)(signed32)(*vB).w[i];
+ 	  (*vS).w[i] = altivec_signed_saturate_32(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1024:VX::vsububm %VD, %VA, %VB:Vector Subtract Unsigned Byte Modulo
+ 	int i;
+ 	for (i = 0; i < 16; i++)
+ 	  (*vS).b[i] = (*vA).b[i] - (*vB).b[i];
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1536:VX::vsububs %VD, %VA, %VB:Vector Subtract Unsigned Byte Saturate
+ 	int i, sat, tempsat;
+ 	signed16 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 16; i++) {
+ 	  temp = (signed16)(unsigned8)(*vA).b[i] - (signed16)(unsigned8)(*vB).b[i];
+ 	  (*vS).b[i] = altivec_unsigned_saturate_8(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1088:VX::vsubuhm %VD, %VA, %VB:Vector Subtract Unsigned Half Word Modulo
+ 	int i;
+ 	for (i = 0; i < 8; i++)
+ 	  (*vS).h[i] = ((*vA).h[i] - (*vB).h[i]) & 0xffff;
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1600:VX::vsubuhs %VD, %VA, %VB:Vector Subtract Unsigned Half Word Saturate
+ 	int i, sat, tempsat;
+ 	signed32 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 8; i++) {
+ 	  temp = (signed32)(unsigned16)(*vA).h[i] - (signed32)(unsigned16)(*vB).h[i];
+ 	  (*vS).h[i] = altivec_unsigned_saturate_16(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1152:VX::vsubuwm %VD, %VA, %VB:Vector Subtract Unsigned Word Modulo
+ 	int i;
+ 	for (i = 0; i < 4; i++)
+ 	  (*vS).w[i] = (*vA).w[i] - (*vB).w[i];
+ 	PPC_INSN_VR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1664:VX::vsubuws %VD, %VA, %VB:Vector Subtract Unsigned Word Saturate
+ 	int i, sat, tempsat;
+ 	signed64 temp;
+ 	sat = 0;
+ 	for (i = 0; i < 4; i++) {
+ 	  temp = (signed64)(unsigned32)(*vA).w[i] - (signed64)(unsigned32)(*vB).w[i];
+ 	  (*vS).w[i] = altivec_unsigned_saturate_32(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 
+ #
+ # Vector Sum instructions, 6-166 ... 6-170
+ #
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1928:VX::vsumsws %VD, %VA, %VB:Vector Sum Across Signed Word Saturate
+ 	int i, sat;
+ 	signed64 temp;
+ 	temp = (signed64)(signed32)(*vB).w[3];
+ 	for (i = 0; i < 4; i++)
+ 	  temp += (signed64)(signed32)(*vA).w[i];
+ 	(*vS).w[3] = altivec_signed_saturate_32(temp, &sat);
+ 	(*vS).w[0] = (*vS).w[1] = (*vS).w[2] = 0;
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1672:VX::vsum2sws %VD, %VA, %VB:Vector Sum Across Partial (1/2) Signed Word Saturate
+ 	int j, sat, tempsat;
+ 	signed64 temp;
+ 	sat = 0;
+ 	for (j = 0; j < 4; j += 2) {
+ 	  temp = (signed64)(signed32)(*vB).w[j+1];
+ 	  temp += (signed64)(signed32)(*vA).w[j] + (signed64)(signed32)(*vA).w[j+1];
+ 	  (*vS).w[j+1] = altivec_signed_saturate_32(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	(*vS).w[0] = (*vS).w[2] = 0;
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1800:VX::vsum4sbs %VD, %VA, %VB:Vector Sum Across Partial (1/4) Signed Byte Saturate
+ 	int i, j, sat, tempsat;
+ 	signed64 temp;
+ 	sat = 0;
+ 	for (j = 0; j < 4; j++) {
+ 	  temp = (signed64)(signed32)(*vB).w[j];
+ 	  for (i = 0; i < 4; i++)
+ 	    temp += (signed64)(signed8)(*vA).b[i+(j*4)];
+ 	  (*vS).w[j] = altivec_signed_saturate_32(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1608:VX::vsum4shs %VD, %VA, %VB:Vector Sum Across Partial (1/4) Signed Half Word Saturate
+ 	int i, j, sat, tempsat;
+ 	signed64 temp;
+ 	sat = 0;
+ 	for (j = 0; j < 4; j++) {
+ 	  temp = (signed64)(signed32)(*vB).w[j];
+ 	  for (i = 0; i < 2; i++)
+ 	    temp += (signed64)(signed16)(*vA).h[i+(j*2)];
+ 	  (*vS).w[j] = altivec_signed_saturate_32(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 0.4,6.VS,11.VA,16.VB,21.1544:VX::vsum4ubs %VD, %VA, %VB:Vector Sum Across Partial (1/4) Unsigned Byte Saturate
+ 	int i, j, sat, tempsat;
+ 	signed64 utemp;
+ 	signed64 temp;
+ 	sat = 0;
+ 	for (j = 0; j < 4; j++) {
+ 	  utemp = (signed64)(unsigned32)(*vB).w[j];
+ 	  for (i = 0; i < 4; i++)
+ 	    utemp += (signed64)(unsigned16)(*vA).b[i+(j*4)];
+ 	  temp = utemp;
+ 	  (*vS).w[j] = altivec_unsigned_saturate_32(temp, &tempsat);
+ 	  sat |= tempsat;
+ 	}
+ 	ALTIVEC_SET_SAT(sat);
+ 	PPC_INSN_VR_VSCR(VS_BITMASK, VA_BITMASK | VB_BITMASK);
+ 
+ 
+ #
+ # Vector Unpack instructions, 6-171 ... 6-176
+ #
+ 
+ 0.4,6.VS,11.0,16.VB,21.846:VX::vupkhpx %VD, %VB:Vector Unpack High Pixel16
+ 	int i;
+ 	unsigned16 h;
+ 	for (i = 0; i < 4; i++) {
+ 	  h = (*vB).h[AV_HINDEX(i)];
+ 	  (*vS).w[i] = ((h & 0x8000) ? 0xff000000 : 0)
+ 		     | ((h & 0x7c00) << 6)
+ 		     | ((h & 0x03e0) << 3)
+ 		     | ((h & 0x001f));
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.0,16.VB,21.526:VX::vupkhsb %VD, %VB:Vector Unpack High Signed Byte
+ 	int i;
+ 	for (i = 0; i < 8; i++)
+ 	  (*vS).h[AV_HINDEX(i)] = (signed16)(signed8)(*vB).b[AV_BINDEX(i)];
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.0,16.VB,21.590:VX::vupkhsh %VD, %VB:Vector Unpack High Signed Half Word
+ 	int i;
+ 	for (i = 0; i < 4; i++)
+ 	  (*vS).w[i] = (signed32)(signed16)(*vB).h[AV_HINDEX(i)];
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.0,16.VB,21.974:VX::vupklpx %VD, %VB:Vector Unpack Low Pixel16
+ 	int i;
+ 	unsigned16 h;
+ 	for (i = 0; i < 4; i++) {
+ 	  h = (*vB).h[AV_HINDEX(i + 4)];
+ 	  (*vS).w[i] = ((h & 0x8000) ? 0xff000000 : 0)
+ 		     | ((h & 0x7c00) << 6)
+ 		     | ((h & 0x03e0) << 3)
+ 		     | ((h & 0x001f));
+ 	}
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.0,16.VB,21.654:VX::vupklsb %VD, %VB:Vector Unpack Low Signed Byte
+ 	int i;
+ 	for (i = 0; i < 8; i++)
+ 	  (*vS).h[AV_HINDEX(i)] = (signed16)(signed8)(*vB).b[AV_BINDEX(i + 8)];
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
+ 
+ 0.4,6.VS,11.0,16.VB,21.718:VX::vupklsh %VD, %VB:Vector Unpack Low Signed Half Word
+ 	int i;
+ 	for (i = 0; i < 4; i++)
+ 	  (*vS).w[i] = (signed32)(signed16)(*vB).h[AV_HINDEX(i + 4)];
+ 	PPC_INSN_VR(VS_BITMASK, VB_BITMASK);
Index: sim/ppc/ppc-spr-table
===================================================================
RCS file: /cvs/src/src/sim/ppc/ppc-spr-table,v
retrieving revision 1.2
diff -p -r1.2 ppc-spr-table
*** ppc-spr-table	2001/12/01 18:56:36	1.2
--- ppc-spr-table	2001/12/27 05:53:17
*************** DEC:22:0:0
*** 31,36 ****
--- 31,37 ----
  SDR1:25:0:0
  SRR0:26:0:0
  SRR1:27:0:0
+ VRSAVE:256:0:0
  SPRG0:272:0:0
  SPRG1:273:0:0
  SPRG2:274:0:0
Index: sim/ppc/psim.c
===================================================================
RCS file: /cvs/src/src/sim/ppc/psim.c,v
retrieving revision 1.1.1.2
diff -p -r1.1.1.2 psim.c
*** psim.c	1999/04/26 18:33:32	1.1.1.2
--- psim.c	2001/12/27 05:53:17
*************** psim_read_register(psim *system,
*** 761,767 ****
  		   transfer_mode mode)
  {
    register_descriptions description;
!   char cooked_buf[sizeof(unsigned_8)];
    cpu *processor;
  
    /* find our processor */
--- 761,767 ----
  		   transfer_mode mode)
  {
    register_descriptions description;
!   char cooked_buf[sizeof(vreg)];
    cpu *processor;
  
    /* find our processor */
*************** psim_read_register(psim *system,
*** 833,838 ****
--- 833,846 ----
      *(unsigned_word*)cooked_buf = model_get_number_of_cycles(cpu_model(processor));
      break;
  
+   case reg_vr:
+     *(vreg*)cooked_buf = cpu_registers(processor)->vr[description.index];
+     break;
+ 
+   case reg_vscr:
+     *(vscreg*)cooked_buf = cpu_registers(processor)->vscr;
+     break;
+ 
    default:
      printf_filtered("psim_read_register(processor=0x%lx,buf=0x%lx,reg=%s) %s\n",
  		    (unsigned long)processor, (unsigned long)buf, reg,
*************** psim_read_register(psim *system,
*** 858,863 ****
--- 866,884 ----
      case 8:
        *(unsigned_8*)buf = H2T_8(*(unsigned_8*)cooked_buf);
        break;
+     case 16:
+       if (CURRENT_HOST_BYTE_ORDER != current_host_byte_order)
+         {
+ 	  union { vreg v; unsigned_8 d[2]; } h, t;
+           memcpy(&h.v/*dest*/, cooked_buf/*src*/, description.size);
+ 	  { _SWAP_8(t.d[0] =, h.d[1]); }
+ 	  { _SWAP_8(t.d[1] =, h.d[0]); }
+           memcpy(buf/*dest*/, &t/*src*/, description.size);
+           break;
+         }
+       else
+         memcpy(buf/*dest*/, cooked_buf/*src*/, description.size);
+       break;
      }
    }
    else {
*************** psim_write_register(psim *system,
*** 878,884 ****
  {
    cpu *processor;
    register_descriptions description;
!   char cooked_buf[sizeof(unsigned_8)];
  
    /* find our processor */
    if (which_cpu == MAX_NR_PROCESSORS) {
--- 899,905 ----
  {
    cpu *processor;
    register_descriptions description;
!   char cooked_buf[sizeof(vreg)];
  
    /* find our processor */
    if (which_cpu == MAX_NR_PROCESSORS) {
*************** psim_write_register(psim *system,
*** 919,924 ****
--- 940,957 ----
      case 8:
        *(unsigned_8*)cooked_buf = T2H_8(*(unsigned_8*)buf);
        break;
+     case 16:
+       if (CURRENT_HOST_BYTE_ORDER != current_host_byte_order)
+         {
+ 	  union { vreg v; unsigned_8 d[2]; } h, t;
+           memcpy(&t.v/*dest*/, buf/*src*/, description.size);
+ 	  { _SWAP_8(h.d[0] =, t.d[1]); }
+ 	  { _SWAP_8(h.d[1] =, t.d[0]); }
+           memcpy(cooked_buf/*dest*/, &h/*src*/, description.size);
+           break;
+         }
+       else
+         memcpy(cooked_buf/*dest*/, buf/*src*/, description.size);
      }
    }
    else {
*************** psim_write_register(psim *system,
*** 958,963 ****
--- 991,1004 ----
  
    case reg_fpscr:
      cpu_registers(processor)->fpscr = *(fpscreg*)cooked_buf;
+     break;
+ 
+   case reg_vr:
+     cpu_registers(processor)->vr[description.index] = *(vreg*)cooked_buf;
+     break;
+ 
+   case reg_vscr:
+     cpu_registers(processor)->vscr = *(vscreg*)cooked_buf;
      break;
  
    default:
Index: sim/ppc/registers.c
===================================================================
RCS file: /cvs/src/src/sim/ppc/registers.c,v
retrieving revision 1.1.1.1
diff -p -r1.1.1.1 registers.c
*** registers.c	1999/04/16 01:35:11	1.1.1.1
--- registers.c	2001/12/27 05:53:18
*************** register_description(const char reg[])
*** 150,155 ****
--- 150,165 ----
      description.index = spr_ctr;
      description.size = sizeof(unsigned_word);
    }
+   else if (reg[0] == 'v' && reg[1] == 'r' && are_digits(reg + 2)) {
+     description.type = reg_vr;
+     description.index = atoi(reg+2);
+     description.size = sizeof(vreg);
+   }
+   else if (!strcmp(reg, "vscr")) {
+     description.type = reg_vscr;
+     description.index = 0;
+     description.size = sizeof(vscreg);
+   }
    else {
      sprs spr = find_spr(reg);
      if (spr != nr_of_sprs) {
Index: sim/ppc/registers.h
===================================================================
RCS file: /cvs/src/src/sim/ppc/registers.h,v
retrieving revision 1.1.1.1
diff -p -r1.1.1.1 registers.h
*** registers.h	1999/04/16 01:35:11	1.1.1.1
--- registers.h	2001/12/27 05:53:18
*************** enum {
*** 228,233 ****
--- 228,245 ----
    srr1_subsequent_instruction = BIT(47)
  };
  
+ /**
+  ** AltiVec registers
+  **/
+ 
+ /* Manage this as 4 32-bit entities, 8 16-bit entities or 16 8-bit entities.  */
+ typedef union {
+ 	unsigned8 b[16];
+ 	unsigned16 h[8];
+ 	unsigned32 w[4];
+ } vreg;
+ 
+ typedef unsigned32 vscreg;
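+ 
+ /* As a rough sketch of how these are used: the instruction
+    implementations in ppc-instructions index the vreg union directly,
+    e.g. (*vA).b[i] for byte element i, (*vA).h[i] for a half word and
+    (*vA).w[i] for a word; the AV_BINDEX()/AV_HINDEX() macros from
+    sim-endian.h remap the element index on wrong-endian hosts.  */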
  
  /**
   ** storage interrupt registers
*************** typedef struct _registers {
*** 264,269 ****
--- 276,286 ----
    /* Segment Registers */
    sreg sr[nr_of_srs];
  
+   /* AltiVec Registers */
+   vreg vr[32];
+ 
+   vscreg vscr;
+ 
  } registers;
  
  
*************** typedef enum {
*** 281,286 ****
--- 298,304 ----
    reg_gpr, reg_fpr, reg_spr, reg_msr,
    reg_cr, reg_fpscr, reg_pc, reg_sr,
    reg_insns, reg_stalls, reg_cycles,
+   reg_vr, reg_vscr,
    nr_register_types
  } register_types;
  
*************** INLINE_REGISTERS\
*** 320,324 ****
--- 338,348 ----
  
  /* floating-point status condition register */
  #define FPSCR		cpu_registers(processor)->fpscr
+ 
+ /* AltiVec registers */
+ #define VR(N)		cpu_registers(processor)->vr[N]
+ 
+ /* AltiVec vector status and control register */
+ #define VSCR		cpu_registers(processor)->vscr
  
  #endif /* _REGISTERS_H_ */
Index: sim/ppc/sim-endian.h
===================================================================
RCS file: /cvs/src/src/sim/ppc/sim-endian.h,v
retrieving revision 1.3
diff -p -r1.3 sim-endian.h
*** sim-endian.h	2001/12/14 00:22:13	1.3
--- sim-endian.h	2001/12/27 05:53:18
*************** do { \
*** 406,409 ****
--- 406,427 ----
  # include "sim-endian.c"
  #endif
  
+ /*
+  * AltiVec endian helpers: wrong-endian hosts need to be sure
+  * to get the right bytes/halves/words when the order matters.
+  * Note that many AltiVec instructions do not depend on byte order
+  * and work on N independent bits of data.  This is only for the
+  * instructions that actually move data around.
+  */
+ #if (WITH_HOST_BYTE_ORDER == BIG_ENDIAN)
+ #define AV_BINDEX(x)	((x) & 15)
+ #define AV_HINDEX(x)	((x) & 7)
+ #else
+ static char endian_b2l_bindex[16] = { 3, 2, 1, 0, 7, 6, 5, 4,
+ 			     11, 10, 9, 8, 15, 14, 13, 12 };
+ static char endian_b2l_hindex[8] = { 1, 0, 3, 2, 5, 4, 7, 6 };
+ #define AV_BINDEX(x)	endian_b2l_bindex[(x) & 15]
+ #define AV_HINDEX(x)	endian_b2l_hindex[(x) & 7]
+ #endif
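+ 
+ /* A sketch of the mapping, assuming a little-endian host: target byte
+    element 0 of a vreg lands at host index 3 and target half-word
+    element 0 at host index 1, so AV_BINDEX(0) == 3 and AV_HINDEX(0) == 1;
+    on a big-endian host both macros reduce to a simple mask.  */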
+ 
  #endif /* _SIM_ENDIAN_H_ */
Index: sim/testsuite/sim/ppc/lvebx.s
===================================================================
RCS file: lvebx.s
diff -N lvebx.s
*** /dev/null	Tue May  5 13:32:27 1998
--- lvebx.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,45 ----
+ # PSIM altivec lvebx testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ ld_foo:
+ 	.byte 42
+ 	.byte 43
+ 	.byte 44
+ 	.byte 45
+ 	.byte 46
+ 	.byte 13
+ 	.byte 48
+ 	.byte 49
+ 	.byte 42
+ 	.byte 43
+ 	.byte 44
+ 	.byte 45
+ 	.byte 46
+ 	.byte 13
+ 	.byte 48
+ 	.byte 49
+ 
+ 	.text
+ 	.global lvebx
+ lvebx:
+ 	lis		%r3, ld_foo@ha
+ 	addi		%r3, %r3, ld_foo@l
+ 	li		%r4, 5
+ 	lvebx		%v1, %r3, %r4
+ 	vspltb		%v2, %v1, 5
+ 	vspltisb	%v3, 13
+ 	vcmpequb.	%v4, %v2, %v3
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/lvehx.s
===================================================================
RCS file: lvehx.s
diff -N lvehx.s
*** /dev/null	Tue May  5 13:32:27 1998
--- lvehx.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,37 ----
+ # PSIM altivec lvehx testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ ld_foo:
+ 	.short 4
+ 	.short 5
+ 	.short 6
+ 	.short 7
+ 	.short 8
+ 	.short 9
+ 	.short 10
+ 	.short 11
+ 
+ 	.text
+ 	.global lvehx
+ lvehx:
+ 	lis		%r3, ld_foo@ha
+ 	addi		%r3, %r3, ld_foo@l
+ 	li		%r4, 6
+ 	lvehx		%v1, %r3, %r4
+ 	vsplth		%v2, %v1, 3
+ 	vspltish	%v3, 7
+ 	vcmpequh.	%v4, %v2, %v3
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/lvewx.s
===================================================================
RCS file: lvewx.s
diff -N lvewx.s
*** /dev/null	Tue May  5 13:32:27 1998
--- lvewx.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,33 ----
+ # PSIM altivec lvewx testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ ld_foo:
+ 	.long 4
+ 	.long 5
+ 	.long 6
+ 	.long 7
+ 
+ 	.text
+ 	.global lvewx
+ lvewx:
+ 	lis		%r3, ld_foo@ha
+ 	addi		%r3, %r3, ld_foo@l
+ 	li		%r4, 8
+ 	lvewx		%v1, %r3, %r4
+ 	vspltw		%v2, %v1, 2
+ 	vspltisw	%v3, 6
+ 	vcmpequw.	%v4, %v2, %v3
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/psim.exp
===================================================================
RCS file: psim.exp
diff -N psim.exp
*** /dev/null	Tue May  5 13:32:27 1998
--- psim.exp	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,16 ----
+ # PSIM testsuite.
+ 
+ if { [istarget powerpc*-*-*] || [istarget ppc*-*-*] } {
+     # all machines
+     set all_machs "powerpcle powerpc"
+ 
+     foreach src [lsort [glob -nocomplain $srcdir/$subdir/*.s]] {
+ 	# If we're only testing specific files and this isn't one of them,
+ 	# skip it.
+ 	if ![runtest_file_p $runtests $src] {
+ 	    continue
+ 	}
+ 
+ 	run_sim_test $src $all_machs
+     }
+ }
Index: sim/testsuite/sim/ppc/testutils.inc
===================================================================
RCS file: testutils.inc
diff -N testutils.inc
*** /dev/null	Tue May  5 13:32:27 1998
--- testutils.inc	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,64 ----
+ # Support macros for the assembly test cases.
+ 
+ 	.macro start
+ 	.text
+ 	.global _start
+ _start:
+ 	.endm
+ 
+ 	.data
+ 
+ 	.globl pass_str
+ 	.globl fail_str
+ 	.globl ok_str
+ pass_str:
+ 	.ascii "pass\n"
+ fail_str:
+ 	.ascii "fail\n"
+ ok_str:
+ 	.ascii "ok\n"
+ 
+ 	.text
+ 
+ 	.macro pass
+ 	li	%r0, 4	# write syscall
+ 	li	%r3, 1	# stdout
+ 	lis	%r4, pass_str@ha
+ 	addi	%r4, %r4, pass_str@l
+ 	li	%r5, 5
+ 	sc
+ 	li	%r0, 1	# exit syscall
+ 	li	%r3, 0
+ 	sc
+ 	.endm
+ 
+ 	.macro fail
+ 	li	%r0, 4	# write syscall
+ 	li	%r3, 1	# stdout
+ 	lis	%r4, fail_str@ha
+ 	addi	%r4, %r4, fail_str@l
+ 	li	%r5, 5
+ 	sc
+ 	li	%r0, 1	# exit syscall
+ 	li	%r3, 1
+ 	sc
+ 	.endm
+ 
+ 	.macro ok
+ 	li	%r0, 4	# write syscall
+ 	li	%r3, 1	# stdout
+ 	lis	%r4, ok_str@ha
+ 	addi	%r4, %r4, ok_str@l
+ 	li	%r5, 3
+ 	sc
+ 	.endm
+ 
+ # AltiVec tests with "# mach: powerpc" produce seemingly correct results
+ # on little endian targets, but different to big endian targets.  For now
+ # just don't run them on little endian.  Maybe copy them and use the
+ # alternate answers.  See vmrghb.le.s.
+ 	.macro load_vr vr gpr addr
+ 	lis	\gpr, \addr@ha
+ 	addi	\gpr, \gpr, \addr@l
+ 	lvx	\vr, 0, \gpr
+ 	.endm
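+ 
+ # Typical use in the test cases below, for example:
+ #	load_vr		%v2, %r3, a_foo
+ # loads the 16 bytes at a_foo into %v2 (and leaves the address in %r3).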
Index: sim/testsuite/sim/ppc/vaddcuw.s
===================================================================
RCS file: vaddcuw.s
diff -N vaddcuw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vaddcuw.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,41 ----
+ # PSIM altivec vaddcuw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:
+ 	.long 0x80000000
+ 	.long 0x7fffffff
+ 	.long 0x7fffffff
+ 	.long 0x7fffffff
+ b_foo:
+ 	.long 0x80000000
+ 	.long 0x7fffffff
+ 	.long 0x80000000
+ 	.long 0x80000001
+ d_foo:
+ 	.long 0x00000001
+ 	.long 0x00000000
+ 	.long 0x00000000
+ 	.long 0x00000001
+ 
+ 	.text
+ 	.global vaddcuw
+ vaddcuw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 	vaddcuw		%v4, %v2, %v3
+ 	vcmpequw.	%v6, %v4, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vaddsbs.s
===================================================================
RCS file: vaddsbs.s
diff -N vaddsbs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vaddsbs.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,80 ----
+ # PSIM altivec vaddsbs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 10, -11, 12, -13
+ 	.byte 14, -15, 16, -17
+ 	.byte 18, -19, 20, -21
+ 	.byte 22, -23, 24, -25
+ b_foo:	# v3
+ 	.byte -10, 11, -12, 13
+ 	.byte -14, 15, -16, 17
+ 	.byte -18, 19, -20, 21
+ 	.byte -22, 23, -24, 25
+ c_foo:	# v4
+ 	.byte -110, 111, -112, 113
+ 	.byte -114, 115, -116, 117
+ 	.byte -118, 119, -120, 121
+ 	.byte -122, 123, -124, 125
+ d1_foo:		# a + b (v5)
+ 	.byte 0, 0, 0, 0
+ 	.byte 0, 0, 0, 0
+ 	.byte 0, 0, 0, 0
+ 	.byte 0, 0, 0, 0
+ d2_foo:		# a + a (v6)
+ 	.byte 20, -22, 24, -26
+ 	.byte 28, -30, 32, -34
+ 	.byte 36, -38, 40, -42
+ 	.byte 44, -46, 48, -50
+ d3_foo:		# a + c (v7)
+ 	.byte -100, 100, -100, 100
+ 	.byte -100, 100, -100, 100
+ 	.byte -100, 100, -100, 100
+ 	.byte -100, 100, -100, 100
+ d4_foo:		# b + c (v8)
+ 	.byte -120, 122, -124, 126
+ 	.byte -128, 127, -128, 127
+ 	.byte -128, 127, -128, 127
+ 	.byte -128, 127, -128, 127
+ 
+ 	.text
+ 	.global vaddsbs
+ vaddsbs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v4, %r3, c_foo
+ 	load_vr		%v5, %r3, d1_foo
+ 	load_vr		%v6, %r3, d2_foo
+ 	load_vr		%v7, %r3, d3_foo
+ 	load_vr		%v8, %r3, d4_foo
+ 
+ 	vaddsbs		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		4, 24, fail
+ 
+ 	vaddsbs		%v9, %v2, %v2
+ 	vcmpequb.	%v10, %v9, %v6
+ 	bc		4, 24, fail
+ 
+ 	vaddsbs		%v9, %v2, %v4
+ 	vcmpequb.	%v10, %v9, %v7
+ 	bc		4, 24, fail
+ 
+ 	vaddsbs		%v9, %v3, %v4
+ 	vcmpequb.	%v10, %v9, %v8
+ 	bc		4, 24, fail
+ 
+ 	b		pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vaddshs.s
===================================================================
RCS file: vaddshs.s
diff -N vaddshs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vaddshs.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,68 ----
+ # PSIM altivec vaddshs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short 32020, -32021, 32022, -32023
+ 	.short 32024, -32025, 32026, -32027
+ b_foo:	# v3
+ 	.short -32020, 32021, -32022, 32023
+ 	.short -32024, 32025, -32026, 32027
+ c_foo:	# v4
+ 	.short -740, 741, -742, 743
+ 	.short -744, 745, -746, 747
+ 
+ d1_foo:		# a + b (v5)
+ 	.short 0, 0, 0, 0
+ 	.short 0, 0, 0, 0
+ d2_foo:		# a + a (v6)
+ 	.short 32767, -32768, 32767, -32768
+ 	.short 32767, -32768, 32767, -32768
+ d3_foo:		# a + c (v7)
+ 	.short 31280, -31280, 31280, -31280
+ 	.short 31280, -31280, 31280, -31280
+ d4_foo:		# b + c (v8)
+ 	.short -32760, 32762, -32764, 32766
+ 	.short -32768, 32767, -32768, 32767
+ 
+ 	.text
+ 	.global vaddshs
+ vaddshs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v4, %r3, c_foo
+ 	load_vr		%v5, %r3, d1_foo
+ 	load_vr		%v6, %r3, d2_foo
+ 	load_vr		%v7, %r3, d3_foo
+ 	load_vr		%v8, %r3, d4_foo
+ 
+ 	vaddshs		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		4, 24, fail
+ 
+ 	vaddshs		%v9, %v2, %v2
+ 	vcmpequh.	%v10, %v9, %v6
+ 	bc		4, 24, fail
+ 
+ 	vaddshs		%v9, %v2, %v4
+ 	vcmpequh.	%v10, %v9, %v7
+ 	bc		4, 24, fail
+ 
+ 	vaddshs		%v9, %v3, %v4
+ 	vcmpequh.	%v10, %v9, %v8
+ 	bc		4, 24, fail
+ 
+ 	b		pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vaddsws.s
===================================================================
RCS file: vaddsws.s
diff -N vaddsws.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vaddsws.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,61 ----
+ # PSIM altivec vaddsws testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 2147483640, -2147483641, 2147483642, -2147483643
+ b_foo:	# v3
+ 	.long -2147483640, 2147483641, -2147483642, 2147483643
+ c_foo:	# v4
+ 	.long -6, 7, -8, 9
+ 
+ d1_foo:		# a + b (v5)
+ 	.long 0, 0, 0, 0
+ d2_foo:		# a + a (v6)
+ 	.long 2147483647, -2147483648, 2147483647, -2147483648
+ d3_foo:		# a + c (v7)
+ 	.long 2147483634, -2147483634, 2147483634, -2147483634
+ d4_foo:		# b + c (v8)
+ 	.long -2147483646, 2147483647, -2147483648, 2147483647
+ 
+ 	.text
+ 	.global vaddsws
+ vaddsws:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v4, %r3, c_foo
+ 	load_vr		%v5, %r3, d1_foo
+ 	load_vr		%v6, %r3, d2_foo
+ 	load_vr		%v7, %r3, d3_foo
+ 	load_vr		%v8, %r3, d4_foo
+ 
+ 	vaddsws		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		4, 24, fail
+ 
+ 	vaddsws		%v9, %v2, %v2
+ 	vcmpequw.	%v10, %v9, %v6
+ 	bc		4, 24, fail
+ 
+ 	vaddsws		%v9, %v2, %v4
+ 	vcmpequw.	%v10, %v9, %v7
+ 	bc		4, 24, fail
+ 
+ 	vaddsws		%v9, %v3, %v4
+ 	vcmpequw.	%v10, %v9, %v8
+ 	bc		4, 24, fail
+ 
+ 	b		pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vaddubm.s
===================================================================
RCS file: vaddubm.s
diff -N vaddubm.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vaddubm.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,43 ----
+ # PSIM altivec vaddubm testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 10, 11, 12, 13
+ 	.byte 14, 15, 16, 17
+ 	.byte 18, 19,  0,  2
+ 	.byte  4,  6,  8, 10
+ b_foo:	# v3
+ 	.byte 245, 246, 247, 248
+ 	.byte 249, 250, 251, 252
+ 	.byte 253, 254, 255, 254
+ 	.byte 253, 252, 251, 250
+ d_foo:	# v5
+ 	.byte 255, 1, 3, 5
+ 	.byte 7, 9, 11, 13
+ 	.byte 15, 17, 255, 0
+ 	.byte 1, 2, 3, 4
+ 
+ 	.text
+ 	.global vaddubm
+ vaddubm:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vaddubm		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vaddubs.s
===================================================================
RCS file: vaddubs.s
diff -N vaddubs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vaddubs.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,43 ----
+ # PSIM altivec vaddubs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 10, 11, 12, 13
+ 	.byte 14, 15, 16, 17
+ 	.byte 18, 19,  0,  2
+ 	.byte  4,  6,  8, 10
+ b_foo:	# v3
+ 	.byte 245, 246, 247, 248
+ 	.byte 249, 250, 251, 252
+ 	.byte 253, 254, 255, 254
+ 	.byte 253, 252, 251, 250
+ d_foo:	# v5
+ 	.byte 255, 255, 255, 255
+ 	.byte 255, 255, 255, 255
+ 	.byte 255, 255, 255, 255
+ 	.byte 255, 255, 255, 255
+ 
+ 	.text
+ 	.global vaddubs
+ vaddubs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vaddubs		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vadduhm.s
===================================================================
RCS file: vadduhm.s
diff -N vadduhm.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vadduhm.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,37 ----
+ # PSIM altivec vadduhm testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short 32820, 32021, 32822, 32023
+ 	.short 32024, 32025, 32026, 32927
+ b_foo:	# v3
+ 	.short 32820, 32021, 32822, 32023
+ 	.short 32024, 32025, 32026, 32927
+ d_foo:	# v5
+ 	.short 104, 64042, 108, 64046
+ 	.short 64048, 64050, 64052, 318
+ 
+ 	.text
+ 	.global vadduhm
+ vadduhm:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vadduhm		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vadduhs.s
===================================================================
RCS file: vadduhs.s
diff -N vadduhs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vadduhs.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,37 ----
+ # PSIM altivec vadduhs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short 32820, 32021, 32822, 32023
+ 	.short 32024, 32025, 32026, 32927
+ b_foo:	# v3
+ 	.short 32820, 32021, 32822, 32023
+ 	.short 32024, 32025, 32026, 32927
+ d_foo:	# v5
+ 	.short 65535, 64042, 65535, 64046
+ 	.short 64048, 64050, 64052, 65535
+ 
+ 	.text
+ 	.global vadduhs
+ vadduhs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vadduhs		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vadduwm.s
===================================================================
RCS file: vadduwm.s
diff -N vadduwm.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vadduwm.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vadduwm testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long 3000000000, 3000000100, 3000000200, 3000000300
+ b_foo:	# v3
+ 	.long 1000000000, 1500000200, 2000000400, 2500000600
+ d_foo:	# v5
+ 	.long 4000000000, 205033004, 705033304, 1205033604
+ 
+ 	.text
+ 	.global vadduwm
+ vadduwm:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vadduwm		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vadduws.s
===================================================================
RCS file: vadduws.s
diff -N vadduws.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vadduws.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vadduws testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long 3000000000, 3000000100, 3000000200, 3000000300
+ b_foo:	# v3
+ 	.long 1000000000, 1500000200, 2000000400, 2500000600
+ d_foo:	# v5
+ 	.long 4000000000, 4294967295, 4294967295, 4294967295
+ 
+ 	.text
+ 	.global vadduws
+ vadduws:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vadduws		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vavgsb.s
===================================================================
RCS file: vavgsb.s
diff -N vavgsb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vavgsb.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,42 ----
+ # PSIM altivec vavgsb testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte  10, -11,  12, -13
+ 	.byte  14, -15,  16, -17
+ 	.byte  18, -19,  20, -21
+ 	.byte  22, -23,  24, -25
+ b_foo:	# v3
+ 	.byte -18,  19, -20,  21
+ 	.byte  22, -23,  24, -25
+ 	.byte -14,  15, -16,  17
+ 	.byte  10, -11,  12, -13
+ d_foo:	# v5
+ 	.byte  -4,   4,  -4,   4
+ 	.byte  18, -19,  20, -21
+ 	.byte   2,  -2,   2,  -2
+ 	.byte  16, -17,  18, -19
+ 
+ 	.text
+ 	.global vavgsb
+ vavgsb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vavgsb		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vavgsh.s
===================================================================
RCS file: vavgsh.s
diff -N vavgsh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vavgsh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vavgsh testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short     10,    -11,     12,    -13
+ 	.short  32014, -32015,  32016, -32017
+ b_foo:	# v3
+ 	.short -32018,  32019, -32020,  32021
+ 	.short     23,    -22,     25,    -24
+ d_foo:	# v5
+ 	.short -16004,  16004, -16004,  16004
+ 	.short  16019, -16018,  16021, -16020
+ 
+ 	.text
+ 	.global vavgsh
+ vavgsh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vavgsh		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vavgsw.s
===================================================================
RCS file: vavgsw.s
diff -N vavgsw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vavgsw.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,33 ----
+ # PSIM altivec vavgsw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long     10,    -11,     12,    -13
+ b_foo:	# v3
+ 	.long -32018,  32019, -32020,  32021
+ d_foo:	# v5
+ 	.long -16004,  16004, -16004,  16004
+ 
+ 	.text
+ 	.global vavgsw
+ vavgsw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vavgsw		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vavgub.s
===================================================================
RCS file: vavgub.s
diff -N vavgub.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vavgub.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,42 ----
+ # PSIM altivec vavgub testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte  10,  11,  12,  13
+ 	.byte  14,  15,  16,  17
+ 	.byte  18,  19,  20,  21
+ 	.byte  22,  23,  24,  25
+ b_foo:	# v3
+ 	.byte  18,  19,  20,  21
+ 	.byte  22,  23,  24,  25
+ 	.byte  14,  15,  16,  17
+ 	.byte  10,  11,  12,  13
+ d_foo:	# v5
+ 	.byte  14,  15,  16,  17
+ 	.byte  18,  19,  20,  21
+ 	.byte  16,  17,  18,  19
+ 	.byte  16,  17,  18,  19
+ 
+ 	.text
+ 	.global vavgub
+ vavgub:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vavgub		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vavguh.s
===================================================================
RCS file: vavguh.s
diff -N vavguh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vavguh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vavguh testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short     10,     11,     12,     13
+ 	.short  32014,  32015,  32016,  32017
+ b_foo:	# v3
+ 	.short  32018,  32019,  32020,  32021
+ 	.short     23,     22,     25,     24
+ d_foo:	# v5
+ 	.short  16014,  16015,  16016,  16017
+ 	.short  16019,  16019,  16021,  16021
+ 
+ 	.text
+ 	.global vavguh
+ vavguh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vavguh		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vavguw.s
===================================================================
RCS file: vavguw.s
diff -N vavguw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vavguw.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,33 ----
+ # PSIM altivec vavguw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long  32014,  32015,  32016,  32017
+ b_foo:	# v3
+ 	.long     23,     22,     25,     24
+ d_foo:	# v5
+ 	.long  16019,  16019,  16021,  16021
+ 
+ 	.text
+ 	.global vavguw
+ vavguw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vavguw		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vcmpgtsb.s
===================================================================
RCS file: vcmpgtsb.s
diff -N vcmpgtsb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vcmpgtsb.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vcmpgtsb testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 10, -11, 12, -13
+ 	.byte 14, -15, 16, -17
+ 	.byte 18, -19, 20, -21
+ 	.byte 22, -23, 24, -25
+ b_foo:	# v3
+ 	.byte -10,-22, -12,-23
+ 	.byte -14,-25, -16,-27
+ 	.byte -18,-29, -20,-31
+ 	.byte -22,-33, -24,-35
+ 
+ 	.text
+ 	.global vcmpgtsb
+ vcmpgtsb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 
+ 	vcmpgtsb.	%v10, %v2, %v3
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vcmpgtsh.s
===================================================================
RCS file: vcmpgtsh.s
diff -N vcmpgtsh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vcmpgtsh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,32 ----
+ # PSIM altivec vcmpgtsh testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short 10000, -11000, 12000, -13000
+ 	.short 14000, -15000, 16000, -17000
+ b_foo:	# v3
+ 	.short -10000,-22000, -12000,-23000
+ 	.short -14000,-25000, -16000,-27000
+ 
+ 	.text
+ 	.global vcmpgtsh
+ vcmpgtsh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 
+ 	vcmpgtsh.	%v10, %v2, %v3
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vcmpgtsw.s
===================================================================
RCS file: vcmpgtsw.s
diff -N vcmpgtsw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vcmpgtsw.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,30 ----
+ # PSIM altivec vcmpgtsw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long 10000000, -11333000, 12023400, -1302300
+ b_foo:	# v3
+ 	.long -10000000,-22333000, -12023400,-2302300
+ 
+ 	.text
+ 	.global vcmpgtsw
+ vcmpgtsw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 
+ 	vcmpgtsw.	%v10, %v2, %v3
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vcmpgtub.s
===================================================================
RCS file: vcmpgtub.s
diff -N vcmpgtub.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vcmpgtub.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vcmpgtub testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 110, 111, 112, 113
+ 	.byte 114, 115, 116, 117
+ 	.byte 118, 119, 120, 121
+ 	.byte 122, 123, 124, 125
+ b_foo:	# v3
+ 	.byte 10, 22, 12, 23
+ 	.byte 14, 25, 16, 27
+ 	.byte 18, 29, 20, 31
+ 	.byte 22, 33, 24, 35
+ 
+ 	.text
+ 	.global vcmpgtub
+ vcmpgtub:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 
+ 	vcmpgtub.	%v10, %v2, %v3
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vcmpgtuh.s
===================================================================
RCS file: vcmpgtuh.s
diff -N vcmpgtuh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vcmpgtuh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,32 ----
+ # PSIM altivec vcmpgtuh testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short 40000, 41000, 42000, 43000
+ 	.short 44000, 45000, 46000, 47000
+ b_foo:	# v3
+ 	.short 10000, 22000, 12000, 23000
+ 	.short 14000, 25000, 16000, 27000
+ 
+ 	.text
+ 	.global vcmpgtuh
+ vcmpgtuh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 
+ 	vcmpgtuh.	%v10, %v2, %v3
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vcmpgtuw.s
===================================================================
RCS file: vcmpgtuw.s
diff -N vcmpgtuw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vcmpgtuw.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,30 ----
+ # PSIM altivec vcmpgtuw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long 150000000, 151333000, 152023400, 51302300
+ b_foo:	# v3
+ 	.long 10000000, 22333000, 12023400, 2302300
+ 
+ 	.text
+ 	.global vcmpgtuw
+ vcmpgtuw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 
+ 	vcmpgtuw.	%v10, %v2, %v3
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmaxsb.s
===================================================================
RCS file: vmaxsb.s
diff -N vmaxsb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmaxsb.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,42 ----
+ # PSIM altivec vmaxsb testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte  10, -11,  12, -13
+ 	.byte  14, -15,  16, -17
+ 	.byte  18, -19,  20, -21
+ 	.byte  22, -23,  24, -25
+ b_foo:	# v3
+ 	.byte -18,  19, -20,  21
+ 	.byte  22, -23,  24, -25
+ 	.byte -14,  15, -16,  17
+ 	.byte  10, -11,  12, -13
+ d_foo:	# v5
+ 	.byte  10,  19,  12,  21
+ 	.byte  22, -15,  24, -17
+ 	.byte  18,  15,  20,  17
+ 	.byte  22, -11,  24, -13
+ 
+ 	.text
+ 	.global vmaxsb
+ vmaxsb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmaxsb		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmaxsh.s
===================================================================
RCS file: vmaxsh.s
diff -N vmaxsh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmaxsh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vmaxsh testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short     10,    -11,     12,    -13
+ 	.short  32014, -32015,  32016, -32017
+ b_foo:	# v3
+ 	.short -32018,  32019, -32020,  32021
+ 	.short     23,    -22,     25,    -24
+ d_foo:	# v5
+ 	.short     10,  32019,     12,  32021
+ 	.short  32014,    -22,  32016,    -24
+ 
+ 	.text
+ 	.global vmaxsh
+ vmaxsh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmaxsh		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmaxsw.s
===================================================================
RCS file: vmaxsw.s
diff -N vmaxsw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmaxsw.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,33 ----
+ # PSIM altivec vmaxsw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long     10,    -11,     12,    -13
+ b_foo:	# v3
+ 	.long -32018,  32019, -32020,  32021
+ d_foo:	# v5
+ 	.long     10,  32019,     12,  32021
+ 
+ 	.text
+ 	.global vmaxsw
+ vmaxsw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmaxsw		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmaxub.s
===================================================================
RCS file: vmaxub.s
diff -N vmaxub.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmaxub.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,42 ----
+ # PSIM altivec vmaxub testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte  10, 11, 12, 13
+ 	.byte  14, 15, 16, 17
+ 	.byte  18, 19, 20, 21
+ 	.byte  22, 23, 24, 25
+ b_foo:	# v3
+ 	.byte  18, 19, 20, 21
+ 	.byte  22, 23, 24, 25
+ 	.byte  14, 15, 16, 17
+ 	.byte  10, 11, 12, 13
+ d_foo:	# v5
+ 	.byte  18, 19, 20, 21
+ 	.byte  22, 23, 24, 25
+ 	.byte  18, 19, 20, 21
+ 	.byte  22, 23, 24, 25
+ 
+ 	.text
+ 	.global vmaxub
+ vmaxub:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmaxub		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmaxuh.s
===================================================================
RCS file: vmaxuh.s
diff -N vmaxuh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmaxuh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vmaxuh testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short     10,     11,     12,     13
+ 	.short  32014,  32015,  32016,  32017
+ b_foo:	# v3
+ 	.short  32018,  32019,  32020,  32021
+ 	.short     23,     22,     25,     24
+ d_foo:	# v5
+ 	.short  32018,  32019,  32020,  32021
+ 	.short  32014,  32015,  32016,  32017
+ 
+ 	.text
+ 	.global vmaxuh
+ vmaxuh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmaxuh		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmaxuw.s
===================================================================
RCS file: vmaxuw.s
diff -N vmaxuw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmaxuw.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,33 ----
+ # PSIM altivec vmaxuw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long     10,     11,     12,     13
+ b_foo:	# v3
+ 	.long  32018,  32019,  32020,  32021
+ d_foo:	# v5
+ 	.long  32018,  32019,  32020,  32021
+ 
+ 	.text
+ 	.global vmaxuw
+ vmaxuw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmaxuw		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmhaddshs.s
===================================================================
RCS file: vmhaddshs.s
diff -N vmhaddshs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmhaddshs.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,42 ----
+ # PSIM altivec vmhaddshs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short   8000,  -4000,  16384, -20000
+ 	.short  16024, -16025,    -32,  8191
+ b_foo:	# v3
+ 	.short     -3,      3,     -2,      2
+ 	.short     -1,      1,     -4,      4
+ c_foo:	# v4
+ 	.short    -20,     21,    -22,     23
+ 	.short    -24,     25,    -26,     27
+ d_foo:	# v5
+ 	.short	  -21,	   20,	  -23,	   21
+ 	.short	  -25, 	   24,	  -26,     27
+ 
+ 	.text
+ 	.global vmhaddshs
+ vmhaddshs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v4, %r3, c_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmhaddshs	%v9, %v2, %v3, %v4
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmhraddshs.s
===================================================================
RCS file: vmhraddshs.s
diff -N vmhraddshs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmhraddshs.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,45 ----
+ # PSIM altivec vmhraddshs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short   8000,  -4000,  18384, -24590
+ 	.short  16024, -16025,    -32,  13191
+ b_foo:	# v3
+ 	.short     -3,      3,     -2,      2
+ 	.short     -1,      1,     -4,      4
+ c_foo:	# v4
+ 	.short    -20,     21,    -22,     23
+ 	.short    -24,     25,    -26,     27
+ d_foo:	# v5
+ 	.short    -21,     21,    -23,     21
+ 	.short    -24,     25,    -26,     29
+ 	#.short  -7636,   4405, -16406, -23593
+ 	#.short    336,    384,  16486,  32767
+ 
+ 
+ 	.text
+ 	.global vmhraddshs
+ vmhraddshs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v4, %r3, c_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmhraddshs	%v9, %v2, %v3, %v4
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
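
the expected vectors in the two tests above follow the saturating
multiply-high-and-add arithmetic: each element is (a*b) shifted right
15 bits (with a +0x4000 rounding term in the vmhraddshs case), plus c,
clamped to the signed 16-bit range.  a rough C sketch, assuming an
arithmetic right shift of negative products (helper names are made up):

    #include <stdint.h>

    static int16_t sat16 (int32_t x)	/* clamp to [-32768, 32767] */
    {
      return x > 32767 ? 32767 : x < -32768 ? -32768 : (int16_t) x;
    }

    static int16_t mhaddshs (int16_t a, int16_t b, int16_t c)
    {
      return sat16 (((int32_t) a * b >> 15) + c);
    }

    static int16_t mhraddshs (int16_t a, int16_t b, int16_t c)
    {
      return sat16 ((((int32_t) a * b + 0x4000) >> 15) + c);
    }
    /* e.g. mhaddshs (8000, -3, -20) == -21, mhraddshs (13191, 4, 27) == 29 */
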
Index: sim/testsuite/sim/ppc/vminsb.s
===================================================================
RCS file: vminsb.s
diff -N vminsb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vminsb.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,42 ----
+ # PSIM altivec vminsb testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte  10, -11,  12, -13
+ 	.byte  14, -15,  16, -17
+ 	.byte  18, -19,  20, -21
+ 	.byte  22, -23,  24, -25
+ b_foo:	# v3
+ 	.byte -18,  19, -20,  21
+ 	.byte  22, -23,  24, -25
+ 	.byte -14,  15, -16,  17
+ 	.byte  10, -11,  12, -13
+ d_foo:	# v5
+ 	.byte -18, -11, -20, -13
+ 	.byte  14, -23,  16, -25
+ 	.byte -14, -19, -16, -21
+ 	.byte  10, -23,  12, -25
+ 
+ 	.text
+ 	.global vminsb
+ vminsb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vminsb		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vminsh.s
===================================================================
RCS file: vminsh.s
diff -N vminsh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vminsh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vminsh testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short     10,    -11,     12,    -13
+ 	.short  32014, -32015,  32016, -32017
+ b_foo:	# v3
+ 	.short -32018,  32019, -32020,  32021
+ 	.short     23,    -22,     25,    -24
+ d_foo:	# v5
+ 	.short -32018,    -11, -32020,    -13
+ 	.short     23, -32015,     25, -32017
+ 
+ 	.text
+ 	.global vminsh
+ vminsh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vminsh		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vminsw.s
===================================================================
RCS file: vminsw.s
diff -N vminsw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vminsw.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,33 ----
+ # PSIM altivec vminsw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long     10,    -11,     12,    -13
+ b_foo:	# v3
+ 	.long -32018,  32019, -32020,  32021
+ d_foo:	# v5
+ 	.long -32018,    -11, -32020,    -13
+ 
+ 	.text
+ 	.global vminsw
+ vminsw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vminsw		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vminub.s
===================================================================
RCS file: vminub.s
diff -N vminub.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vminub.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,42 ----
+ # PSIM altivec vminub testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte  10, 11, 12, 13
+ 	.byte  14, 15, 16, 17
+ 	.byte  18, 19, 20, 21
+ 	.byte  22, 23, 24, 25
+ b_foo:	# v3
+ 	.byte  18, 19, 20, 21
+ 	.byte  22, 23, 24, 25
+ 	.byte  14, 15, 16, 17
+ 	.byte  10, 11, 12, 13
+ d_foo:	# v5
+ 	.byte  10, 11, 12, 13
+ 	.byte  14, 15, 16, 17
+ 	.byte  14, 15, 16, 17
+ 	.byte  10, 11, 12, 13
+ 
+ 	.text
+ 	.global vminub
+ vminub:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vminub		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vminuh.s
===================================================================
RCS file: vminuh.s
diff -N vminuh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vminuh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vminuh testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short     10,     11,     12,     13
+ 	.short  32014,  32015,  32016,  32017
+ b_foo:	# v3
+ 	.short  32018,  32019,  32020,  32021
+ 	.short     23,     22,     25,     24
+ d_foo:	# v5
+ 	.short     10,     11,     12,     13
+ 	.short     23,     22,     25,     24
+ 
+ 	.text
+ 	.global vminuh
+ vminuh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vminuh		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vminuw.s
===================================================================
RCS file: vminuw.s
diff -N vminuw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vminuw.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,33 ----
+ # PSIM altivec vminuw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long     10,     11,     12,     13
+ b_foo:	# v3
+ 	.long  32018,  32019,  32020,  32021
+ d_foo:	# v5
+ 	.long     10,     11,     12,     13
+ 
+ 	.text
+ 	.global vminuw
+ vminuw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vminuw		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmladduhm.s
===================================================================
RCS file: vmladduhm.s
diff -N vmladduhm.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmladduhm.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,42 ----
+ # PSIM altivec vmladduhm testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short   8000,  4000, 16384, 20000
+ 	.short  16024, 16025,    32,  8191
+ b_foo:	# v3
+ 	.short      3,     3,     2,     2
+ 	.short      1,     1,     4,     4
+ c_foo:	# v4
+ 	.short     20,    21,    22,    23
+ 	.short     24,    25,    26,    27
+ d_foo:	# v5
+ 	.short  24020, 12021, 32790, 40023
+ 	.short  16048, 16050,   154, 32791
+ 
+ 	.text
+ 	.global vmladduhm
+ vmladduhm:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v4, %r3, c_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmladduhm	%v9, %v2, %v3, %v4
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
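
unlike the saturating forms, vmladduhm is plain modulo arithmetic, which
is where expected values such as 40023 come from.  a rough C sketch of
one element (the helper name is made up):

    #include <stdint.h>

    /* low 16 bits of a*b + c, no saturation */
    static uint16_t mladduhm (uint16_t a, uint16_t b, uint16_t c)
    {
      return (uint16_t) ((uint32_t) a * b + c);
    }
    /* e.g. mladduhm (20000, 2, 23) == 40023, mladduhm (8191, 4, 27) == 32791 */
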
Index: sim/testsuite/sim/ppc/vmrghb.le.s
===================================================================
RCS file: vmrghb.le.s
diff -N vmrghb.le.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmrghb.le.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,43 ----
+ # PSIM altivec vmrghb little endian testcase
+ # mach: powerpcle
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte	0,  1,  2,  3
+ 	.byte	4,  5,  6,  7
+ 	.byte   8,  9,  10, 11
+ 	.byte   12, 13, 14, 15
+ b_foo:	# v3
+ 	.byte	16, 17, 18, 19
+ 	.byte	20, 21, 22, 23
+ 	.byte	24, 25, 26, 27
+ 	.byte	28, 29, 30, 31
+ d_foo:	# v5
+ 	.byte	24, 8, 25, 9
+ 	.byte	26, 10, 27, 11
+ 	.byte	28, 12, 29, 13
+ 	.byte	30, 14, 31, 15
+ 
+ 	.text
+ 	.global vmrghb
+ vmrghb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmrghb		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmrghb.s
===================================================================
RCS file: vmrghb.s
diff -N vmrghb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmrghb.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,43 ----
+ # PSIM altivec vmrghb testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte	0,  1,  2,  3
+ 	.byte	4,  5,  6,  7
+ 	.byte   8,  9,  10, 11
+ 	.byte   12, 13, 14, 15
+ b_foo:	# v3
+ 	.byte	16, 17, 18, 19
+ 	.byte	20, 21, 22, 23
+ 	.byte	24, 25, 26, 27
+ 	.byte	28, 29, 30, 31
+ d_foo:	# v5
+ 	.byte   0,  16, 1,  17
+ 	.byte	2,  18, 3,  19
+ 	.byte   4,  20, 5,  21
+ 	.byte	6,  22, 7,  23
+ 
+ 	.text
+ 	.global vmrghb
+ vmrghb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmrghb		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmrghh.s
===================================================================
RCS file: vmrghh.s
diff -N vmrghh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmrghh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,37 ----
+ # PSIM altivec vmrghh testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short	0,  1,  2,  3
+ 	.short	4,  5,  6,  7
+ b_foo:	# v3
+ 	.short	16, 17, 18, 19
+ 	.short	20, 21, 22, 23
+ d_foo:	# v5
+ 	.short  0,  16, 1,  17
+ 	.short	2,  18, 3,  19
+ 
+ 	.text
+ 	.global vmrghh
+ vmrghh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmrghh		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmrghw.s
===================================================================
RCS file: vmrghw.s
diff -N vmrghw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmrghw.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vmrghw testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long	0,  1,  2,  3
+ b_foo:	# v3
+ 	.long	16, 17, 18, 19
+ d_foo:	# v5
+ 	.long	0,  16, 1,  17
+ 
+ 	.text
+ 	.global vmrghw
+ vmrghw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmrghw		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmrglb.s
===================================================================
RCS file: vmrglb.s
diff -N vmrglb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmrglb.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,43 ----
+ # PSIM altivec vmrglb testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte	0,  1,  2,  3
+ 	.byte	4,  5,  6,  7
+ 	.byte   8,  9,  10, 11
+ 	.byte   12, 13, 14, 15
+ b_foo:	# v3
+ 	.byte	16, 17, 18, 19
+ 	.byte	20, 21, 22, 23
+ 	.byte	24, 25, 26, 27
+ 	.byte	28, 29, 30, 31
+ d_foo:	# v5
+ 	.byte   8,  24, 9,  25
+ 	.byte	10, 26, 11, 27
+ 	.byte   12, 28, 13, 29
+ 	.byte	14, 30, 15, 31
+ 
+ 	.text
+ 	.global vmrglb
+ vmrglb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmrglb		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmrglh.s
===================================================================
RCS file: vmrglh.s
diff -N vmrglh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmrglh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,37 ----
+ # PSIM altivec vmrglh testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short	0,  1,  2,  3
+ 	.short	4,  5,  6,  7
+ b_foo:	# v3
+ 	.short	16, 17, 18, 19
+ 	.short	20, 21, 22, 23
+ d_foo:	# v5
+ 	.short  4,  20, 5,  21
+ 	.short	6,  22, 7,  23
+ 
+ 	.text
+ 	.global vmrglh
+ vmrglh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmrglh		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmrglw.s
===================================================================
RCS file: vmrglw.s
diff -N vmrglw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmrglw.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vmrglw testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long	0,  1,  2,  3
+ b_foo:	# v3
+ 	.long	16, 17, 18, 19
+ d_foo:	# v5
+ 	.long	2,  18, 3,  19
+ 
+ 	.text
+ 	.global vmrglw
+ vmrglw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmrglw		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmsummbm.s
===================================================================
RCS file: vmsummbm.s
diff -N vmsummbm.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmsummbm.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,43 ----
+ # PSIM altivec vmsummbm testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte	8, -9, 0, -1
+ 	.byte	4, -5, 6, -7
+ 	.byte	1, -1, 2, -3
+ 	.byte	2, -3, 4, -5
+ b_foo:	# v3
+ 	.byte	5, 3, 2, 3
+ 	.byte	7, 8, 6, 7
+ 	.byte	9, 1, 0, 1
+ 	.byte	5, 3, 4, 5
+ c_foo:	# v4
+ 	.long	10, 20, 30, 40
+ d_foo:	# v5 
+ 	.long	20, -5, 35, 32
+ 
+ 	.text
+ 	.global vmsummbm
+ vmsummbm:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v4, %r3, c_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmsummbm	%v9, %v2, %v3, %v4
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
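
each word of d_foo above is the sum of four signed-byte times
unsigned-byte products plus the corresponding c_foo word.  a rough C
sketch of one result word (the helper name is made up):

    #include <stdint.h>

    static int32_t msummbm (const int8_t a[4], const uint8_t b[4], int32_t c)
    {
      int32_t sum = c;
      int i;
      for (i = 0; i < 4; i++)
        sum += (int32_t) a[i] * b[i];
      return sum;
    }
    /* e.g. {8,-9,0,-1} against {5,3,2,3} plus 10 gives 20, the first word */
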
Index: sim/testsuite/sim/ppc/vmsumshm.s
===================================================================
RCS file: vmsumshm.s
diff -N vmsumshm.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmsumshm.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,43 ----
+ # PSIM altivec vmsumshm testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short	8, -9
+ 	.short	0, -1
+ 	.short	-4, -5
+ 	.short	6, -7
+ b_foo:	# v3
+ 	.short	5,  3
+ 	.short	2, -3
+ 	.short	-7, -8
+ 	.short	-6,  7
+ c_foo:	# v4
+ 	.long	10, 20, 30, 40
+ d_foo:	# v5
+ 	.long	23, 23, 98, -45
+ 
+ 	.text
+ 	.global vmsumshm
+ vmsumshm:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v4, %r3, c_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmsumshm	%v9, %v2, %v3, %v4
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmsumshs.s
===================================================================
RCS file: vmsumshs.s
diff -N vmsumshs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmsumshs.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,43 ----
+ # PSIM altivec vmsumshs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short	8, -9
+ 	.short	0, -1
+ 	.short	-4, -5
+ 	.short	6, -7
+ b_foo:	# v3
+ 	.short	5,  3
+ 	.short	2, -3
+ 	.short	-7, -8
+ 	.short	-6,  7
+ c_foo:	# v4
+ 	.long	10, 20, 2147483640, -2147483640
+ d_foo:	# v5
+ 	.long	23, 23, 2147483647, -2147483648
+ 
+ 	.text
+ 	.global vmsumshs
+ vmsumshs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v4, %r3, c_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmsumshs	%v9, %v2, %v3, %v4
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmulesb.s
===================================================================
RCS file: vmulesb.s
diff -N vmulesb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmulesb.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,41 ----
+ # PSIM altivec vmulesb testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 10, -11, 12, -13
+ 	.byte 14, -15, 16, -17
+ 	.byte 18, -19, 20, -21
+ 	.byte 22, -23, 24, -25
+ b_foo:	# v3
+ 	.byte -10, 11, -12, 13
+ 	.byte -14, 15, -16, 17
+ 	.byte -18, 19, -20, 21
+ 	.byte -22, 23, -24, 25
+ d_foo:	# v5
+ 	.short -100, -144, -196, -256
+ 	.short -324, -400, -484, -576
+ 
+ 	.text
+ 	.global vmulesb
+ vmulesb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmulesb		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmulesh.s
===================================================================
RCS file: vmulesh.s
diff -N vmulesh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmulesh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vmulesh testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short 10, -11, 12, -13
+ 	.short 14, -15, 16, -17
+ b_foo:	# v3
+ 	.short -10, 11, -12, 13
+ 	.short -14, 15, -16, 17
+ d_foo:	# v5
+ 	.long -100, -144, -196, -256
+ 
+ 	.text
+ 	.global vmulesh
+ vmulesh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmulesh		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmuleub.s
===================================================================
RCS file: vmuleub.s
diff -N vmuleub.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmuleub.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,41 ----
+ # PSIM altivec vmuleub testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 10, 11, 12, 13
+ 	.byte 14, 15, 16, 17
+ 	.byte 18, 19, 20, 21
+ 	.byte 22, 23, 24, 25
+ b_foo:	# v3
+ 	.byte 10, 11, 12, 13
+ 	.byte 14, 15, 16, 17
+ 	.byte 18, 19, 20, 21
+ 	.byte 22, 23, 24, 25
+ d_foo:	# v5
+ 	.short 100, 144, 196, 256
+ 	.short 324, 400, 484, 576
+ 
+ 	.text
+ 	.global vmuleub
+ vmuleub:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmuleub		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmuleuh.s
===================================================================
RCS file: vmuleuh.s
diff -N vmuleuh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmuleuh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vmuleuh testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short 10, 11, 12, 13
+ 	.short 14, 15, 16, 17
+ b_foo:	# v3
+ 	.short 10, 11, 12, 13
+ 	.short 14, 15, 16, 17
+ d_foo:	# v5
+ 	.long 100, 144, 196, 256
+ 
+ 	.text
+ 	.global vmuleuh
+ vmuleuh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmuleuh		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmulosb.s
===================================================================
RCS file: vmulosb.s
diff -N vmulosb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmulosb.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,41 ----
+ # PSIM altivec vmulosb testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 10, -11, 12, -13
+ 	.byte 14, -15, 16, -17
+ 	.byte 18, -19, 20, -21
+ 	.byte 22, -23, 24, -25
+ b_foo:	# v3
+ 	.byte -10, 11, -12, 13
+ 	.byte -14, 15, -16, 17
+ 	.byte -18, 19, -20, 21
+ 	.byte -22, 23, -24, 25
+ d_foo:	# v5
+ 	.short -121, -169, -225, -289
+ 	.short -361, -441, -529, -625
+ 
+ 	.text
+ 	.global vmulosb
+ vmulosb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmulosb		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmulosh.s
===================================================================
RCS file: vmulosh.s
diff -N vmulosh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmulosh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vmulosh testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short 10, -11, 12, -13
+ 	.short 14, -15, 16, -17
+ b_foo:	# v3
+ 	.short -10, 11, -12, 13
+ 	.short -14, 15, -16, 17
+ d_foo:	# v5
+ 	.long -121, -169, -225, -289
+ 
+ 	.text
+ 	.global vmulosh
+ vmulosh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmulosh		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmuloub.s
===================================================================
RCS file: vmuloub.s
diff -N vmuloub.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmuloub.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,41 ----
+ # PSIM altivec vmuloub testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 10, 11, 12, 13
+ 	.byte 14, 15, 16, 17
+ 	.byte 18, 19, 20, 21
+ 	.byte 22, 23, 24, 25
+ b_foo:	# v3
+ 	.byte 10, 11, 12, 13
+ 	.byte 14, 15, 16, 17
+ 	.byte 18, 19, 20, 21
+ 	.byte 22, 23, 24, 25
+ d_foo:	# v5
+ 	.short 121, 169, 225, 289
+ 	.short 361, 441, 529, 625
+ 
+ 	.text
+ 	.global vmuloub
+ vmuloub:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmuloub		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vmulouh.s
===================================================================
RCS file: vmulouh.s
diff -N vmulouh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vmulouh.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vmulouh testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short 10, 11, 12, 13
+ 	.short 14, 15, 16, 17
+ b_foo:	# v3
+ 	.short 10, 11, 12, 13
+ 	.short 14, 15, 16, 17
+ d_foo:	# v5
+ 	.long 121, 169, 225, 289
+ 
+ 	.text
+ 	.global vmulouh
+ vmulouh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vmulouh		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vor.s
===================================================================
RCS file: vor.s
diff -N vor.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vor.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,50 ----
+ # PSIM altivec vor/vnor/vxor testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0x00378421, 0x87654321, 0x0fedcba9, 0x0000ffff
+ dor_foo:	# v5 (or)
+ 	.long 0xf0bf9669, 0x97755779, 0x9ffddff9, 0xffffffff
+ dnor_foo:	# v6 (nor)
+ 	.long 0x0f406996, 0x688aa886, 0x60022006, 0
+ dxor_foo:	# v7 (xor)
+ 	.long 0xf0b89669, 0x95511559, 0x95511559, 0xffffffff
+ 
+ 	.text
+ 	.global vor
+ vor:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, dor_foo
+ 	load_vr		%v6, %r3, dnor_foo
+ 	load_vr		%v7, %r3, dxor_foo
+ 
+ 	vor		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		4, 24, fail
+ 
+ 	vnor		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v6
+ 	bc		4, 24, fail
+ 
+ 	vxor		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v7
+ 	bc		4, 24, fail
+ 
+ 	b		pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vperm.s
===================================================================
RCS file: vperm.s
diff -N vperm.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vperm.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,39 ----
+ # PSIM altivec vperm testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ 	# example from AltiVec PEM
+ a_foo:	# v2
+ 	.byte 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
+ 	.byte 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0xf
+ b_foo:	# v3
+ 	.byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+ 	.byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
+ c_foo:	# v4
+ 	.byte 0x1,  0x14, 0x18, 0x10, 0x16, 0x15, 0x19, 0x1a
+ 	.byte 0x1c, 0x1c, 0x1c, 0x13, 0x8,  0x1d, 0x1e, 0x0e
+ 
+ 	.text
+ 	.global vperm
+ vperm:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v4, %r3, c_foo
+ 
+ 	vperm		%v9, %v2, %v3, %v4
+ 	vcmpequw.	%v10, %v9, %v4
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
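
note that the test above compares the vperm result against the permute
control vector itself: the source bytes are exactly their own indices
(0x00..0x1f across a_foo and b_foo), so the permuted result must equal
c_foo.  a rough C sketch of the operation (not the simulator code):

    #include <stdint.h>

    /* result byte i is byte (c[i] & 0x1f) of the 32-byte value a || b */
    static void perm (uint8_t d[16], const uint8_t ab[32], const uint8_t c[16])
    {
      int i;
      for (i = 0; i < 16; i++)
        d[i] = ab[c[i] & 0x1f];
    }
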
Index: sim/testsuite/sim/ppc/vpkpx.s
===================================================================
RCS file: vpkpx.s
diff -N vpkpx.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vpkpx.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,42 ----
+ # PSIM altivec vpkpx testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.byte 1, 16, 24, 32
+ 	.byte 0, 40, 48, 56+1
+ 	.byte 1, 64, 72, 80+2
+ 	.byte 0, 88+3, 96, 104
+ b_foo:	# v3
+ 	.byte 11, 112, 120, 128+1
+ 	.byte 12, 136+3, 144, 152
+ 	.byte 13, 160, 168, 176
+ 	.byte 14, 184, 192+2, 200
+ d_foo:	# v5
+ 	.short 34916, 5319, 41258, 11661
+ 	.short 47600, 18003, 53942, 24345
+ 
+ 	.text
+ 	.global vpkpx
+ vpkpx:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vpkpx		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
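
the d_foo halfwords above are 1/5/5/5 packed pixels: bit 0 of the first
byte of each source word, then the top five bits of each of the
remaining three bytes.  a rough C sketch of one packed pixel (the helper
name is made up):

    #include <stdint.h>

    static uint16_t pkpx (uint8_t a, uint8_t r, uint8_t g, uint8_t b)
    {
      return (uint16_t) (((a & 1) << 15) | ((r >> 3) << 10)
                         | ((g >> 3) << 5) | (b >> 3));
    }
    /* e.g. pkpx (1, 16, 24, 32) == 34916, the first d_foo halfword */
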
Index: sim/testsuite/sim/ppc/vpkshss.s
===================================================================
RCS file: vpkshss.s
diff -N vpkshss.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vpkshss.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,40 ----
+ # PSIM altivec vpkshss testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short 1, 2, 3, 4
+ 	.short 5, 6, 7, 8
+ b_foo:	# v3
+ 	.short 126, 127, 128, 129
+ 	.short -129, -128, -127, -126
+ d_foo:	# v5
+ 	.byte 1, 2, 3, 4
+ 	.byte 5, 6, 7, 8
+ 	.byte 126, 127, 127, 127
+ 	.byte -128, -128, -127, -126
+ 
+ 	.text
+ 	.global vpkshss
+ vpkshss:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vpkshss		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
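
this and the following vpk*s tests all check the same idea: each source
element is clamped to the destination range before being truncated (the
plain vpk*um forms simply keep the low-order bits).  a rough C sketch of
the signed halfword to signed byte case (the helper name is made up):

    #include <stdint.h>

    static int8_t pkshss (int16_t x)
    {
      return x > 127 ? 127 : x < -128 ? -128 : (int8_t) x;
    }
    /* e.g. pkshss (129) == 127 and pkshss (-129) == -128, as in d_foo */
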
Index: sim/testsuite/sim/ppc/vpkshus.s
===================================================================
RCS file: vpkshus.s
diff -N vpkshus.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vpkshus.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,40 ----
+ # PSIM altivec vpkshus testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short 1, 2, 3, 4
+ 	.short 5, 6, 7, 8
+ b_foo:	# v3
+ 	.short 254, 255, 256, 257
+ 	.short 129, -128, 127, -126
+ d_foo:	# v5
+ 	.byte 1, 2, 3, 4
+ 	.byte 5, 6, 7, 8
+ 	.byte 254, 255, 255, 255
+ 	.byte 129, 0, 127, 0
+ 
+ 	.text
+ 	.global vpkshus
+ vpkshus:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vpkshus		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vpkswss.s
===================================================================
RCS file: vpkswss.s
diff -N vpkswss.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vpkswss.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vpkswss testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 1, 2, 3, 4
+ b_foo:	# v3
+ 	.long 32767, 32768, -32768, -32769
+ d_foo:	# v5
+ 	.short 1, 2, 3, 4
+ 	.short 32767, 32767, -32768, -32768
+ 
+ 	.text
+ 	.global vpkswss
+ vpkswss:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vpkswss		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vpkswus.s
===================================================================
RCS file: vpkswus.s
diff -N vpkswus.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vpkswus.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vpkswus testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 1, 2, 3, 4
+ b_foo:	# v3
+ 	.long 32767, 32768, 65537, 32769
+ d_foo:	# v5
+ 	.short 1, 2, 3, 4
+ 	.short 32767, 32768, 65535, 32769
+ 
+ 	.text
+ 	.global vpkswus
+ vpkswus:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vpkswus		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vpkuhum.s
===================================================================
RCS file: vpkuhum.s
diff -N vpkuhum.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vpkuhum.s	Wed Dec 26 21:53:18 2001
***************
*** 0 ****
--- 1,40 ----
+ # PSIM altivec vpkuhum testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short 1, 2, 3, 4
+ 	.short 5, 6, 7, 8
+ b_foo:	# v3
+ 	.short 254, 255, 256, 257
+ 	.short 258, 259, 260, 261
+ d_foo:	# v5
+ 	.byte 1, 2, 3, 4
+ 	.byte 5, 6, 7, 8
+ 	.byte 254, 255, 0, 1
+ 	.byte 2, 3, 4, 5
+ 
+ 	.text
+ 	.global vpkuhum
+ vpkuhum:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vpkuhum		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vpkuhus.s
===================================================================
RCS file: vpkuhus.s
diff -N vpkuhus.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vpkuhus.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,40 ----
+ # PSIM altivec vpkuhus testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short 1, 2, 3, 4
+ 	.short 5, 6, 7, 8
+ b_foo:	# v3
+ 	.short 254, 255, 256, 257
+ 	.short 258, 259, 260, 261
+ d_foo:	# v5
+ 	.byte 1, 2, 3, 4
+ 	.byte 5, 6, 7, 8
+ 	.byte 254, 255, 255, 255
+ 	.byte 255, 255, 255, 255
+ 
+ 	.text
+ 	.global vpkuhus
+ vpkuhus:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vpkuhus		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vpkuwum.s
===================================================================
RCS file: vpkuwum.s
diff -N vpkuwum.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vpkuwum.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vpkuwum testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 1, 2, 3, 4
+ b_foo:	# v3
+ 	.long 65534, 65535, 65536, 65537
+ d_foo:	# v5
+ 	.short 1, 2, 3, 4
+ 	.short 65534, 65535, 0, 1
+ 
+ 	.text
+ 	.global vpkuwum
+ vpkuwum:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vpkuwum		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vpkuwus.s
===================================================================
RCS file: vpkuwus.s
diff -N vpkuwus.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vpkuwus.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vpkuwus testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 1, 2, 3, 4
+ b_foo:	# v3
+ 	.long 65534, 65535, 65536, 65537
+ d_foo:	# v5
+ 	.short 1, 2, 3, 4
+ 	.short 65534, 65535, 65535, 65535
+ 
+ 	.text
+ 	.global vpkuwus
+ vpkuwus:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vpkuwus		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vrlb.s
===================================================================
RCS file: vrlb.s
diff -N vrlb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vrlb.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,42 ----
+ # PSIM altivec vrlb testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 2, 6, 4, 8
+ 	.byte 1, 5, 3, 7
+ 	.byte 1, 5, 3, 7
+ 	.byte 2, 6, 4, 8
+ b_foo:	# v3
+ 	.byte 1, 2, 3, 4
+ 	.byte 5, 6, 7, 8
+ 	.byte 1, 2, 3, 4
+ 	.byte 5, 6, 7, 8
+ d_foo:	# v5
+ 	.byte 4, 24, 32, 128
+ 	.byte 32, 65, 129, 7
+ 	.byte 2, 20, 24, 112
+ 	.byte 64, 129, 2, 8
+ 
+ 	.text
+ 	.global vrlb
+ vrlb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vrlb		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vrlh.s
===================================================================
RCS file: vrlh.s
diff -N vrlh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vrlh.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vrlh testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short 2, 6, 10, 12
+ 	.short 1, 5, 9, 13
+ b_foo:	# v3
+ 	.short 2, 4, 6, 8
+ 	.short 3, 7, 11, 15
+ d_foo:	# v5
+ 	.short 8, 96, 640, 3072
+ 	.short 8, 640, 18432, 32774
+ 
+ 	.text
+ 	.global vrlh
+ vrlh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vrlh		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
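
the d_foo values in the rotate tests are plain left rotations within
each element; 13 rotated left by 15 in a halfword is 32774, for
instance.  a rough C sketch of the halfword case (the helper name is
made up):

    #include <stdint.h>

    static uint16_t rotlh (uint16_t x, unsigned sh)
    {
      sh &= 15;
      return (uint16_t) ((x << sh) | (x >> ((16 - sh) & 15)));
    }
    /* e.g. rotlh (13, 15) == 32774, rotlh (9, 11) == 18432 */
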
Index: sim/testsuite/sim/ppc/vrlw.s
===================================================================
RCS file: vrlw.s
diff -N vrlw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vrlw.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,33 ----
+ # PSIM altivec vrlw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long 23456, 7654321, 97531, 2468
+ b_foo:	# v3
+ 	.long 30, 27, 4, 2
+ d_foo:	# v5
+ 	.long 5864, -2013026723, 1560496, 9872
+ 
+ 	.text
+ 	.global vrlw
+ vrlw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vrlw		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsel.s
===================================================================
RCS file: vsel.s
diff -N vsel.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsel.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,37 ----
+ # PSIM altivec vsel testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0x00378421, 0x87654321, 0x0fedcba9, 0x0000ffff
+ c_foo:	# v4
+ 	.long 0xfedcba98, 0x76543210, 0x01234567, 0x89abcdef
+ d_foo:	# v5
+ 	.long 0x00178040, 0x06644668, 0x9bbddbb1, 0x7654cdef
+ 
+ 	.text
+ 	.global vsel
+ vsel:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v4, %r3, c_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsel		%v9, %v2, %v3, %v4
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
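
d_foo above is (a AND NOT c) OR (b AND c); v4 selects bit by bit between
the two sources.  a rough C sketch per word (the helper name is made up):

    #include <stdint.h>

    static uint32_t sel (uint32_t a, uint32_t b, uint32_t c)
    {
      return (a & ~c) | (b & c);
    }
    /* e.g. sel (0xf08f1248, 0x00378421, 0xfedcba98) == 0x00178040 */
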
Index: sim/testsuite/sim/ppc/vsl.s
===================================================================
RCS file: vsl.s
diff -N vsl.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsl.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vsl testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0x04040404, 0x04040404, 0x04040404, 0x04040404
+ d_foo:	# v5
+ 	.long 0x08f12481, 0x23456789, 0xabcdef0f, 0xfff00000
+ 
+ 	.text
+ 	.global vsl
+ vsl:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsl		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vslb.s
===================================================================
RCS file: vslb.s
diff -N vslb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vslb.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vslb testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0x01020304, 0x05060708, 0x08070605, 0x04030201
+ d_foo:	# v5
+ 	.long 0xe03c9080, 0x40000078, 0x9a008000, 0xf0f80000
+ 
+ 	.text
+ 	.global vslb
+ vslb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vslb		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsldoi.s
===================================================================
RCS file: vsldoi.s
diff -N vsldoi.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsldoi.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vsldoi testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ # BROKEN on the sim
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0x01020304, 0x05060708, 0x08070605, 0x04030201
+ d_foo:	# v5
+ 	.long 0xffff0000, 0x01020304, 0x05060708, 0x08070605
+ 
+ 	.text
+ 	.global vsldoi
+ vsldoi:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsldoi		%v9, %v2, %v3, 12
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vslh.s
===================================================================
RCS file: vslh.s
diff -N vslh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vslh.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vslh testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0x00020004, 0x00060008, 0x00070005, 0x00030001
+ d_foo:	# v5
+ 	.long 0xc23c2480, 0x8d007800, 0x5e00de00, 0xfff80000
+ 
+ 	.text
+ 	.global vslh
+ vslh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vslh		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vslo.s
===================================================================
RCS file: vslo.s
diff -N vslo.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vslo.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vslo testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0, 0, 0, 0x18	# 3 << 3
+ d_foo:	# v5
+ 	.long 0x48123456, 0x789abcde, 0xf0ffff00, 0
+ 
+ 	.text
+ 	.global vslo
+ vslo:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vslo		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vslw.s
===================================================================
RCS file: vslw.s
diff -N vslw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vslw.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vslw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0x00000004, 0x00000008, 0x00000005, 0x00000001
+ d_foo:	# v5
+ 	.long 0x08f12480, 0x34567800, 0x579bde00, 0xfffe0000
+ 
+ 	.text
+ 	.global vslw
+ vslw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vslw		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsr.s
===================================================================
RCS file: vsr.s
diff -N vsr.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsr.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vsr testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0x04040404, 0x04040404, 0x04040404, 0x04040404
+ d_foo:	# v5
+ 	.long 0x0f08f124, 0x81234567, 0x89abcdef, 0x0ffff000
+ 
+ 	.text
+ 	.global vsr
+ vsr:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsr		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsrab.s
===================================================================
RCS file: vsrab.s
diff -N vsrab.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsrab.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vsrab testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0x01020304, 0x05060708, 0x08070605, 0x04030201
+ d_foo:	# v5
+ 	.long 0xf8e30204, 0x00000078, 0x9affffff, 0xffff0000
+ 
+ 	.text
+ 	.global vsrab
+ vsrab:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsrab		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsrah.s
===================================================================
RCS file: vsrah.s
diff -N vsrah.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsrah.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vsrah testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0x00020004, 0x00060008, 0x00070005, 0x00030001
+ d_foo:	# v5
+ 	.long 0xfc230124, 0x00480056, 0xff35fef7, 0xffff0000
+ 
+ 	.text
+ 	.global vsrah
+ vsrah:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsrah		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsraw.s
===================================================================
RCS file: vsraw.s
diff -N vsraw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsraw.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vsraw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 4, 8, 5, 1
+ d_foo:	# v5
+ 	.long 0xff08f124, 0x00123456, 0xfcd5e6f7, 0xffff8000
+ 
+ 	.text
+ 	.global vsraw
+ vsraw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsraw		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsrb.s
===================================================================
RCS file: vsrb.s
diff -N vsrb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsrb.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vsrb testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0x01020304, 0x05060708, 0x08070605, 0x04030201
+ d_foo:	# v5
+ 	.long 0x78230204, 0x00000078, 0x9a010307, 0x0f1f0000
+ 
+ 	.text
+ 	.global vsrb
+ vsrb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsrb		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsrh.s
===================================================================
RCS file: vsrh.s
diff -N vsrh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsrh.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vsrh testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0x00020004, 0x00060008, 0x00070005, 0x00030001
+ d_foo:	# v5
+ 	.long 0x3c230124, 0x00480056, 0x013506f7, 0x1fff0000
+ 
+ 	.text
+ 	.global vsrh
+ vsrh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsrh		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsro.s
===================================================================
RCS file: vsro.s
diff -N vsro.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsro.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vsro testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 0, 0, 0, 0x18	# 3 << 3
+ d_foo:	# v5
+ 	.long 0x000000f0, 0x8f124812, 0x3456789a, 0xbcdef0ff
+ 
+ 	.text
+ 	.global vsro
+ vsro:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsro		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsrw.s
===================================================================
RCS file: vsrw.s
diff -N vsrw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsrw.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vsrw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 0xf08f1248, 0x12345678, 0x9abcdef0, 0xffff0000
+ b_foo:	# v3
+ 	.long 4, 8, 5, 1
+ d_foo:	# v5
+ 	.long 0x0f08f124, 0x00123456, 0x04d5e6f7, 0x7fff8000
+ 
+ 	.text
+ 	.global vsrw
+ vsrw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsrw		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsubcuw.s
===================================================================
RCS file: vsubcuw.s
diff -N vsubcuw.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsubcuw.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,41 ----
+ # PSIM altivec vsubcuw testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:
+ 	.long 10
+ 	.long 50
+ 	.long 128
+ 	.long 0
+ b_foo:
+ 	.long 9
+ 	.long 45
+ 	.long 256
+ 	.long 6
+ d_foo:
+ 	.long 1
+ 	.long 1
+ 	.long 0
+ 	.long 0
+ 
+ 	.text
+ 	.global vsubcuw
+ vsubcuw:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 	vsubcuw		%v4, %v2, %v3
+ 	vcmpequw.	%v6, %v4, %v5
+ 	bc		12, 24, pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsubsbs.s
===================================================================
RCS file: vsubsbs.s
diff -N vsubsbs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsubsbs.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,44 ----
+ # PSIM altivec vsubsbs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 10, -11, 12, -13
+ 	.byte 14, -15, 16, -17
+ 	.byte 18, -19, 125, -21
+ 	.byte 22, -23, 24, -25
+ b_foo:	# v3
+ 	.byte -10, 11, -12, 13
+ 	.byte -14, 15, -16, 17
+ 	.byte -18, 119, -20, 21
+ 	.byte -22, 23, -24, 25
+ d_foo:	# v5
+ 	.byte 20, -22, 24, -26
+ 	.byte 28, -30, 32, -34
+ 	.byte 36, -128, 127, -42
+ 	.byte 44, -46, 48, -50
+ 
+ 	.text
+ 	.global vsubsbs
+ vsubsbs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsubsbs		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		4, 24, fail
+ 
+ 	b		pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsubshs.s
===================================================================
RCS file: vsubshs.s
diff -N vsubshs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsubshs.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,39 ----
+ # PSIM altivec vsubshs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short 32020, -32021, 5, -32023
+ 	.short 32024, -32025, 32026, -32027
+ b_foo:	# v3
+ 	.short -32020, 32021, -32022, 32023
+ 	.short 32024, 32025, 32026, 32027
+ d_foo:	# v5
+ 	.short 32767, -32768, 32027, -32768
+ 	.short 0, -32768, 0, -32768
+ 
+ 	.text
+ 	.global vsubshs
+ vsubshs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsubshs		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		4, 24, fail
+ 
+ 	b		pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsubsws.s
===================================================================
RCS file: vsubsws.s
diff -N vsubsws.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsubsws.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vsubsws testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 2147483640, -2147483641, 2147483642, -2147483643
+ b_foo:	# v3
+ 	.long -2147483640, 2147483641, 2147483642, 2147483643
+ d_foo:	# v5
+ 	.long 2147483647, -2147483648, 0, -2147483648
+ 
+ 	.text
+ 	.global vsubsws
+ vsubsws:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsubsws		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		4, 24, fail
+ 
+ 	b		pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsububm.s
===================================================================
RCS file: vsububm.s
diff -N vsububm.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsububm.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,43 ----
+ # PSIM altivec vsububm testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 245, 246, 247, 248
+ 	.byte 249, 250, 251, 252
+ 	.byte 253, 254, 255, 254
+ 	.byte 253, 252, 251, 250
+ b_foo:	# v3
+ 	.byte 10, 11, 12, 13
+ 	.byte 14, 15, 16, 17
+ 	.byte 18, 19,  0,  2
+ 	.byte  4,  6,  8, 10
+ d_foo:	# v5
+ 	.byte 235, 235, 235, 235
+ 	.byte 235, 235, 235, 235
+ 	.byte 235, 235, 255, 252
+ 	.byte 249, 246, 243, 240
+ 
+ 	.text
+ 	.global vsububm
+ vsububm:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsububm		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsububs.s
===================================================================
RCS file: vsububs.s
diff -N vsububs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsububs.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,43 ----
+ # PSIM altivec vsububs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.byte 245, 246, 247, 248
+ 	.byte 249, 250, 251, 252
+ 	.byte 18, 19,  0,  2
+ 	.byte  4,  6,  8, 10
+ b_foo:	# v3
+ 	.byte 10, 11, 12, 13
+ 	.byte 14, 15, 16, 17
+ 	.byte 253, 254, 255, 254
+ 	.byte 253, 252, 251, 250
+ d_foo:	# v5
+ 	.byte 235, 235, 235, 235
+ 	.byte 235, 235, 235, 235
+ 	.byte 0, 0, 0, 0
+ 	.byte 0, 0, 0, 0
+ 
+ 	.text
+ 	.global vsububs
+ vsububs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsububs		%v9, %v2, %v3
+ 	vcmpequb.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsubuhm.s
===================================================================
RCS file: vsubuhm.s
diff -N vsubuhm.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsubuhm.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,37 ----
+ # PSIM altivec vsubuhm testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short 32820, 32021, 32822, 32023
+ 	.short 32024, 32025, 32026, 32927
+ b_foo:	# v3
+ 	.short 32024, 32025, 32026, 32927
+ 	.short 32820, 32021, 32822, 32023
+ d_foo:	# v5
+ 	.short 796, 65532, 796, 64632
+ 	.short 64740, 4, 64740, 904
+ 
+ 	.text
+ 	.global vsubuhm
+ vsubuhm:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsubuhm		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsubuhs.s
===================================================================
RCS file: vsubuhs.s
diff -N vsubuhs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsubuhs.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,37 ----
+ # PSIM altivec vsubuhs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.short 32820, 32021, 32822, 32023
+ 	.short 32024, 32025, 32026, 32927
+ b_foo:	# v3
+ 	.short 32024, 32025, 32026, 32927
+ 	.short 32820, 32021, 32822, 32023
+ d_foo:	# v5
+ 	.short 796, 0, 796, 0
+ 	.short 0, 4, 0, 904
+ 
+ 	.text
+ 	.global vsubuhs
+ vsubuhs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsubuhs		%v9, %v2, %v3
+ 	vcmpequh.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsubuwm.s
===================================================================
RCS file: vsubuwm.s
diff -N vsubuwm.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsubuwm.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vsubuwm testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long 3000000000, 1500000200, 3000000400, 2500000600
+ b_foo:	# v3
+ 	.long 1000000000, 3000000100, 2000000200, 3000000300
+ d_foo:	# v5
+ 	.long 2000000000, 2794967396, 1000000200, 3794967596
+ 
+ 	.text
+ 	.global vsubuwm
+ vsubuwm:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsubuwm		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsubuws.s
===================================================================
RCS file: vsubuws.s
diff -N vsubuws.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsubuws.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,34 ----
+ # PSIM altivec vsubuws testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ a_foo:	# v2
+ 	.long 3000000000, 1500000200, 3000000400, 2500000600
+ b_foo:	# v3
+ 	.long 1000000000, 3000000100, 2000000200, 3000000300
+ d_foo:	# v5
+ 	.long 2000000000, 0, 1000000200, 0
+ 
+ 	.text
+ 	.global vsubuws
+ vsubuws:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsubuws		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsum2sws.s
===================================================================
RCS file: vsum2sws.s
diff -N vsum2sws.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsum2sws.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,39 ----
+ # PSIM altivec vsum2sws testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 1, 2, 3, 4
+ 	#.long 5000, 234556342, 2047483642, -40000000
+ b_foo:	# v3
+ 	#.long 55, 247483642, 55, 94561336
+ 	.long 5, 6, 7, 8
+ d_foo:	# v5
+ 	#.long 0, 482044984, 0, 2102044978
+ 	.long 0, 9, 0, 15
+ 
+ 	.text
+ 	.global vsum2sws
+ vsum2sws:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsum2sws	%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		4, 24, fail
+ 
+ 	b		pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsum4sbs.s
===================================================================
RCS file: vsum4sbs.s
diff -N vsum4sbs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsum4sbs.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,39 ----
+ # PSIM altivec vsum4sbs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.byte 50, 40, 30, 20	# 140
+ 	.byte 100, 110, 120, 90 # 420
+ 	.byte 100, 101, 102, 103 # 406
+ 	.byte 8, 6, 4, 2 # 20
+ b_foo:	# v3
+ 	.long 14, 23, 42, 81
+ d_foo:	# v5
+ 	.long 154, 443, 448, 101
+ 
+ 	.text
+ 	.global vsum4sbs
+ vsum4sbs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsum4sbs		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		4, 24, fail
+ 
+ 	b		pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsum4shs.s
===================================================================
RCS file: vsum4shs.s
diff -N vsum4shs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsum4shs.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,37 ----
+ # PSIM altivec vsum4shs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short 50, 40, 30, 20
+ 	.short 100, 110, 120, 130
+ b_foo:	# v3
+ 	.long 14, 23, 42, 81
+ d_foo:	# v5
+ 	.long 104, 73, 252, 331
+ 
+ 	.text
+ 	.global vsum4shs
+ vsum4shs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsum4shs		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		4, 24, fail
+ 
+ 	b		pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsum4ubs.s
===================================================================
RCS file: vsum4ubs.s
diff -N vsum4ubs.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsum4ubs.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,39 ----
+ # PSIM altivec vsum4ubs testcase
+ # mach: all
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.byte 50, 40, 30, 20	# 140
+ 	.byte 100, 110, 120, 90 # 420
+ 	.byte 100, 101, 102, 103 # 406
+ 	.byte 8, 6, 4, 2 # 20
+ b_foo:	# v3
+ 	.long 14, 23, 42, 81
+ d_foo:	# v5
+ 	.long 154, 443, 448, 101
+ 
+ 	.text
+ 	.global vsum4ubs
+ vsum4ubs:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsum4ubs		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		4, 24, fail
+ 
+ 	b		pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vsumsws.s
===================================================================
RCS file: vsumsws.s
diff -N vsumsws.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vsumsws.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,36 ----
+ # PSIM altivec vsumsws testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.long 5000, 234556342, 2047483642, -40000000
+ b_foo:	# v3
+ 	.long -2147483640, 2147483641, 2147483642, 94561336
+ d_foo:	# v5
+ 	.long 0, 0, 0, 2147483647
+ 
+ 	.text
+ 	.global vsumsws
+ vsumsws:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v3, %r3, b_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vsumsws		%v9, %v2, %v3
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		4, 24, fail
+ 
+ 	b		pass
+ fail:
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vupkhpx.s
===================================================================
RCS file: vupkhpx.s
diff -N vupkhpx.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vupkhpx.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,33 ----
+ # PSIM altivec vupkhpx testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short 0xface, 0x1234, 0x5678, 0x90db
+ 	.short 0, 0, 0, 0
+ 
+ d_foo:	# v5
+ 	.long 0xff1e160e, 0x41114, 0x151318, 0xff04061b
+ 
+ 	.text
+ 	.global vupkhpx
+ vupkhpx:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vupkhpx		%v9, %v2
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vupkhsb.s
===================================================================
RCS file: vupkhsb.s
diff -N vupkhsb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vupkhsb.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,35 ----
+ # PSIM altivec vupkhsb testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.byte	-1, -5, -8, 3
+ 	.byte	-124, 4, 3, 23
+ 	.byte	0, 0, 0, 0
+ 	.byte	0, 0, 0, 0
+ d_foo:	# v5
+ 	.short	-1, -5, -8, 3
+ 	.short	-124, 4, 3, 23
+ 
+ 	.text
+ 	.global vupkhsb
+ vupkhsb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vupkhsb		%v9, %v2
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vupkhsh.s
===================================================================
RCS file: vupkhsh.s
diff -N vupkhsh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vupkhsh.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,32 ----
+ # PSIM altivec vupkhsh testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short	-1, -5, -8, 3
+ 	.short	0, 0, 0, 0
+ d_foo:	# v5
+ 	.long	-1, -5, -8, 3
+ 
+ 	.text
+ 	.global vupkhsh
+ vupkhsh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vupkhsh		%v9, %v2
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vupklpx.s
===================================================================
RCS file: vupklpx.s
diff -N vupklpx.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vupklpx.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,33 ----
+ # PSIM altivec vupklpx testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short 0, 0, 0, 0
+ 	.short 0xface, 0x1234, 0x5678, 0x90db
+ 
+ d_foo:	# v5
+ 	.long 0xff1e160e, 0x41114, 0x151318, 0xff04061b
+ 
+ 	.text
+ 	.global vupklpx
+ vupklpx:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vupklpx		%v9, %v2
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vupklsb.s
===================================================================
RCS file: vupklsb.s
diff -N vupklsb.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vupklsb.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,35 ----
+ # PSIM altivec vupklsb testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.byte	0, 0, 0, 0
+ 	.byte	0, 0, 0, 0
+ 	.byte	-1, -5, -8, 3
+ 	.byte	-124, 4, 3, 23
+ d_foo:	# v5
+ 	.short	-1, -5, -8, 3
+ 	.short	-124, 4, 3, 23
+ 
+ 	.text
+ 	.global vupklsb
+ vupklsb:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vupklsb		%v9, %v2
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass
Index: sim/testsuite/sim/ppc/vupklsh.s
===================================================================
RCS file: vupklsh.s
diff -N vupklsh.s
*** /dev/null	Tue May  5 13:32:27 1998
--- vupklsh.s	Wed Dec 26 21:53:19 2001
***************
*** 0 ****
--- 1,32 ----
+ # PSIM altivec vupklsh testcase
+ # mach: powerpc
+ # as(powerpcle): -mlittle
+ # ld(powerpcle): -EL
+ 
+ 	.include "testutils.inc"
+ 
+ 	start
+ 
+ 	.data
+ 	.p2align 4
+ 
+ a_foo:	# v2
+ 	.short	0, 0, 0, 0
+ 	.short	-1, -5, -8, 3
+ d_foo:	# v5
+ 	.long	-1, -5, -8, 3
+ 
+ 	.text
+ 	.global vupklsh
+ vupklsh:
+ 	load_vr		%v2, %r3, a_foo
+ 	load_vr		%v5, %r3, d_foo
+ 
+ 	vupklsh		%v9, %v2
+ 	vcmpequw.	%v10, %v9, %v5
+ 	bc		12, 24, pass
+ 
+ 	fail
+ 
+ pass:
+ 	pass

