aboutsummaryrefslogtreecommitdiff
path: root/sc4pd/headers/common/SC_Altivec.h
blob: 8ae3f66427a67088e322891964d9d292a5661bbe (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
/*
	SuperCollider real time audio synthesis system
    Copyright (c) 2003 James McCartney. All rights reserved.
	http://www.audiosynth.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

#ifndef _SC_Altivec_
#define _SC_Altivec_

#if defined(SC_LINUX) && defined(__ALTIVEC__)
# include <altivec.h>
#endif

#if __VEC__

// Shorthand typedefs for the 128-bit AltiVec vector types (4 lanes each).
typedef vector signed int vint32;
typedef vector unsigned int vuint32;
typedef vector float vfloat32;

// Since gcc 3.3 vector initializers are surrounded by brackets. <sk>
// FIX: the previous test `(__GNUC__ >= 3) && (__GNUC_MINOR__ >= 3)` was wrong
// for gcc 4.x and later (e.g. 4.0 has minor 0 < 3, selecting the old syntax);
// compare the (major, minor) pair lexicographically instead.
#if defined(__GNUC__) && ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)))
# define vinit(x)	{ x, x, x, x }
#else
# define vinit(x)	( x )
#endif

//#define vload(x) (*vtempptr = (x), vec_splat(vtemp,0))
// Scratch vector plus a float pointer into it; macros below (vec_not) assume
// these exact names are in scope at their expansion site.
#define define_vtemp vfloat32 vtemp; float *vtempptr = (float*)&vtemp;
// All-1.0f and all-0.0f float vectors, built without a memory load.
#define define_vones vfloat32 vones = vec_ctf(vec_splat_s32(1),0);
#define define_vzero vfloat32 vzero = (vfloat32)vec_splat_s32(0);
// Integer lanes (0,1,2,3): vec_lvsl(0,NULL) yields the byte permute indices
// 0..15, and two high unpacks widen the first four to 32-bit ints.
#define vi0123 (vec_unpackh(vec_unpackh((vector signed char)vec_lvsl(0,(int*)0))))
// Float lanes (0.0, 1.0, 2.0, 3.0).
#define v0123 (vec_ctf(vec_unpackh(vec_unpackh((vector signed char)vec_lvsl(0,(int*)0))), 0))
// Float lanes (0.0, 0.25, 0.5, 0.75) -- vec_ctf scale factor 2 divides by 4.
#define v0123_4ths (vec_ctf(vec_unpackh(vec_unpackh((vector signed char)vec_lvsl(0,(int*)0))), 2))
// Per-lane ramp start: x + vslope * (0, .25, .5, .75).
#define vstart(x, vslope) (vec_madd(vslope, v0123_4ths, vstart_load_helper(x)))

// Bitwise NOT via NOR(a, a); requires define_vtemp in scope.
// FIX: a macro used inside expressions must expand to a single expression --
// the original separated the two sub-expressions with `;`, a syntax error at
// every expansion site; use the comma operator instead (as the commented-out
// vload macro above already did).
#define vec_not(a) (vtemp = (a), vec_nor(vtemp, vtemp))
// a < b and a <= b expressed through the greater-than intrinsics.
#define vec_cmplt(a, b) (vec_cmpgt(b, a))
#define vec_cmple(a, b) (vec_cmpge(b, a))
// Plain multiply via fused multiply-add with a zero addend; requires
// define_vzero in scope.
#define vec_mul(a, b) (vec_madd(a, b, vzero))
// Two's-complement negate, (x - x) - x == -x, without a zero constant.
#define vec_2sComp(x) (vec_sub(vec_sub (x, x), x))

// Take the vector code path only when AltiVec is available at runtime and the
// processing block length is a multiple of 4 (one full vector per step).
#define USEVEC (ft->mAltivecAvailable && !(BUFLENGTH & 3))

// 16-byte overlay that aliases whole vectors with their four scalar lanes,
// allowing individual elements to be written or read through the arrays.
typedef union vec_union {
        vfloat32	vf;
        vint32		vi;
        float32		f[4];
        int32		i[4];
} vec_union;

// Splat a scalar float into all four lanes of a vfloat32.
inline vfloat32 vload( float f )
{
        vec_union u;
        u.f[0] = f;
        // load the single element into a vector, then replicate lane 0
        return vec_splat( vec_lde( 0, u.f ), 0 );
}

// Splat a scalar int32 into all four lanes of a vint32.
inline vint32 vload( int32 i )
{
        vec_union u;
        u.i[0] = i;
        // load the single element into a vector, then replicate lane 0
        return vec_splat( vec_lde( 0, u.i ), 0 );
}

// Pack four distinct int32 values into the four lanes of a vint32,
// lane order (a, b, c, d).
inline vint32 vload( int32 a, int32 b, int32 c, int32 d )
{
        // aggregate-initialize the first union member, the int32[4] array
        vec_union u = { { a, b, c, d } };
        return u.vi;
}

// Build the constant vector (1.f, 1.f, 1.f, 1.f) without touching memory.
inline vector float vec_float_1( void )
{
	vector unsigned int one = vec_splat_u32( 1 );
	return vec_ctf( one, 0 );
}

// Approximate 1/v: the hardware reciprocal estimate refined by one
// Newton-Raphson step for extra precision.
inline vector float vec_reciprocal( vector float v )
{
	vector float est = vec_re( v );                        // initial estimate
	vector float err = vec_nmsub( est, v, vec_float_1() ); // 1 - est*v
	return vec_madd( est, err, est );                      // est + est*err
}

// Parallel divide a/b as a * (1/b) using the refined reciprocal above.
// NOTE(review): expands vec_mul, which references `vzero` -- a define_vzero
// must be in scope at every expansion site.
#define vec_div(a, b)  vec_mul(a, vec_reciprocal(b))

// seed = ((seed & mask) << shift1) ^ (((seed << shift2) ^ seed) >> shift3);

// Per-lane constants for three independent shift-register random generators
// running in lanes 0..2 (lane 3 gets mask/shifts of 0); consumed by trands().
// NOTE(review): these are old Motorola-style parenthesized vector literals
// ((vuint32)(a,b,c,d)); gcc >= 3.3 expects brace-enclosed initializers (see
// the vinit note above) -- confirm against the compilers this still targets.
#define define_trshifts \
	vuint32 trmask = ((vuint32)(0xFFFFFFFE,0xFFFFFFF8,0xFFFFFFF0,0)); \
	vuint32 trshift1 = ((vuint32)(12, 14,  7,  0)); \
	vuint32 trshift2 = ((vuint32)(13,  2,  3,  0)); \
	vuint32 trshift3 = ((vuint32)(19, 25, 11,  0));

// One parallel step of the shift-register random generators:
// seed' = ((seed & trmask) << shift1) ^ (((seed << shift2) ^ seed) >> shift3)
inline vuint32 trands(vuint32 seed, vuint32 trmask, vuint32 shift1, vuint32 shift2, vuint32 shift3)
{
	vuint32 lhs = vec_sl(vec_and(seed, trmask), shift1);
	vuint32 rhs = vec_sr(vec_xor(vec_sl(seed, shift2), seed), shift3);
	return vec_xor(lhs, rhs);
}

#endif
#endif