/* ***** BEGIN LICENSE BLOCK *****
 * Version: RCSL 1.0/RPSL 1.0 
 *  
 * Portions Copyright (c) 1995-2002 RealNetworks, Inc. All Rights Reserved. 
 *      
 * The contents of this file, and the files included with this file, are 
 * subject to the current version of the RealNetworks Public Source License 
 * Version 1.0 (the "RPSL") available at 
 * http://www.helixcommunity.org/content/rpsl unless you have licensed 
 * the file under the RealNetworks Community Source License Version 1.0 
 * (the "RCSL") available at http://www.helixcommunity.org/content/rcsl, 
 * in which case the RCSL will apply. You may also obtain the license terms 
 * directly from RealNetworks.  You may not use this file except in 
 * compliance with the RPSL or, if you have a valid RCSL with RealNetworks 
 * applicable to this file, the RCSL.  Please see the applicable RPSL or 
 * RCSL for the rights, obligations and limitations governing use of the 
 * contents of the file.  
 *  
 * This file is part of the Helix DNA Technology. RealNetworks is the 
 * developer of the Original Code and owns the copyrights in the portions 
 * it created. 
 *  
 * This file, and the files included with this file, is distributed and made 
 * available on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
 * EXPRESS OR IMPLIED, AND REALNETWORKS HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
 * 
 * Technology Compatibility Kit Test Suite(s) Location: 
 *    http://www.helixcommunity.org/content/tck 
 * 
 * Contributor(s): 
 *  
 * ***** END LICENSE BLOCK ***** */ 

/**************************************************************************************
 * Fixed-point MP3 decoder
 * Jon Recker (jrecker@real.com), Ken Cooke (kenc@real.com)
 * June 2003
 *
 * assembly.h - assembly language functions and prototypes for supported platforms
 *
 * - inline routines with access to 64-bit multiply results
 * - x86 (_WIN32), ARM (ARM_ADS, _WIN32_WCE, GCC), AVR32, and Cortex-M4 versions
 *   included, plus a portable C fallback for other platforms
 * - some inline functions are a mix of asm and C for speed
 * - some functions are in native asm files, so only the prototype is given here
 *
 * MULSHIFT32(x, y)    signed multiply of two 32-bit integers (x and y), returns top 32 bits of 64-bit result
 * FASTABS(x)          branchless absolute value of signed integer x
 * CLZ(x)              count leading zeros in x
 * MADD64(sum, x, y)   sum [64-bit] += x [32-bit] * y [32-bit]
 * SHL64(x, n)         64-bit arithmetic left shift (not provided for every platform)
 * SAR64(x, n)         64-bit arithmetic right shift
 */
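/* Example (informative sketch, not from the original source): with Q31
 * fixed-point inputs, MULSHIFT32 yields a Q30 result, since the 64-bit
 * product of two Q31 values is Q62 and the >> 32 keeps only its top half:
 *
 *     static int Q31Mul(int x, int y)      // hypothetical helper name
 *     {
 *         return MULSHIFT32(x, y) << 1;    // Q62 >> 32 = Q30; << 1 restores Q31
 *     }
 */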

#ifndef _ASSEMBLY_H
#define _ASSEMBLY_H

#ifdef __cplusplus
extern "C" {
#endif

#if (defined _WIN32 && !defined _WIN32_WCE) || (defined __WINS__ && defined _SYMBIAN) || defined(_OPENWAVE_SIMULATOR) || defined(WINCE_EMULATOR)    /* Symbian emulator for Ix86 */

#pragma warning( disable : 4035 )	/* complains about inline asm not returning a value */

static __inline int MULSHIFT32(int x, int y)
{
    __asm {
        mov     eax, x
        imul    y               /* edx:eax = 64-bit signed product x * y */
        mov     eax, edx        /* return top 32 bits, left in eax (see pragma above) */
    }
}

static __inline int FASTABS(int x)
{
	int sign;

	sign = x >> (sizeof(int) * 8 - 1);	/* sign = -1 if x < 0, else 0 */
	x ^= sign;				/* one's complement if x was negative */
	x -= sign;				/* add 1 if x was negative */

	return x;
}
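/* worked example: FASTABS(-5): sign = -1 (all ones), -5 ^ -1 = 4 (one's
 * complement), 4 - (-1) = 5; note INT_MIN maps to itself, as with any plain
 * two's-complement abs */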

static __inline int CLZ(int x)
{
	int numZeros;

	if (!x)
		return (sizeof(int) * 8);

	numZeros = 0;
	while (!(x & 0x80000000)) {
		numZeros++;
		x <<= 1;
	} 

	return numZeros;
}

/* MADD64, SHL64, SAR64:
 * written in assembly to avoid a dependency on the run-time library for 64-bit
 * shifts and multiplies (the compiler sometimes emits function calls instead of
 * generating inline code)
 * required for the Symbian emulator
 */
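/* For reference, portable C equivalents of these three (a sketch mirroring the
 * generic fallback at the bottom of this file):
 *
 *     MADD64(sum, x, y)  ->  sum + (Word64)x * y
 *     SHL64(x, n)        ->  x << n
 *     SAR64(x, n)        ->  x >> n    (arithmetic, since Word64 is signed)
 */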
#ifdef __CW32__
typedef long long Word64;
#else
typedef __int64 Word64;
#endif

static __inline Word64 MADD64(Word64 sum, int x, int y)
{
	unsigned int sumLo = ((unsigned int *)&sum)[0];
	int sumHi = ((int *)&sum)[1];

	__asm {
		mov		eax, x
		imul	y
		add		eax, sumLo
		adc		edx, sumHi
	}

	/* equivalent to return (sum + ((__int64)x * y)); */
}

static __inline Word64 SHL64(Word64 x, int n)
{
	unsigned int xLo = ((unsigned int *)&x)[0];
	int xHi = ((int *)&x)[1];
	unsigned char nb = (unsigned char)n;

	if (n < 32) {
		__asm {
			mov		edx, xHi
			mov		eax, xLo
			mov		cl, nb
			shld    edx, eax, cl
			shl     eax, cl
		}
	} else if (n < 64) {
		/* shl masks cl to 0x1f */
		__asm {
			mov		edx, xLo
			mov		cl, nb
			xor     eax, eax
			shl     edx, cl
		}
	} else {
		__asm {
			xor		edx, edx
			xor		eax, eax
		}
	}
}

static __inline Word64 SAR64(Word64 x, int n)
{
	unsigned int xLo = ((unsigned int *)&x)[0];
	int xHi = ((int *)&x)[1];
	unsigned char nb = (unsigned char)n;

	if (n < 32) {
		__asm {
			mov		edx, xHi
			mov		eax, xLo
			mov		cl, nb
			shrd	eax, edx, cl
			sar		edx, cl
		}
	} else if (n < 64) {
		/* sar masks cl to 0x1f */
		__asm {
			mov		edx, xHi
			mov		eax, xHi
			mov		cl, nb
			sar		edx, 31
			sar		eax, cl
		}
	} else {
		__asm {
			sar		xHi, 31
			mov		eax, xHi
			mov		edx, xHi
		}
	}
}

#elif (defined _WIN32) && (defined _WIN32_WCE)

/* use asm function for now (EVC++ 3.0 does a poor job of compiling the __int64 version) */
#define MULSHIFT32	xmp3_MULSHIFT32
int MULSHIFT32(int x, int y);

static __inline int FASTABS(int x) 
{
	int sign;

	sign = x >> (sizeof(int) * 8 - 1);
	x ^= sign;
	x -= sign;

	return x;
}

static __inline int CLZ(int x)
{
	int numZeros;

	if (!x)
		return (sizeof(int) * 8);

	numZeros = 0;
	while (!(x & 0x80000000)) {
		numZeros++;
		x <<= 1;
	} 

	return numZeros;
}

#elif defined ARM_ADS

static __inline int MULSHIFT32(int x, int y)
{
    /* important rules for smull RdLo, RdHi, Rm, Rs:
     *     RdHi and Rm can't be the same register
     *     RdLo and Rm can't be the same register
     *     RdHi and RdLo can't be the same register
     * Note: Rs determines early termination (leading sign bits) so if you want to specify
     *   which operand is Rs, put it in the SECOND argument (y)
     * For inline assembly, x and y are not assumed to be R0, R1 so it shouldn't matter
     *   which one is returned. (If this were a function call, returning y (R1) would
     *   require an extra "mov r0, r1")
     */
    int zlow;
    __asm {
        smull zlow, y, x, y
    }

    return y;
}

static __inline int FASTABS(int x) 
{
	int t = 0;	/* initialization is not strictly necessary; it only avoids a compiler warning */

	__asm {
		eor	t, x, x, asr #31
		sub	t, t, x, asr #31
	}

	return t;
}

static __inline int CLZ(int x)
{
	int numZeros;

	if (!x)
		return (sizeof(int) * 8);

	numZeros = 0;
	while (!(x & 0x80000000)) {
		numZeros++;
		x <<= 1;
	} 

	return numZeros;
}

#elif defined(__GNUC__) && (defined(ARM) || defined(__ARMEL__)) && (__ARM_ARCH >= 7)

static __inline int MULSHIFT32(int x, int y)
{
    /* important rules for smull RdLo, RdHi, Rm, Rs:
     *     RdHi and Rm can't be the same register
     *     RdLo and Rm can't be the same register
     *     RdHi and RdLo can't be the same register
     * Note: Rs determines early termination (leading sign bits) so if you want to specify
     *   which operand is Rs, put it in the SECOND argument (y)
     * For inline assembly, x and y are not assumed to be R0, R1 so it shouldn't matter
     *   which one is returned. (If this were a function call, returning y (R1) would
     *   require an extra "mov r0, r1")
     */
    int zlow;
    __asm__ volatile ("smull %0,%1,%2,%3" : "=&r" (zlow), "=r" (y) : "r" (x), "1" (y)) ;

    return y;
}

static __inline int FASTABS(int x)
{
	int t = 0;	/* initialization is not strictly necessary; it only avoids a compiler warning */

	__asm__ volatile (
		"eor %0,%2,%2, asr #31;"	/* t = x ^ (x >> 31) */
		"sub %0,%1,%2, asr #31;"	/* t = t - (x >> 31); %1 is tied to %0 */
		: "=&r" (t)
		: "0" (t), "r" (x)
	 );

	return t;
}

static __inline int CLZ(int x)
{
	int numZeros;

	if (!x)
		return (sizeof(int) * 8);

	numZeros = 0;
	while (!(x & 0x80000000)) {
		numZeros++;
		x <<= 1;
	}

	return numZeros;
}

typedef signed long long int    Word64;  // 64-bit signed integer.
typedef union _U64 {
        Word64 w64;
        struct {
                /* ARM ADS = little endian */
                unsigned int lo32;
                signed int   hi32;
        } r;
} U64;
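/* the U64 union lets smlal's RdLo/RdHi halves be named individually in asm
 * constraints while callers still pass and receive a single 64-bit value */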

static __inline Word64 MADD64(Word64 sum64, int x, int y)
{
        U64 u;
        u.w64 = sum64;

        __asm__ volatile ("smlal %0,%1,%2,%3" : "+&r" (u.r.lo32), "+&r" (u.r.hi32) : "r" (x), "r" (y) : "cc");

        return u.w64;
}
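/* equivalent to return (sum64 + ((Word64)x * y)); smlal accumulates the 64-bit
 * product of x and y into hi32:lo32 in place */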

__attribute__((__always_inline__)) static __inline Word64 SAR64(Word64 x, int n)
{
  unsigned int xLo = (unsigned int) x;
  int xHi = (int) (x >> 32);
  int nComp = 32-n;
  int tmp;
  // Shortcut: n is always < 32.
  __asm__ __volatile__( "lsl %2, %0, %3\n\t"  // tmp <- xHi<<(32-n)
                        "asr %0, %0, %4\n\t"  // xHi <- xHi>>n
                        "lsr %1, %1, %4\n\t"  // xLo <- xLo>>n
                        "orr  %1, %2\n\t"      // xLo <= xLo || tmp
                        : "+&r" (xHi), "+r" (xLo), "=&r" (tmp)
                        : "r" (nComp), "r" (n) );
  x = xLo | ((Word64)xHi << 32);
  return( x );
}
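/* The asm above composes a 64-bit arithmetic shift from 32-bit operations; a
 * portable sketch of the same idea (valid only for 0 < n < 32, the range this
 * routine assumes):
 *
 *     xLo = (xLo >> n) | ((unsigned int)xHi << (32 - n));
 *     xHi =  xHi >> n;
 */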
#elif defined(__GNUC__) && defined(__AVR32_UC__)

typedef signed long long int    Word64;  // 64-bit signed integer.


__attribute__((__always_inline__)) static __inline int MULSHIFT32(int x, int y)
{
    signed long long int s64Tmp;
    __asm__ __volatile__( "muls.d	%0, %1, %2"
                          : "=r" (s64Tmp)
                          : "r" (x), "r" (y) );
    return( s64Tmp >> 32 );
}

__attribute__((__always_inline__)) static __inline int FASTABS(int x)
{
    int tmp;
    /* AVR32 "abs" operates in place, so tie the input to the output register */
    __asm__ __volatile__( "abs	%0"
                          : "=r" (tmp)
                          : "0" (x) );
    return tmp;
}


__attribute__((__always_inline__))  static __inline int CLZ(int x)
{
    int tmp;
    __asm__ __volatile__( "clz %0,%1"
                          : "=r" (tmp)
                          : "r" (x) );
    return tmp;
}


/* MADD64, SAR64:
 * written in assembly to avoid a dependency on the run-time library for 64-bit
 * shifts and multiplies (the compiler sometimes emits function calls instead of
 * generating inline code)
 */
__attribute__((__always_inline__)) static __inline Word64 MADD64(Word64 sum, int x, int y)
{
  __asm__ __volatile__( "macs.d %0, %1, %2"
                        : "+r" (sum)
                        : "r" (x), "r" (y) );
  return( sum );
}
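/* equivalent to return (sum + ((Word64)x * y)); macs.d multiplies x by y and
 * accumulates the 64-bit product into sum in place */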


__attribute__((__always_inline__)) static __inline Word64 SAR64(Word64 x, int n)
{
  unsigned int xLo = (unsigned int) x;
  int xHi = (int) (x >> 32);
  int nComp = 32-n;
  int tmp;
  // Shortcut: n is always < 32. 
  __asm__ __volatile__( "lsl %2, %0, %3\n\t"  // tmp <- xHi<<(32-n)
                        "asr %0, %0, %4\n\t"  // xHi <- xHi>>n
                        "lsr %1, %1, %4\n\t"  // xLo <- xLo>>n
                        "or  %1, %2\n\t"      // xLo <= xLo || tmp
                        : "+&r" (xHi), "+r" (xLo), "=&r" (tmp)
                        : "r" (nComp), "r" (n) );
  x = xLo | ((Word64)xHi << 32);
  return( x );
}

#elif (defined(__CORTEX_M) && __CORTEX_M == 0x04U) || defined(__MK66FX1M0__) || defined(__MK64FX512__) || defined(__MK20DX256__)	/* teensy 3.6, 3.5, or 3.1/2 */

/* ARM cortex m4 */

typedef signed long long int    Word64;  // 64-bit signed integer.


static __inline int MULSHIFT32(int x, int y)
{
    /* important rules for smull RdLo, RdHi, Rm, Rs:
     *     RdHi and Rm can't be the same register
     *     RdLo and Rm can't be the same register
     *     RdHi and RdLo can't be the same register
     * Note: Rs determines early termination (leading sign bits) so if you want to specify
     *   which operand is Rs, put it in the SECOND argument (y)
     * For inline assembly, x and y are not assumed to be R0, R1 so it shouldn't matter
     *   which one is returned. (If this were a function call, returning y (R1) would
     *   require an extra "mov r0, r1")
     */
    int zlow;
    __asm__ volatile ("smull %0,%1,%2,%3" : "=&r" (zlow), "=r" (y) : "r" (x), "1" (y)) ;

    return y;
}

static __inline int FASTABS(int x)
{
        int sign;

        sign = x >> (sizeof(int) * 8 - 1);
        x ^= sign;
        x -= sign;

        return x;
}

static __inline int CLZ(int x)
{
	if (!x)
		return (sizeof(int) * 8);	/* __builtin_clz(0) is undefined; match the other branches */
#if defined(__MK66FX1M0__) || defined(__MK64FX512__) || defined(__MK20DX256__)	/* teensy 3.6, 3.5, or 3.1/2 */
	return __builtin_clz(x);
#else
	return __CLZ(x);
#endif
}

typedef union _U64 {
        Word64 w64;
        struct {
                /* ARM ADS = little endian */
                unsigned int lo32;
                signed int   hi32;
        } r;
} U64;

static __inline Word64 MADD64(Word64 sum64, int x, int y)
{
        U64 u;
        u.w64 = sum64;
        
        __asm__ volatile ("smlal %0,%1,%2,%3" : "+&r" (u.r.lo32), "+&r" (u.r.hi32) : "r" (x), "r" (y) : "cc");
        
        return u.w64;
}
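/* as in the ARMv7 GCC branch above: equivalent to return (sum64 + ((Word64)x * y)) */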


__attribute__((__always_inline__)) static __inline Word64 SAR64(Word64 x, int n)
{
  unsigned int xLo = (unsigned int) x;
  int xHi = (int) (x >> 32);
  int nComp = 32-n;
  int tmp;
  // Shortcut: n is always < 32. 
  __asm__ __volatile__( "lsl %2, %0, %3\n\t"  // tmp <- xHi<<(32-n)
                        "asr %0, %0, %4\n\t"  // xHi <- xHi>>n
                        "lsr %1, %1, %4\n\t"  // xLo <- xLo>>n
                        "orr  %1, %2\n\t"      // xLo <= xLo || tmp
                        : "+&r" (xHi), "+r" (xLo), "=&r" (tmp)
                        : "r" (nComp), "r" (n) );
  x = xLo | ((Word64)xHi << 32);
  return( x );
}

//END cortex m4


#else

#include <stdint.h>

typedef int64_t Word64;

static inline int MULSHIFT32(int x, int y) {
    return ((int64_t)x * y) >> 32;
}

static inline int FASTABS(int x) {
    int sign = x >> 31;
    return (x ^ sign) - sign;
}

static inline int CLZ(int x) {
    if (!x)
        return 32;  /* __builtin_clz(0) is undefined; match the asm versions */
    return __builtin_clz(x);
}

static inline Word64 MADD64(Word64 sum, int x, int y) {
    return sum + (int64_t)x * y;
}

static inline Word64 SHL64(Word64 x, int n) {
    return x << n;
}

static inline Word64 SAR64(Word64 x, int n) {
    return x >> n;
}

static inline short SAR64_Clip(Word64 x) {
    /* shift down by 26 and truncate to 16 bits (no saturation is applied) */
    return (short)SAR64(x, 26);
}
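/* Informative sanity checks for the fallback versions (hypothetical test
 * values, not part of the decoder):
 *
 *     MULSHIFT32(0x40000000, 0x40000000) == 0x10000000  // 0.5 * 0.5 in Q31 -> 0.25 in Q30
 *     FASTABS(-5) == 5
 *     CLZ(1) == 31
 *     MADD64(1, 3, 4) == 13
 */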

#endif	/* platforms */

#ifdef __cplusplus
}
#endif
#endif /* _ASSEMBLY_H */