1 << (SBC_PROTO_FIXED4_SCALE - 1),
1 << (SBC_PROTO_FIXED4_SCALE - 1),
};
- asm volatile (
+ __asm__ volatile (
"movq (%0), %%mm0\n"
"movq 8(%0), %%mm1\n"
"pmaddwd (%1), %%mm0\n"
1 << (SBC_PROTO_FIXED8_SCALE - 1),
1 << (SBC_PROTO_FIXED8_SCALE - 1),
};
- asm volatile (
+ __asm__ volatile (
"movq (%0), %%mm0\n"
"movq 8(%0), %%mm1\n"
"movq 16(%0), %%mm2\n"
out += out_stride;
sbc_analyze_four_mmx(x + 0, out, analysis_consts_fixed4_simd_even);
- asm volatile ("emms\n");
+ __asm__ volatile ("emms\n");
}
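The `emms` each of these analyze functions now issues through `__asm__` is not optional cleanup: the MMX registers alias the x87 floating point register stack, and `emms` resets the FPU tag word so that floating point code executed afterwards sees an empty stack. A sketch of the idiom:

/* Any function touching %mm0-%mm7 must end with emms, or later x87
 * floating point code will see a corrupted register stack. */
static void mmx_user(const int16_t *p)
{
	__asm__ volatile ("movq (%0), %%mm0\n" : : "r" (p)); /* MMX work */
	__asm__ volatile ("emms"); /* leave MMX state */
}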
static inline void sbc_analyze_4b_8s_mmx(int16_t *x, int32_t *out,
out += out_stride;
sbc_analyze_eight_mmx(x + 0, out, analysis_consts_fixed8_simd_even);
- asm volatile ("emms\n");
+ __asm__ volatile ("emms\n");
}
static void sbc_calc_scalefactors_mmx(
for (sb = 0; sb < subbands; sb += 2) {
blk = (blocks - 1) * (((char *) &sb_sample_f[1][0][0] -
(char *) &sb_sample_f[0][0][0]));
- asm volatile (
+ __asm__ volatile (
"movq (%4), %%mm0\n"
"1:\n"
"movq (%1, %0), %%mm1\n"
: "cc", "memory");
}
}
- asm volatile ("emms\n");
+ __asm__ volatile ("emms\n");
}
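The `blk` computation a few lines up is a byte-stride trick: it precomputes the distance in bytes between consecutive blocks of `sb_sample_f`, scaled by `blocks - 1`, so the `(%1, %0)` addressing inside the loop can visit every block by stepping a single register down toward zero. A scalar illustration of that addressing (the asm loop body itself is truncated in this excerpt):

/* Illustration of the stride-based walk: blk starts at the byte
 * offset of the last block and steps down one block per iteration. */
ptrdiff_t stride = (char *) &sb_sample_f[1][0][0] -
		(char *) &sb_sample_f[0][0][0];
for (ptrdiff_t blk = (blocks - 1) * stride; blk >= 0; blk -= stride) {
	const int32_t *s = (const int32_t *)
			((const char *) &sb_sample_f[0][ch][sb] + blk);
	/* ... examine s[0] and s[1], i.e. subbands sb and sb + 1 ... */
}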
static int check_mmx_support(void)
return 1; /* We assume that all 64-bit processors have MMX support */
#else
int cpuid_feature_information;
- asm volatile (
+ __asm__ volatile (
/* According to Intel manual, CPUID instruction is supported
* if the value of ID bit (bit 21) in EFLAGS can be modified */
"pushf\n"
/* TODO: merge even and odd cases (or even merge all four calls to this
* function) in order to have only aligned reads from 'in' array
* and reduce number of load instructions */
- asm volatile (
+ __asm__ volatile (
"vld1.16 {d4, d5}, [%0, :64]!\n"
"vld1.16 {d8, d9}, [%1, :128]!\n"
/* TODO: merge even and odd cases (or even merge all four calls to this
* function) in order to have only aligned reads from 'in' array
* and reduce number of load instructions */
- asm volatile (
+ __asm__ volatile (
"vld1.16 {d4, d5}, [%0, :64]!\n"
"vld1.16 {d8, d9}, [%1, :128]!\n"
for (sb = 0; sb < subbands; sb += 4) {
int blk = blocks;
int32_t *in = &sb_sample_f[0][ch][sb];
- asm volatile (
+ __asm__ volatile (
"vmov.s32 q0, #0\n"
"vmov.s32 q1, %[c1]\n"
"vmov.s32 q14, #1\n"
i = subbands;
- asm volatile (
+ __asm__ volatile (
/*
* constants: q13 = (31 - SCALE_OUT_BITS), q14 = 1
* input: q0 = ((1 << SCALE_OUT_BITS) + 1)
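In scalar terms, the search these constants drive finds, per subband, the smallest `scale_factor` such that every sample magnitude fits in `SCALE_OUT_BITS + 1 + scale_factor` bits. A scalar sketch of what the NEON loop computes (names mirror the surrounding code; this is an illustration, not the file's fallback path):

/* Scalar sketch of the scale factor search the NEON code vectorizes:
 * grow scf while any sample in the subband still has set bits above
 * position SCALE_OUT_BITS + scf. */
for (sb = 0; sb < subbands; sb++) {
	uint32_t scf = 0;
	for (blk = 0; blk < blocks; blk++) {
		int32_t tmp = sb_sample_f[blk][ch][sb];
		if (tmp < 0)
			tmp = -tmp;
		while (tmp >> (SCALE_OUT_BITS + 1 + scf))
			scf++;
	}
	scale_factor[ch][sb] = scf;
}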
if (position < nsamples) {
int16_t *dst = &X[0][SBC_X_BUFFER_SIZE - 40];
int16_t *src = &X[0][position];
- asm volatile (
+ __asm__ volatile (
"vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
"vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
"vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
if (nchannels > 1) {
dst = &X[1][SBC_X_BUFFER_SIZE - 40];
src = &X[1][position];
- asm volatile (
+ __asm__ volatile (
"vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
"vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
"vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
/* poor 'pcm' alignment */
int16_t *x = &X[0][position];
int16_t *y = &X[1][position];
- asm volatile (
+ __asm__ volatile (
"vld1.8 {d0, d1}, [%[perm], :128]\n"
"1:\n"
"sub %[x], %[x], #16\n"
/* proper 'pcm' alignment */
int16_t *x = &X[0][position];
int16_t *y = &X[1][position];
- asm volatile (
+ __asm__ volatile (
"vld1.8 {d0, d1}, [%[perm], :128]\n"
"1:\n"
"sub %[x], %[x], #16\n"
"d20", "d21", "d22", "d23");
} else {
int16_t *x = &X[0][position];
- asm volatile (
+ __asm__ volatile (
"vld1.8 {d0, d1}, [%[perm], :128]\n"
"1:\n"
"sub %[x], %[x], #16\n"
if (position < nsamples) {
int16_t *dst = &X[0][SBC_X_BUFFER_SIZE - 72];
int16_t *src = &X[0][position];
- asm volatile (
+ __asm__ volatile (
"vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
"vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
"vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
if (nchannels > 1) {
dst = &X[1][SBC_X_BUFFER_SIZE - 72];
src = &X[1][position];
- asm volatile (
+ __asm__ volatile (
"vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
"vst1.16 {d0, d1, d2, d3}, [%[dst], :128]!\n"
"vld1.16 {d0, d1, d2, d3}, [%[src], :128]!\n"
/* poor 'pcm' alignment */
int16_t *x = &X[0][position];
int16_t *y = &X[1][position];
- asm volatile (
+ __asm__ volatile (
"vld1.8 {d0, d1, d2, d3}, [%[perm], :128]\n"
"1:\n"
"sub %[x], %[x], #32\n"
/* proper 'pcm' alignment */
int16_t *x = &X[0][position];
int16_t *y = &X[1][position];
- asm volatile (
+ __asm__ volatile (
"vld1.8 {d0, d1, d2, d3}, [%[perm], :128]\n"
"1:\n"
"sub %[x], %[x], #32\n"
"d20", "d21", "d22", "d23");
} else {
int16_t *x = &X[0][position];
- asm volatile (
+ __asm__ volatile (
"vld1.8 {d0, d1, d2, d3}, [%[perm], :128]\n"
"1:\n"
"sub %[x], %[x], #32\n"