Commit 7156ccb8 authored by Raymond Knopp

ulsim compiles, dlsim mostly done

parent 9d287e2d
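
The whole diff follows one pattern: AArch64 compilers define __aarch64__ but not __arm__, so every NEON path guarded by #elif defined(__arm__) was dead code on 64-bit ARM builds. The NEON intrinsics themselves are source-compatible between ARMv7 and AArch64 (same arm_neon.h), so extending each guard is usually the entire port; the CMakeLists hunk below adds the matching "aarch64" branch to the processor switch and keeps AVX2 disabled there. A minimal sketch of the dispatch, assuming nothing beyond the predefined compiler macros:

    #if defined(__x86_64__) || defined(__i386__)
    #include <emmintrin.h>   /* SSE2: __m128i, _mm_* intrinsics */
    #elif defined(__arm__) || defined(__aarch64__)
    #include <arm_neon.h>    /* same header and intrinsics on ARMv7 and AArch64 */
    #else
    #error "unsupported architecture"
    #endif
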
......@@ -127,7 +127,11 @@ add_list_string_option(CMAKE_BUILD_TYPE "RelWithDebInfo" "Choose the type of bui
Message("Architecture is ${CMAKE_SYSTEM_PROCESSOR}")
if (CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7l")
set(C_FLAGS_PROCESSOR "-gdwarf-2 -mfloat-abi=hard -mfpu=neon -lgcc -lrt")
else (CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7l")
set(COMPILATION_AVX2 "False")
elseif (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
set(C_FLAGS_PROCESSOR "-gdwarf-2 -lgcc -lrt")
set(COMPILATION_AVX2 "False")
else (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
if(EXISTS "/proc/cpuinfo")
file(STRINGS "/proc/cpuinfo" CPUINFO REGEX flags LIMIT_COUNT 1)
if (CPUINFO MATCHES "avx2")
......
......@@ -62,7 +62,7 @@ struct treillis {
int exit_state;
} __attribute__ ((aligned(64)));
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
struct treillis {
union {
......@@ -192,7 +192,7 @@ char interleave_compact_byte(short * base_interleaver,unsigned char * input, uns
0b01000000,
0b10000000);
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
uint8x16_t *i_128=(uint8x16_t *)input, *o_128=(uint8x16_t *)expandInput;
uint8x16_t tmp1,tmp2;
uint16x8_t tmp3;
......@@ -335,7 +335,7 @@ char interleave_compact_byte(short * base_interleaver,unsigned char * input, uns
o_256+=8;
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
tmp1=vld1q_u8((uint8_t*)i_128);
//print_bytes("tmp1:",(uint8_t*)&tmp1);
......@@ -421,7 +421,7 @@ char interleave_compact_byte(short * base_interleaver,unsigned char * input, uns
__m256i tmp;
uint32_t *systematic2_ptr=(uint32_t *) output;
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
uint8x16_t tmp;
const uint8_t __attribute__ ((aligned (16))) _Powers[16]=
{ 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };
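
The _Powers table is the standard scaffolding for emulating _mm_movemask_epi8 / _mm256_movemask_epi8, which NEON does not provide: AND the per-byte 0x00/0xFF compare mask with powers of two, then horizontally reduce each half to one byte. A sketch of the 128-bit case (hypothetical helper name, little-endian assumed):

    #include <arm_neon.h>
    static inline uint16_t neon_movemask_u8(uint8x16_t in)
    {
      static const uint8_t p16[16] = {1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128};
      uint8x16_t m = vandq_u8(in, vld1q_u8(p16)); /* keep one bit per lane */
      uint8x8_t lo = vget_low_u8(m), hi = vget_high_u8(m);
      lo = vpadd_u8(lo, lo); lo = vpadd_u8(lo, lo); lo = vpadd_u8(lo, lo); /* sum 8 bytes */
      hi = vpadd_u8(hi, hi); hi = vpadd_u8(hi, hi); hi = vpadd_u8(hi, hi);
      return (uint16_t)(vget_lane_u8(lo, 0) | (vget_lane_u8(hi, 0) << 8));
    }
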
......@@ -497,7 +497,7 @@ char interleave_compact_byte(short * base_interleaver,unsigned char * input, uns
*systematic2_ptr++=(unsigned int)_mm256_movemask_epi8(tmp);
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
tmp=vsetq_lane_u8(expandInput[*ptr_intl++],tmp,7);
tmp=vsetq_lane_u8(expandInput[*ptr_intl++],tmp,6);
tmp=vsetq_lane_u8(expandInput[*ptr_intl++],tmp,5);
......@@ -571,7 +571,7 @@ void threegpplte_turbo_encoder_sse(unsigned char *input,
#if defined(__x86_64__) || defined(__i386__)
__m64 *ptr_output=(__m64*) output;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
uint8x8_t *ptr_output=(uint8x8_t*)output;
#endif
unsigned char cur_s1, cur_s2;
......@@ -593,7 +593,7 @@ void threegpplte_turbo_encoder_sse(unsigned char *input,
all_treillis[state1][cur_s2].parity2_64[code_rate]);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
*ptr_output++ = vadd_u8(all_treillis[state0][cur_s1].systematic_andp1_64[code_rate],
all_treillis[state0][cur_s1].parity2_64[code_rate]);
#endif
......
......@@ -152,7 +152,7 @@ void compute_gamma8(llr_t* m11,llr_t* m10,llr_t* systematic,channel_t* y_parity,
__m128i *y_parity128 = (__m128i *)y_parity;
__m128i *m10_128 = (__m128i *)m10;
__m128i *m11_128 = (__m128i *)m11;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int8x16_t *systematic128 = (int8x16_t *)systematic;
int8x16_t *y_parity128 = (int8x16_t *)y_parity;
int8x16_t *m10_128 = (int8x16_t *)m10;
......@@ -178,7 +178,7 @@ void compute_gamma8(llr_t* m11,llr_t* m10,llr_t* systematic,channel_t* y_parity,
_mm_srai_epi16(_mm_adds_epi16(sh,yph),1));
m10_128[k] = _mm_packs_epi16(_mm_srai_epi16(_mm_subs_epi16(sl,ypl),1),
_mm_srai_epi16(_mm_subs_epi16(sh,yph),1));
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
m11_128[k] = vhaddq_s8(systematic128[k],y_parity128[k]);
m10_128[k] = vhsubq_s8(systematic128[k],y_parity128[k]);
#endif
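
Worth noting why the NEON branch is two lines against the x86 branch's six: SSE2 has no 8-bit halving add, so the x86 path widens to 16 bits, adds, shifts and repacks, while vhaddq_s8/vhsubq_s8 compute the truncating (a+b)>>1 and (a-b)>>1 directly with a 9-bit internal result. A sketch of the reduced gamma kernel (function name hypothetical):

    #include <arm_neon.h>
    /* branch metrics for one 16-lane block: (s+y)/2 and (s-y)/2 per int8 lane */
    static inline void gamma8_kernel(int8x16_t s, int8x16_t yp,
                                     int8x16_t *m11, int8x16_t *m10)
    {
      *m11 = vhaddq_s8(s, yp);  /* (s + yp) >> 1, cannot overflow internally */
      *m10 = vhsubq_s8(s, yp);  /* (s - yp) >> 1 */
    }

The 16-bit analogue vhaddq_s16 plays the same role in the MRC combiners further down.
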
......@@ -196,7 +196,7 @@ void compute_gamma8(llr_t* m11,llr_t* m10,llr_t* systematic,channel_t* y_parity,
_mm_srai_epi16(_mm_adds_epi16(sh,yph),1));
m10_128[k] = _mm_packs_epi16(_mm_srai_epi16(_mm_subs_epi16(sl,ypl),1),
_mm_srai_epi16(_mm_subs_epi16(sh,yph),1));
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
m11_128[k] = vhaddq_s8(systematic128[k+term_flag],y_parity128[k]);
m10_128[k] = vhsubq_s8(systematic128[k+term_flag],y_parity128[k]);
#endif
......@@ -215,7 +215,7 @@ void compute_alpha8(llr_t* alpha,llr_t* beta,llr_t* m_11,llr_t* m_10,unsigned sh
__m128i m_b0,m_b1,m_b2,m_b3,m_b4,m_b5,m_b6,m_b7;
__m128i new0,new1,new2,new3,new4,new5,new6,new7;
__m128i alpha_max;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int8x16_t *alpha128=(int8x16_t *)alpha,*alpha_ptr;
int8x16_t *m11p,*m10p;
int8x16_t m_b0,m_b1,m_b2,m_b3,m_b4,m_b5,m_b6,m_b7;
......@@ -311,7 +311,7 @@ void compute_alpha8(llr_t* alpha,llr_t* beta,llr_t* m_11,llr_t* m_10,unsigned sh
alpha[112] = -MAX8/2;
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
alpha128[0] = vdupq_n_s8(-MAX8/2);
alpha128[0] = vsetq_lane_s8(0,alpha128[0],0);
alpha128[1] = vdupq_n_s8(-MAX8/2);
......@@ -415,7 +415,7 @@ void compute_beta8(llr_t* alpha,llr_t* beta,llr_t *m_11,llr_t* m_10,unsigned sho
__m128i *beta128,*alpha128,*beta_ptr;
__m128i beta_max;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int8x16_t m11_128,m10_128;
int8x16_t m_b0,m_b1,m_b2,m_b3,m_b4,m_b5,m_b6,m_b7;
int8x16_t new0,new1,new2,new3,new4,new5,new6,new7;
......@@ -499,7 +499,7 @@ void compute_beta8(llr_t* alpha,llr_t* beta,llr_t *m_11,llr_t* m_10,unsigned sho
#if defined(__x86_64__) || defined(__i386__)
beta_ptr = (__m128i*)&beta[frame_length<<3];
alpha128 = (__m128i*)&alpha[0];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
beta_ptr = (int8x16_t*)&beta[frame_length<<3];
alpha128 = (int8x16_t*)&alpha[0];
#endif
......@@ -532,7 +532,7 @@ void compute_beta8(llr_t* alpha,llr_t* beta,llr_t *m_11,llr_t* m_10,unsigned sho
beta_ptr[5] = _mm_insert_epi8(beta_ptr[5],beta5,15);
beta_ptr[6] = _mm_insert_epi8(beta_ptr[6],beta6,15);
beta_ptr[7] = _mm_insert_epi8(beta_ptr[7],beta7,15);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
beta_ptr[0] = vsetq_lane_s8(beta0,beta_ptr[0],15);
beta_ptr[1] = vsetq_lane_s8(beta1,beta_ptr[1],15);
beta_ptr[2] = vsetq_lane_s8(beta2,beta_ptr[2],15);
......@@ -546,7 +546,7 @@ void compute_beta8(llr_t* alpha,llr_t* beta,llr_t *m_11,llr_t* m_10,unsigned sho
#if defined(__x86_64__) || defined(__i386__)
beta_ptr = (__m128i*)&beta[frame_length<<3];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
beta_ptr = (int8x16_t*)&beta[frame_length<<3];
#endif
for (k=(frame_length>>4)-1;
......@@ -600,7 +600,7 @@ void compute_beta8(llr_t* alpha,llr_t* beta,llr_t *m_11,llr_t* m_10,unsigned sho
beta_ptr[5] = _mm_subs_epi8(beta_ptr[5],beta_max);
beta_ptr[6] = _mm_subs_epi8(beta_ptr[6],beta_max);
beta_ptr[7] = _mm_subs_epi8(beta_ptr[7],beta_max);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
m11_128=((int8x16_t*)m_11)[k];
m10_128=((int8x16_t*)m_10)[k];
m_b0 = vqaddq_s8(beta_ptr[4],m11_128); //m11
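
The beta (and alpha) recursions rely on saturating 8-bit arithmetic so the path metrics cannot wrap; each trellis butterfly is then two saturating adds and a lane-wise max. An illustrative butterfly with simplified state wiring (the real index pattern is in the elided lines):

    #include <arm_neon.h>
    /* one max-log-MAP butterfly over 16 parallel int8 metrics (illustrative) */
    static inline int8x16_t beta_butterfly(int8x16_t beta_a, int8x16_t beta_b,
                                           int8x16_t m11)
    {
      int8x16_t p0 = vqaddq_s8(beta_a, m11); /* saturating add keeps metrics in range */
      int8x16_t p1 = vqsubq_s8(beta_b, m11);
      return vmaxq_s8(p0, p1);               /* max-log approximation of log-sum */
    }
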
......@@ -666,7 +666,7 @@ void compute_beta8(llr_t* alpha,llr_t* beta,llr_t *m_11,llr_t* m_10,unsigned sho
beta_ptr[5] = _mm_srli_si128(beta128[5],1);
beta_ptr[6] = _mm_srli_si128(beta128[6],1);
beta_ptr[7] = _mm_srli_si128(beta128[7],1);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
beta128 = (int8x16_t*)&beta[0];
beta_ptr = (int8x16_t*)&beta[frame_length<<3];
beta_ptr[0] = (int8x16_t)vshrq_n_s64((int64x2_t)beta128[0],8); beta_ptr[0] = vsetq_lane_s8(beta[7],beta_ptr[0],8);
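
_mm_srli_si128(x,1) shifts the whole 128-bit register right by one byte; the NEON replacement above shifts each 64-bit half with vshrq_n_s64 and patches the byte that falls across the half boundary with vsetq_lane_s8. For comparison, vextq_s8 does the full 16-byte shift in one instruction by extracting from a concatenated pair; a sketch:

    #include <arm_neon.h>
    /* whole-register shift right by one byte, zero fill: _mm_srli_si128(x,1) */
    static inline int8x16_t bsrl1_s8(int8x16_t x)
    {
      return vextq_s8(x, vdupq_n_s8(0), 1); /* bytes 1..15 of x, then one zero */
    }
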
......@@ -693,7 +693,7 @@ void compute_ext8(llr_t* alpha,llr_t* beta,llr_t* m_11,llr_t* m_10,llr_t* ext, l
__m128i m01_1,m01_2,m01_3,m01_4;
__m128i m10_1,m10_2,m10_3,m10_4;
__m128i m11_1,m11_2,m11_3,m11_4;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int8x16_t *alpha128=(int8x16_t *)alpha;
int8x16_t *beta128=(int8x16_t *)beta;
int8x16_t *m11_128,*m10_128,*ext_128;
......@@ -770,7 +770,7 @@ void compute_ext8(llr_t* alpha,llr_t* beta,llr_t* m_11,llr_t* m_10,llr_t* ext, l
alpha_ptr+=8;
beta_ptr+=8;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
m11_128 = (int8x16_t*)&m_11[k<<4];
m10_128 = (int8x16_t*)&m_10[k<<4];
......@@ -952,7 +952,7 @@ unsigned char phy_threegpplte_turbo_decoder8(short *y,
__m128i *yp128;
__m128i tmp128[(n+8)>>3];
__m128i tmp, zeros=_mm_setzero_si128();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int8x16_t *yp128;
int8x16_t tmp128[(n+8)>>3];
int8x16_t tmp, zeros=vdupq_n_s8(0);
......@@ -1039,7 +1039,7 @@ unsigned char phy_threegpplte_turbo_decoder8(short *y,
yp128 = (__m128i*)y8;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int32x4_t avg=vdupq_n_s32(0);
......@@ -1188,7 +1188,7 @@ unsigned char phy_threegpplte_turbo_decoder8(short *y,
yp2[j] = _mm_extract_epi8(yp128[2],15);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
s[j] = vgetq_lane_s8(yp128[0],0);
yp1[j] = vgetq_lane_s8(yp128[0],1);
yp2[j] = vgetq_lane_s8(yp128[0],2);
......@@ -1367,7 +1367,7 @@ unsigned char phy_threegpplte_turbo_decoder8(short *y,
tmp=_mm_insert_epi8(tmp,((llr_t*)ext)[*pi4_p++],13);
tmp=_mm_insert_epi8(tmp,((llr_t*)ext)[*pi4_p++],14);
((__m128i *)systematic2)[i]=_mm_insert_epi8(tmp,((llr_t*)ext)[*pi4_p++],15);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
tmp=vsetq_lane_s8(((llr_t*)ext)[*pi4_p++],tmp,0);
tmp=vsetq_lane_s8(((llr_t*)ext)[*pi4_p++],tmp,1);
tmp=vsetq_lane_s8(((llr_t*)ext)[*pi4_p++],tmp,2);
......@@ -1419,7 +1419,7 @@ unsigned char phy_threegpplte_turbo_decoder8(short *y,
tmp=_mm_insert_epi8(tmp,ext2[*pi5_p++],15);
decoded_bytes_interl[i]=(uint16_t) _mm_movemask_epi8(_mm_cmpgt_epi8(tmp,zeros));
((__m128i *)systematic1)[i] = _mm_adds_epi8(_mm_subs_epi8(tmp,((__m128i*)ext)[i]),((__m128i *)systematic0)[i]);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
tmp=vsetq_lane_s8(ext2[*pi5_p++],tmp,0);
tmp=vsetq_lane_s8(ext2[*pi5_p++],tmp,1);
tmp=vsetq_lane_s8(ext2[*pi5_p++],tmp,2);
......@@ -1465,7 +1465,7 @@ unsigned char phy_threegpplte_turbo_decoder8(short *y,
tmp128[i] = _mm_adds_epi8(((__m128i *)ext2)[i],((__m128i *)systematic2)[i]);
((__m128i *)systematic1)[i] = _mm_adds_epi8(_mm_subs_epi8(tmp,((__m128i*)ext)[i]),((__m128i *)systematic0)[i]);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
tmp=vsetq_lane_s8(ext2[*pi5_p++],tmp,0);
tmp=vsetq_lane_s8(ext2[*pi5_p++],tmp,1);
tmp=vsetq_lane_s8(ext2[*pi5_p++],tmp,2);
......@@ -1523,7 +1523,7 @@ unsigned char phy_threegpplte_turbo_decoder8(short *y,
decoded_bytes[n_128*j +i]=(uint8_t) _mm_movemask_epi8(_mm_packs_epi16(tmp2,zeros));
}
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
uint8x16_t* dbytes=(uint8x16_t*)decoded_bytes_interl;
uint16x8_t mask __attribute__((aligned(16)));
int n_128=n2>>7;
......@@ -1566,7 +1566,7 @@ unsigned char phy_threegpplte_turbo_decoder8(short *y,
tmp=_mm_insert_epi8(tmp, ((llr_t *)tmp128)[*pi6_p++],8);
tmp=_mm_cmpgt_epi8(tmp,zeros);
((uint16_t *)decoded_bytes)[i]=(uint16_t)_mm_movemask_epi8(tmp);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
tmp=vsetq_lane_s8(((llr_t *)tmp128)[*pi6_p++],tmp,7);
tmp=vsetq_lane_s8(((llr_t *)tmp128)[*pi6_p++],tmp,6);
tmp=vsetq_lane_s8(((llr_t *)tmp128)[*pi6_p++],tmp,5);
......@@ -1645,7 +1645,7 @@ unsigned char phy_threegpplte_turbo_decoder8(short *y,
__m128i* ext_128=(__m128i*) ext;
__m128i* s1_128=(__m128i*) systematic1;
__m128i* s0_128=(__m128i*) systematic0;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int8x16_t* ext_128=(int8x16_t*) ext;
int8x16_t* s1_128=(int8x16_t*) systematic1;
int8x16_t* s0_128=(int8x16_t*) systematic0;
......@@ -1655,7 +1655,7 @@ unsigned char phy_threegpplte_turbo_decoder8(short *y,
for (i=0; i<myloop; i++) {
#if defined(__x86_64__) || defined(__i386__)
*ext_128=_mm_adds_epi8(_mm_subs_epi8(*ext_128,*s1_128++),*s0_128++);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
*ext_128=vqaddq_s8(vqsubq_s8(*ext_128,*s1_128++),*s0_128++);
#endif
ext_128++;
......
......@@ -193,7 +193,7 @@ void phy_viterbi_dot11_sse2(char *y,unsigned char *decoded_bytes,unsigned short
__m128i *m0_ptr,*m1_ptr,*TB_ptr = &TB[offset<<2];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
uint8x16x2_t TB[2*4095*8]; // 2 int8x16_t per input bit, 8 bits / byte, 4095 is largest packet size in bytes
uint8x16_t even0_30a,even0_30b,even32_62a,even32_62b,odd1_31a,odd1_31b,odd33_63a,odd33_63b,TBeven0_30,TBeven32_62,TBodd1_31,TBodd33_63;
......@@ -224,7 +224,7 @@ void phy_viterbi_dot11_sse2(char *y,unsigned char *decoded_bytes,unsigned short
metrics48_63 = _mm_setzero_si128();
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
if (offset == 0) {
// set initial metrics
......@@ -318,7 +318,7 @@ void phy_viterbi_dot11_sse2(char *y,unsigned char *decoded_bytes,unsigned short
metrics16_31 = _mm_subs_epu8(metrics16_31,min_state);
metrics32_47 = _mm_subs_epu8(metrics32_47,min_state);
metrics48_63 = _mm_subs_epu8(metrics48_63,min_state);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
m0_ptr = (uint8x16_t *)&m0_table[table_offset];
m1_ptr = (uint8x16_t *)&m1_table[table_offset];
......
......@@ -139,7 +139,7 @@ void phy_viterbi_lte_sse2(int8_t *y,uint8_t *decoded_bytes,uint16_t n)
__m128i min_state,min_state2;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
uint8x16x2_t TB[2*8192]; // 2 int8x16_t per input bit, 8 bits / byte, 8192 is largest packet size in bits
uint8x16_t even0_30a,even0_30b,even32_62a,even32_62b,odd1_31a,odd1_31b,odd33_63a,odd33_63b,TBeven0_30,TBeven32_62,TBodd1_31,TBodd33_63;
......@@ -168,7 +168,7 @@ void phy_viterbi_lte_sse2(int8_t *y,uint8_t *decoded_bytes,uint16_t n)
metrics16_31 = _mm_setzero_si128();
metrics32_47 = _mm_setzero_si128();
metrics48_63 = _mm_setzero_si128();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
metrics0_31.val[0] = vdupq_n_u8(0);
metrics0_31.val[1] = vdupq_n_u8(0);
metrics32_63.val[0] = vdupq_n_u8(0);
......@@ -262,7 +262,7 @@ void phy_viterbi_lte_sse2(int8_t *y,uint8_t *decoded_bytes,uint16_t n)
metrics16_31 = _mm_subs_epu8(metrics16_31,min_state);
metrics32_47 = _mm_subs_epu8(metrics32_47,min_state);
metrics48_63 = _mm_subs_epu8(metrics48_63,min_state);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
m0_ptr = (uint8x16_t *)&m0_table[table_offset];
m1_ptr = (uint8x16_t *)&m1_table[table_offset];
......@@ -356,7 +356,7 @@ void phy_viterbi_lte_sse2(int8_t *y,uint8_t *decoded_bytes,uint16_t n)
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
for (s=0; s<16; s++)
if (((uint8_t *)&metrics0_31.val[0])[s] > maxm) {
maxm = ((uint8_t *)&metrics0_31.val[0])[s];
......
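
The surviving-path normalization above finds the maximum metric with a scalar scan over the 16 lanes; the same reduction can be done in-register with pairwise max in log2 steps (AArch64 additionally offers the single instruction vmaxvq_u8). A sketch with a hypothetical helper name:

    #include <arm_neon.h>
    static inline uint8_t hmax_u8(uint8x16_t x)
    {
      uint8x8_t m = vpmax_u8(vget_low_u8(x), vget_high_u8(x)); /* 16 -> 8 lanes */
      m = vpmax_u8(m, m);  /* 8 -> 4 */
      m = vpmax_u8(m, m);  /* 4 -> 2 */
      m = vpmax_u8(m, m);  /* 2 -> 1: lane 0 holds the maximum */
      return vget_lane_u8(m, 0);
    }
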
......@@ -297,7 +297,7 @@ void freq_equalization(LTE_DL_FRAME_PARMS *frame_parms,
rxdataF_comp128 = (__m128i *)&rxdataF_comp[0][symbol*frame_parms->N_RB_DL*12];
ul_ch_mag128 = (__m128i *)&ul_ch_mag[0][symbol*frame_parms->N_RB_DL*12];
ul_ch_magb128 = (__m128i *)&ul_ch_magb[0][symbol*frame_parms->N_RB_DL*12];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *ul_ch_mag128,*ul_ch_magb128,*rxdataF_comp128;
rxdataF_comp128 = (int16x8_t*)&rxdataF_comp[0][symbol*frame_parms->N_RB_DL*12];
ul_ch_mag128 = (int16x8_t*)&ul_ch_mag[0][symbol*frame_parms->N_RB_DL*12];
......@@ -326,7 +326,7 @@ void freq_equalization(LTE_DL_FRAME_PARMS *frame_parms,
ul_ch_mag128[re] = _mm_set1_epi16(316); // this is 512*4/sqrt(42)
ul_ch_magb128[re] = _mm_set1_epi16(158); // this is 512*2/sqrt(42)
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rxdataF_comp128[re] = vmulq_s16(rxdataF_comp128[re],*((int16x8_t *)&inv_ch[8*amp]));
if (Qm==4)
......
......@@ -30,7 +30,7 @@
#if defined(__x86_64__) || defined(__i386__)
__m128i avg128F;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int32x4_t avg128F;
#endif
......@@ -42,7 +42,7 @@ int dl_channel_level(int16_t *dl_ch,
int16_t rb;
#if defined(__x86_64__) || defined(__i386__)
__m128i *dl_ch128;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x4_t *dl_ch128;
#endif
int avg;
......@@ -61,7 +61,7 @@ int dl_channel_level(int16_t *dl_ch,
dl_ch128+=3;
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
avg128F = vdupq_n_s32(0);
dl_ch128=(int16x4_t *)dl_ch;
......
......@@ -469,7 +469,7 @@ void lte_ue_measurements(PHY_VARS_UE *ue,
unsigned int limit,subband;
#if defined(__x86_64__) || defined(__i386__)
__m128i *dl_ch0_128,*dl_ch1_128;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *dl_ch0_128, *dl_ch1_128;
#endif
int *dl_ch0,*dl_ch1;
......@@ -668,7 +668,7 @@ void lte_ue_measurements(PHY_VARS_UE *ue,
dl_ch0_128 = (__m128i *)&ue->common_vars.common_vars_rx_data_per_thread[ue->current_thread_id[subframe]].dl_ch_estimates[eNB_id][aarx][4];
dl_ch1_128 = (__m128i *)&ue->common_vars.common_vars_rx_data_per_thread[ue->current_thread_id[subframe]].dl_ch_estimates[eNB_id][2+aarx][4];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int32x4_t pmi128_re,pmi128_im,mmtmpPMI0,mmtmpPMI1,mmtmpPMI0b,mmtmpPMI1b;
dl_ch0_128 = (int16x8_t *)&ue->common_vars.common_vars_rx_data_per_thread[ue->current_thread_id[subframe]].dl_ch_estimates[eNB_id][aarx][4];
......@@ -683,7 +683,7 @@ void lte_ue_measurements(PHY_VARS_UE *ue,
pmi128_re = _mm_xor_si128(pmi128_re,pmi128_re);
pmi128_im = _mm_xor_si128(pmi128_im,pmi128_im);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
pmi128_re = vdupq_n_s32(0);
pmi128_im = vdupq_n_s32(0);
......@@ -741,7 +741,7 @@ void lte_ue_measurements(PHY_VARS_UE *ue,
pmi128_im = _mm_add_epi32(pmi128_im,mmtmpPMI1);
//print_ints(" pmi128_im 1 ",&pmi128_im);*/
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
mmtmpPMI0 = vmull_s16(((int16x4_t*)dl_ch0_128)[0], ((int16x4_t*)dl_ch1_128)[0]);
mmtmpPMI1 = vmull_s16(((int16x4_t*)dl_ch0_128)[1], ((int16x4_t*)dl_ch1_128)[1]);
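
The vmull_s16 / vpadd_s32 pair seen here is the NEON idiom for the widened complex products that _mm_madd_epi16 gives x86 in one instruction: multiply interleaved (re,im) int16 pairs out to 32 bits, then add adjacent lanes. A sketch of the real part of conj(a)*b under that layout (pa/pb are hypothetical pointers to interleaved samples):

    #include <arm_neon.h>
    /* real part of conj(a)*b for two int16 complex samples: re*re' + im*im' */
    static inline int32x2_t cmplx_conj_re(const int16_t *pa, const int16_t *pb)
    {
      int16x4_t a = vld1_s16(pa), b = vld1_s16(pb);         /* (re0,im0,re1,im1) */
      int32x4_t p = vmull_s16(a, b);                        /* widening multiply */
      return vpadd_s32(vget_low_s32(p), vget_high_s32(p));  /* pairwise add */
    }

The imaginary part uses the same pattern with one operand swapped to (im,re) order (vrev32_s16) and one component negated.
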
......@@ -1066,6 +1066,7 @@ void conjch0_mult_ch1(int *ch0,
{
//This function is used to compute multiplications in Hhermitian * H matrix
unsigned short rb;
#if defined(__x86_64__) || defined(__i386__)
__m128i *dl_ch0_128,*dl_ch1_128, *ch0conj_ch1_128, mmtmpD0,mmtmpD1,mmtmpD2,mmtmpD3;
dl_ch0_128 = (__m128i *)ch0;
......@@ -1100,6 +1101,10 @@ void conjch0_mult_ch1(int *ch0,
}
_mm_empty();
_m_empty();
#else
AssertFatal(1==0,"To be done for ARM\n");
#endif
}
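
From here down, several of these matrix helpers gain an #else branch that trades a compile error for a loud runtime abort: the ARM build links, so ulsim/dlsim can run, and it only dies if an unported path is actually executed. AssertFatal is OAI's abort-with-message macro; a stand-in approximating it, to make the pattern self-contained outside the tree:

    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical stand-in for OAI's AssertFatal(condition, message, ...) */
    #define AssertFatal(cond, ...) \
      do { if (!(cond)) { fprintf(stderr, __VA_ARGS__); abort(); } } while (0)

    void some_unported_kernel(void)   /* illustrative name */
    {
    #if defined(__x86_64__) || defined(__i386__)
      /* SSE implementation */
    #else
      AssertFatal(1 == 0, "To be done for ARM\n"); /* link now, trap if reached */
    #endif
    }
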
void construct_HhH_elements(int *ch0conj_ch0, //00_00
......@@ -1116,6 +1121,8 @@ void construct_HhH_elements(int *ch0conj_ch0, //00_00
int32_t *after_mf_11,
unsigned short nb_rb)
{
#if defined(__x86_64__) || defined(__i386__)
unsigned short rb;
__m128i *ch0conj_ch0_128, *ch1conj_ch1_128, *ch2conj_ch2_128, *ch3conj_ch3_128;
__m128i *ch0conj_ch1_128, *ch1conj_ch0_128, *ch2conj_ch3_128, *ch3conj_ch2_128;
......@@ -1173,6 +1180,10 @@ void construct_HhH_elements(int *ch0conj_ch0, //00_00
}
_mm_empty();
_m_empty();
#else
AssertFatal(1==0,"To be done for ARM\n");
#endif
}
......@@ -1180,6 +1191,7 @@ void squared_matrix_element(int32_t *Hh_h_00,
int32_t *Hh_h_00_sq,
unsigned short nb_rb)
{
#if defined(__x86_64__) || defined(__i386__)
unsigned short rb;
__m128i *Hh_h_00_128,*Hh_h_00_sq_128;
......@@ -1201,6 +1213,10 @@ void squared_matrix_element(int32_t *Hh_h_00,
}
_mm_empty();
_m_empty();
#else
AssertFatal(1==0,"To be done for ARM\n");
#endif
}
......@@ -1213,6 +1229,8 @@ void det_HhH(int32_t *after_mf_00,
unsigned short nb_rb)
{
#if defined(__x86_64__) || defined(__i386__)
unsigned short rb;
__m128i *after_mf_00_128,*after_mf_01_128, *after_mf_10_128, *after_mf_11_128, ad_re_128, bc_re_128;
__m128i *det_fin_128, det_128;
......@@ -1250,6 +1268,11 @@ void det_HhH(int32_t *after_mf_00,
}
_mm_empty();
_m_empty();
#else
AssertFatal(1==0,"To be done for ARM\n");
#endif
}
void numer(int32_t *Hh_h_00_sq,
......@@ -1260,6 +1283,8 @@ void numer(int32_t *Hh_h_00_sq,
unsigned short nb_rb)
{
#if defined(__x86_64__) || defined(__i386__)
unsigned short rb;
__m128i *h_h_00_sq_128, *h_h_01_sq_128, *h_h_10_sq_128, *h_h_11_sq_128;
__m128i *num_fin_128, sq_a_plus_sq_d_128, sq_b_plus_sq_c_128;
......@@ -1296,6 +1321,11 @@ void numer(int32_t *Hh_h_00_sq,
}
_mm_empty();
_m_empty();
#else
AssertFatal(1==0,"To be done for ARM\n");
#endif
}
......@@ -1379,7 +1409,7 @@ void dlsch_channel_level_TM34_meas(int *ch00,
_mm_empty();
_m_empty();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#endif
}
......@@ -78,7 +78,7 @@ int32_t lte_ul_channel_estimation(PHY_VARS_eNB *eNB,
#if defined(__x86_64__) || defined(__i386__)
__m128i *rxdataF128,*ul_ref128,*ul_ch128;
__m128i mmtmpU0,mmtmpU1,mmtmpU2,mmtmpU3;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *rxdataF128,*ul_ref128,*ul_ch128;
int32x4_t mmtmp0,mmtmp1,mmtmp_re,mmtmp_im;
#endif
......@@ -122,7 +122,7 @@ int32_t temp_in_ifft_0[2048*2] __attribute__((aligned(32)));
rxdataF128 = (__m128i *)&rxdataF_ext[aa][symbol_offset];
ul_ch128 = (__m128i *)&ul_ch_estimates[aa][symbol_offset];
ul_ref128 = (__m128i *)ul_ref_sigs_rx[u][v][Msc_RS_idx];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rxdataF128 = (int16x8_t *)&rxdataF_ext[aa][symbol_offset];
ul_ch128 = (int16x8_t *)&ul_ch_estimates[aa][symbol_offset];
ul_ref128 = (int16x8_t *)ul_ref_sigs_rx[u][v][Msc_RS_idx];
......@@ -173,7 +173,7 @@ int32_t temp_in_ifft_0[2048*2] __attribute__((aligned(32)));
mmtmpU3 = _mm_unpackhi_epi32(mmtmpU0,mmtmpU1);
ul_ch128[2] = _mm_packs_epi32(mmtmpU2,mmtmpU3);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
mmtmp0 = vmull_s16(((int16x4_t*)ul_ref128)[0],((int16x4_t*)rxdataF128)[0]);
mmtmp1 = vmull_s16(((int16x4_t*)ul_ref128)[1],((int16x4_t*)rxdataF128)[1]);
mmtmp_re = vcombine_s32(vpadd_s32(vget_low_s32(mmtmp0),vget_high_s32(mmtmp0)),
......@@ -633,7 +633,7 @@ int16_t lte_ul_freq_offset_estimation(LTE_DL_FRAME_PARMS *frame_parms,
phase_idx = -phase_idx;
return(phase_idx);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
return(0);
#endif
}
......@@ -655,7 +655,7 @@ void pdcch_channel_level(int32_t **dl_ch_estimates_ext,
#if defined(__x86_64__) || defined(__i386__)
__m128i *dl_ch128;
__m128i avg128P;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *dl_ch128;
int32x4_t avg128P;
#endif
......@@ -665,7 +665,8 @@ void pdcch_channel_level(int32_t **dl_ch_estimates_ext,
#if defined(__x86_64__) || defined(__i386__)
avg128P = _mm_setzero_si128();
dl_ch128=(__m128i *)&dl_ch_estimates_ext[(aatx<<1)+aarx][0];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"to be done for ARM\n");
#endif
for (rb=0; rb<nb_rb; rb++) {
......@@ -674,8 +675,9 @@ void pdcch_channel_level(int32_t **dl_ch_estimates_ext,
avg128P = _mm_add_epi32(avg128P,_mm_madd_epi16(dl_ch128[0],dl_ch128[0]));
avg128P = _mm_add_epi32(avg128P,_mm_madd_epi16(dl_ch128[1],dl_ch128[1]));
avg128P = _mm_add_epi32(avg128P,_mm_madd_epi16(dl_ch128[2],dl_ch128[2]));
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be done for ARM\n");
#endif
dl_ch128+=3;
/*
......@@ -705,8 +707,9 @@ void pdcch_channel_level(int32_t **dl_ch_estimates_ext,
#if defined(__x86_64__) || defined(__i386__)
__m128i mmtmpPD0,mmtmpPD1,mmtmpPD2,mmtmpPD3;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#endif
void pdcch_dual_stream_correlation(LTE_DL_FRAME_PARMS *frame_parms,
uint8_t symbol,
......@@ -719,8 +722,9 @@ void pdcch_dual_stream_correlation(LTE_DL_FRAME_PARMS *frame_parms,
uint16_t rb;
#if defined(__x86_64__) || defined(__i386__)
__m128i *dl_ch128,*dl_ch128i,*dl_ch_rho128;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be done for ARM\n");
#endif
uint8_t aarx;
......@@ -734,7 +738,8 @@ void pdcch_dual_stream_correlation(LTE_DL_FRAME_PARMS *frame_parms,
dl_ch128i = (__m128i *)&dl_ch_estimates_ext_i[aarx][symbol*frame_parms->N_RB_DL*12];
dl_ch_rho128 = (__m128i *)&dl_ch_rho_ext[aarx][symbol*frame_parms->N_RB_DL*12];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#endif
......@@ -806,7 +811,8 @@ void pdcch_dual_stream_correlation(LTE_DL_FRAME_PARMS *frame_parms,
dl_ch_rho128+=3;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#endif
}
......@@ -831,7 +837,8 @@ void pdcch_detection_mrc_i(LTE_DL_FRAME_PARMS *frame_parms,
#if defined(__x86_64__) || defined(__i386__)
__m128i *rxdataF_comp128_0,*rxdataF_comp128_1,*rxdataF_comp128_i0,*rxdataF_comp128_i1,*rho128_0,*rho128_1,*rho128_i0,*rho128_i1;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *rxdataF_comp128_0,*rxdataF_comp128_1,*rxdataF_comp128_i0,*rxdataF_comp128_i1,*rho128_0,*rho128_1,*rho128_i0,*rho128_i1;
#endif
int32_t i;
......@@ -843,7 +850,8 @@ void pdcch_detection_mrc_i(LTE_DL_FRAME_PARMS *frame_parms,
#if defined(__x86_64__) || defined(__i386__)
rxdataF_comp128_0 = (__m128i *)&rxdataF_comp[(aatx<<1)][symbol*frame_parms->N_RB_DL*12];
rxdataF_comp128_1 = (__m128i *)&rxdataF_comp[(aatx<<1)+1][symbol*frame_parms->N_RB_DL*12];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rxdataF_comp128_0 = (int16x8_t *)&rxdataF_comp[(aatx<<1)][symbol*frame_parms->N_RB_DL*12];
rxdataF_comp128_1 = (int16x8_t *)&rxdataF_comp[(aatx<<1)+1][symbol*frame_parms->N_RB_DL*12];
#endif
......@@ -851,7 +859,8 @@ void pdcch_detection_mrc_i(LTE_DL_FRAME_PARMS *frame_parms,
for (i=0; i<frame_parms->N_RB_DL*3; i++) {
#if defined(__x86_64__) || defined(__i386__)
rxdataF_comp128_0[i] = _mm_adds_epi16(_mm_srai_epi16(rxdataF_comp128_0[i],1),_mm_srai_epi16(rxdataF_comp128_1[i],1));
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rxdataF_comp128_0[i] = vhaddq_s16(rxdataF_comp128_0[i],rxdataF_comp128_1[i]);
#endif
}
......@@ -860,14 +869,14 @@ void pdcch_detection_mrc_i(LTE_DL_FRAME_PARMS *frame_parms,
#if defined(__x86_64__) || defined(__i386__)
rho128_0 = (__m128i *) &rho[0][symbol*frame_parms->N_RB_DL*12];
rho128_1 = (__m128i *) &rho[1][symbol*frame_parms->N_RB_DL*12];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rho128_0 = (int16x8_t *) &rho[0][symbol*frame_parms->N_RB_DL*12];
rho128_1 = (int16x8_t *) &rho[1][symbol*frame_parms->N_RB_DL*12];
#endif
for (i=0; i<frame_parms->N_RB_DL*3; i++) {
#if defined(__x86_64__) || defined(__i386__)
rho128_0[i] = _mm_adds_epi16(_mm_srai_epi16(rho128_0[i],1),_mm_srai_epi16(rho128_1[i],1));
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rho128_0[i] = vhaddq_s16(rho128_0[i],rho128_1[i]);
#endif
}
......@@ -877,7 +886,7 @@ void pdcch_detection_mrc_i(LTE_DL_FRAME_PARMS *frame_parms,
rho128_i1 = (__m128i *) &rho_i[1][symbol*frame_parms->N_RB_DL*12];
rxdataF_comp128_i0 = (__m128i *)&rxdataF_comp_i[0][symbol*frame_parms->N_RB_DL*12];
rxdataF_comp128_i1 = (__m128i *)&rxdataF_comp_i[1][symbol*frame_parms->N_RB_DL*12];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rho128_i0 = (int16x8_t*) &rho_i[0][symbol*frame_parms->N_RB_DL*12];
rho128_i1 = (int16x8_t*) &rho_i[1][symbol*frame_parms->N_RB_DL*12];
rxdataF_comp128_i0 = (int16x8_t *)&rxdataF_comp_i[0][symbol*frame_parms->N_RB_DL*12];
......@@ -889,7 +898,7 @@ void pdcch_detection_mrc_i(LTE_DL_FRAME_PARMS *frame_parms,
#if defined(__x86_64__) || defined(__i386__)
rxdataF_comp128_i0[i] = _mm_adds_epi16(_mm_srai_epi16(rxdataF_comp128_i0[i],1),_mm_srai_epi16(rxdataF_comp128_i1[i],1));
rho128_i0[i] = _mm_adds_epi16(_mm_srai_epi16(rho128_i0[i],1),_mm_srai_epi16(rho128_i1[i],1));
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rxdataF_comp128_i0[i] = vhaddq_s16(rxdataF_comp128_i0[i],rxdataF_comp128_i1[i]);
rho128_i0[i] = vhaddq_s16(rho128_i0[i],rho128_i1[i]);
......@@ -1388,8 +1397,8 @@ void pdcch_channel_compensation(int32_t **rxdataF_ext,
#if defined(__x86_64__) || defined(__i386__)
__m128i *dl_ch128,*rxdataF128,*rxdataF_comp128;
__m128i *dl_ch128_2, *rho128;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be done for ARM\n");
#endif
uint8_t aatx,aarx,pilots=0;
......@@ -1492,7 +1501,8 @@ void pdcch_channel_compensation(int32_t **rxdataF_ext,
rxdataF128+=2;
rxdataF_comp128+=2;
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#endif
}
......@@ -1509,7 +1519,8 @@ void pdcch_channel_compensation(int32_t **rxdataF_ext,
dl_ch128 = (__m128i *)&dl_ch_estimates_ext[aarx][symbol*frame_parms->N_RB_DL*12];
dl_ch128_2 = (__m128i *)&dl_ch_estimates_ext[2+aarx][symbol*frame_parms->N_RB_DL*12];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#endif
for (rb=0; rb<frame_parms->N_RB_DL; rb++) {
......@@ -1604,7 +1615,8 @@ void pdcch_detection_mrc(LTE_DL_FRAME_PARMS *frame_parms,
#if defined(__x86_64__) || defined(__i386__)
__m128i *rxdataF_comp128_0,*rxdataF_comp128_1;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *rxdataF_comp128_0,*rxdataF_comp128_1;
#endif
int32_t i;
......@@ -1614,7 +1626,8 @@ void pdcch_detection_mrc(LTE_DL_FRAME_PARMS *frame_parms,
#if defined(__x86_64__) || defined(__i386__)
rxdataF_comp128_0 = (__m128i *)&rxdataF_comp[(aatx<<1)][symbol*frame_parms->N_RB_DL*12];
rxdataF_comp128_1 = (__m128i *)&rxdataF_comp[(aatx<<1)+1][symbol*frame_parms->N_RB_DL*12];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rxdataF_comp128_0 = (int16x8_t *)&rxdataF_comp[(aatx<<1)][symbol*frame_parms->N_RB_DL*12];
rxdataF_comp128_1 = (int16x8_t *)&rxdataF_comp[(aatx<<1)+1][symbol*frame_parms->N_RB_DL*12];
#endif
......@@ -1622,7 +1635,8 @@ void pdcch_detection_mrc(LTE_DL_FRAME_PARMS *frame_parms,
for (i=0; i<frame_parms->N_RB_DL*3; i++) {
#if defined(__x86_64__) || defined(__i386__)
rxdataF_comp128_0[i] = _mm_adds_epi16(_mm_srai_epi16(rxdataF_comp128_0[i],1),_mm_srai_epi16(rxdataF_comp128_1[i],1));
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rxdataF_comp128_0[i] = vhaddq_s16(rxdataF_comp128_0[i],rxdataF_comp128_1[i]);
#endif
}
......
......@@ -46,6 +46,7 @@
int16_t dlsch_demod_shift = 0;
int16_t interf_unaw_shift = 13;
//#define DEBUG_HARQ
//#define DEBUG_PHY 1
......@@ -1506,7 +1507,7 @@ void dlsch_channel_compensation(int **rxdataF_ext,
_mm_empty();
_m_empty();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
unsigned short rb;
......@@ -1753,9 +1754,11 @@ void prec2A_TM56_128(unsigned char pmi,__m128i *ch0,__m128i *ch1)
_mm_empty();
_m_empty();
}
#elif defined(__arm__)
void prec2A_TM56_128(unsigned char pmi,__m128i *ch0,__m128i *ch1) {
#elif defined(__arm__) || defined(__aarch64__)
void prec2A_TM56_128(unsigned char pmi,int16x8_t *ch0,int16x8_t *ch1) {
AssertFatal(1==0,"To be done for ARM\n");
/*
// sqrt(2) is already taken into account in computation sqrt_rho_a, sqrt_rho_b,
//so removed it
......@@ -1795,6 +1798,7 @@ void prec2A_TM56_128(unsigned char pmi,__m128i *ch0,__m128i *ch1) {
_mm_empty();
_m_empty();
*/
}
#endif
// precoding is stream 0 .5(1,1) .5(1,-1) .5(1,1) .5(1,-1)
......@@ -1803,6 +1807,7 @@ void prec2A_TM56_128(unsigned char pmi,__m128i *ch0,__m128i *ch1) {
short TM3_prec[8]__attribute__((aligned(16))) = {1,1,-1,-1,1,1,-1,-1} ;
#if defined(__x86_64__) || defined(__i386__)
void prec2A_TM3_128(__m128i *ch0,__m128i *ch1) {
__m128i amp = _mm_set1_epi16(ONE_OVER_SQRT2_Q15);
......@@ -1843,10 +1848,17 @@ void prec2A_TM3_128(__m128i *ch0,__m128i *ch1) {
_mm_empty();
_m_empty();
}
#else
void prec2A_TM3_128(int16x8_t *ch0,int16x8_t *ch1) {
AssertFatal(1==0,"To be done for ARM\n");
}
#endif
// pmi = 0 => stream 0 (1,1), stream 1 (1,-1)
// pmi = 1 => stream 0 (1,j), stream 2 (1,-j)
#if defined(__x86_64__) || defined(__i386__)
void prec2A_TM4_128(int pmi,__m128i *ch0,__m128i *ch1) {
// sqrt(2) is already taken into account in computation sqrt_rho_a, sqrt_rho_b,
......@@ -1893,7 +1905,11 @@ void prec2A_TM4_128(int pmi,__m128i *ch0,__m128i *ch1) {
// print_shorts("prec2A_TM4 ch0 (end):",ch0);
//print_shorts("prec2A_TM4 ch1 (end):",ch1);
}
#elif defined(__arm__) || defined(__aarch64__)
void prec2A_TM4_128(int pmi,int16x8_t *ch0,int16x8_t *ch1) {
AssertFatal(1==0,"To be done for ARM\n");
}
#endif
void dlsch_channel_compensation_TM56(int **rxdataF_ext,
int **dl_ch_estimates_ext,
int **dl_ch_mag,
......@@ -2102,7 +2118,7 @@ void dlsch_channel_compensation_TM56(int **rxdataF_ext,
//printf("eNB_id %d, symbol %d: precoded CQI %d dB\n",eNB_id,symbol,
// measurements->precoded_cqi_dB[eNB_id][0]);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
uint32_t rb,Nre;
uint32_t aarx,symbol_mod,pilots=0;
......@@ -2682,8 +2698,9 @@ void dlsch_channel_compensation_TM34(LTE_DL_FRAME_PARMS *frame_parms,
_mm_empty();
_m_empty();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be checked for ARM (output_shift => output_shift0/1)\n");
unsigned short rb,Nre;
unsigned char aarx,symbol_mod,pilots=0;
int precoded_signal_strength0=0,precoded_signal_strength1=0, rx_power_correction;
......@@ -2694,7 +2711,7 @@ void dlsch_channel_compensation_TM34(LTE_DL_FRAME_PARMS *frame_parms,
int32x4_t mmtmpD0,mmtmpD1,mmtmpD0b,mmtmpD1b;
int16x8_t *dl_ch_mag0_128,*dl_ch_mag0_128b,*dl_ch_mag1_128,*dl_ch_mag1_128b,mmtmpD2,mmtmpD3,mmtmpD4,*rxdataF_comp0_128,*rxdataF_comp1_128;
int16x8_t QAM_amp0_128,QAM_amp0_128b,QAM_amp1_128,QAM_amp1_128b;
int32x4_t output_shift128 = vmovq_n_s32(-(int32_t)output_shift);
int32x4_t output_shift128 = vmovq_n_s32(-(int32_t)output_shift0);
int **rxdataF_ext = pdsch_vars->rxdataF_ext;
int **dl_ch_estimates_ext = pdsch_vars->dl_ch_estimates_ext;
......@@ -3091,8 +3108,8 @@ void dlsch_dual_stream_correlation(LTE_DL_FRAME_PARMS *frame_parms,
_mm_empty();
_m_empty();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be done for ARM\n");
#endif
}
......@@ -3212,7 +3229,7 @@ void dlsch_dual_stream_correlation(LTE_DL_FRAME_PARMS *frame_parms,
_mm_empty();
_m_empty();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#endif
}
......@@ -3297,7 +3314,7 @@ void dlsch_detection_mrc(LTE_DL_FRAME_PARMS *frame_parms,
_mm_empty();
_m_empty();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
unsigned char aatx;
int i;
......@@ -3368,6 +3385,7 @@ void dlsch_detection_mrc_TM34(LTE_DL_FRAME_PARMS *frame_parms,
unsigned char dual_stream_UE) {
int i;
#if defined(__x86_64__) || defined(__i386__)
__m128i *rxdataF_comp128_0,*rxdataF_comp128_1,*rxdataF_comp128_i0,*rxdataF_comp128_i1,*dl_ch_mag128_0,*dl_ch_mag128_1,*dl_ch_mag128_0b,*dl_ch_mag128_1b,*rho128_0,*rho128_1,*rho128_i0,*rho128_i1,*dl_ch_mag128_i0,*dl_ch_mag128_i1,*dl_ch_mag128_i0b,*dl_ch_mag128_i1b;
int **rxdataF_comp0 = pdsch_vars->rxdataF_comp0;
......@@ -3435,6 +3453,9 @@ void dlsch_detection_mrc_TM34(LTE_DL_FRAME_PARMS *frame_parms,
_mm_empty();
_m_empty();
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be done for ARM\n");
#endif
}
......@@ -3495,8 +3516,8 @@ void dlsch_scale_channel(int **dl_ch_estimates_ext,
}
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be done for ARM\n");
#endif
}
......@@ -3573,7 +3594,7 @@ void dlsch_channel_level(int **dl_ch_estimates_ext,
_mm_empty();
_m_empty();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
short rb;
unsigned char aatx,aarx,nre=12,symbol_mod;
......@@ -3769,7 +3790,9 @@ void dlsch_channel_level_TM34(int **dl_ch_estimates_ext,
_mm_empty();
_m_empty();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be done for ARM\n");
#endif
}
......@@ -3872,7 +3895,7 @@ void dlsch_channel_level_TM34(int **dl_ch_estimates_ext,
_mm_empty();
_m_empty();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#endif
}*/
......@@ -3956,8 +3979,8 @@ void dlsch_channel_level_TM56(int **dl_ch_estimates_ext,
_mm_empty();
_m_empty();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be done for ARM\n");
#endif
}
......@@ -4028,8 +4051,8 @@ void dlsch_channel_level_TM7(int **dl_bf_ch_estimates_ext,
_mm_empty();
_m_empty();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be done for ARM\n");
#endif
}
//#define ONE_OVER_2_Q15 16384
......@@ -4134,7 +4157,8 @@ void dlsch_alamouti(LTE_DL_FRAME_PARMS *frame_parms,
_mm_empty();
_m_empty();
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be done for ARM\n");
#endif
}
......
......@@ -517,7 +517,7 @@ int pbch_channel_level(int **dl_ch_estimates_ext,
#if defined(__x86_64__) || defined(__i386__)
__m128i avg128;
__m128i *dl_ch128;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int32x4_t avg128;
int16x8_t *dl_ch128;
#endif
......
......@@ -402,7 +402,7 @@ void mch_channel_level(int **dl_ch_estimates_ext,
int i,aarx,nre;
#if defined(__x86_64__) || defined(__i386__)
__m128i *dl_ch128,avg128;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int32x4_t avg128;
#endif
for (aarx=0; aarx<frame_parms->nb_antennas_rx; aarx++) {
......@@ -412,8 +412,8 @@ void mch_channel_level(int **dl_ch_estimates_ext,
// 5 is always a symbol with no pilots for both normal and extended prefix
dl_ch128=(__m128i *)&dl_ch_estimates_ext[aarx][symbol*frame_parms->N_RB_DL*12];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be done for ARM\n");
#endif
if ((symbol == 2) || (symbol == 6) || (symbol == 10))
......@@ -458,7 +458,8 @@ void mch_channel_compensation(int **rxdataF_ext,
#if defined(__x86_64__) || defined(__i386__)
__m128i *dl_ch128,*dl_ch_mag128,*dl_ch_mag128b,*rxdataF128,*rxdataF_comp128;
__m128i mmtmpD0,mmtmpD1,mmtmpD2,mmtmpD3,QAM_amp128,QAM_amp128b;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"to be done for ARM\n");
#endif
if ((symbol == 2) || (symbol == 6) || (symbol == 10))
......@@ -474,7 +475,8 @@ void mch_channel_compensation(int **rxdataF_ext,
QAM_amp128 = _mm_set1_epi16(QAM64_n1); //
QAM_amp128b = _mm_set1_epi16(QAM64_n2);
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#endif
......@@ -487,7 +489,8 @@ void mch_channel_compensation(int **rxdataF_ext,
dl_ch_mag128b = (__m128i *)&dl_ch_magb[aarx][symbol*frame_parms->N_RB_DL*12];
rxdataF128 = (__m128i *)&rxdataF_ext[aarx][symbol*frame_parms->N_RB_DL*12];
rxdataF_comp128 = (__m128i *)&rxdataF_comp[aarx][symbol*frame_parms->N_RB_DL*12];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#endif
......@@ -524,7 +527,8 @@ void mch_channel_compensation(int **rxdataF_ext,
dl_ch_mag128b[1] = _mm_mulhi_epi16(dl_ch_mag128b[1],QAM_amp128b);
dl_ch_mag128b[1] = _mm_slli_epi16(dl_ch_mag128b[1],1);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#endif
}
......@@ -579,7 +583,8 @@ void mch_channel_compensation(int **rxdataF_ext,
rxdataF128+=2;
rxdataF_comp128+=2;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#endif
}
......@@ -603,7 +608,7 @@ void mch_detection_mrc(LTE_DL_FRAME_PARMS *frame_parms,
int i;
#if defined(__x86_64__) || defined(__i386__)
__m128i *rxdataF_comp128_0,*rxdataF_comp128_1,*dl_ch_mag128_0,*dl_ch_mag128_1,*dl_ch_mag128_0b,*dl_ch_mag128_1b;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *rxdataF_comp128_0,*rxdataF_comp128_1,*dl_ch_mag128_0,*dl_ch_mag128_1,*dl_ch_mag128_0b,*dl_ch_mag128_1b;
#endif
if (frame_parms->nb_antennas_rx>1) {
......@@ -617,7 +622,8 @@ void mch_detection_mrc(LTE_DL_FRAME_PARMS *frame_parms,
dl_ch_mag128_0b = (__m128i *)&dl_ch_magb[0][symbol*frame_parms->N_RB_DL*12];
dl_ch_mag128_1b = (__m128i *)&dl_ch_magb[1][symbol*frame_parms->N_RB_DL*12];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rxdataF_comp128_0 = (int16x8_t *)&rxdataF_comp[0][symbol*frame_parms->N_RB_DL*12];
rxdataF_comp128_1 = (int16x8_t *)&rxdataF_comp[1][symbol*frame_parms->N_RB_DL*12];
dl_ch_mag128_0 = (int16x8_t *)&dl_ch_mag[0][symbol*frame_parms->N_RB_DL*12];
......@@ -632,7 +638,7 @@ void mch_detection_mrc(LTE_DL_FRAME_PARMS *frame_parms,
rxdataF_comp128_0[i] = _mm_adds_epi16(_mm_srai_epi16(rxdataF_comp128_0[i],1),_mm_srai_epi16(rxdataF_comp128_1[i],1));
dl_ch_mag128_0[i] = _mm_adds_epi16(_mm_srai_epi16(dl_ch_mag128_0[i],1),_mm_srai_epi16(dl_ch_mag128_1[i],1));
dl_ch_mag128_0b[i] = _mm_adds_epi16(_mm_srai_epi16(dl_ch_mag128_0b[i],1),_mm_srai_epi16(dl_ch_mag128_1b[i],1));
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rxdataF_comp128_0[i] = vhaddq_s16(rxdataF_comp128_0[i],rxdataF_comp128_1[i]);
dl_ch_mag128_0[i] = vhaddq_s16(dl_ch_mag128_0[i],dl_ch_mag128_1[i]);
dl_ch_mag128_0b[i] = vhaddq_s16(dl_ch_mag128_0b[i],dl_ch_mag128_1b[i]);
......@@ -705,7 +711,7 @@ void mch_16qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
__m128i *ch_mag;
__m128i llr128[2],xmm0;
uint32_t *llr32;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *rxF = (int16x8_t*)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
int16x8_t *ch_mag;
int16x8_t llr128[2],xmm0;
......@@ -720,7 +726,8 @@ void mch_16qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
} else {
llr32 = (uint32_t*)*llr32p;
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
if (symbol==2) {
llr16 = (int16_t*)dlsch_llr;
} else {
......@@ -729,7 +736,8 @@ void mch_16qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
#endif
#if defined(__x86_64__) || defined(__i386__)
ch_mag = (__m128i*)&dl_ch_mag[0][(symbol*frame_parms->N_RB_DL*12)];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
ch_mag = (int16x8_t*)&dl_ch_mag[0][(symbol*frame_parms->N_RB_DL*12)];
#endif
if ((symbol==2) || (symbol==6) || (symbol==10)) {
......@@ -769,7 +777,8 @@ void mch_16qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
llr32[7] = ((uint32_t *)&llr128[1])[3];
llr32+=8;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
xmm0 = vabsq_s16(rxF[i]);
xmm0 = vsubq_s16(ch_mag[i],xmm0);
......@@ -818,7 +827,7 @@ void mch_64qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
#if defined(__x86_64__) || defined(__i386__)
__m128i xmm1,xmm2,*ch_mag,*ch_magb;
__m128i *rxF = (__m128i*)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t xmm1,xmm2,*ch_mag,*ch_magb;
int16x8_t *rxF = (int16x8_t*)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
#endif
......@@ -837,7 +846,8 @@ void mch_64qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
#if defined(__x86_64__) || defined(__i386__)
ch_mag = (__m128i*)&dl_ch_mag[0][(symbol*frame_parms->N_RB_DL*12)];
ch_magb = (__m128i*)&dl_ch_magb[0][(symbol*frame_parms->N_RB_DL*12)];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
ch_mag = (int16x8_t*)&dl_ch_mag[0][(symbol*frame_parms->N_RB_DL*12)];
ch_magb = (int16x8_t*)&dl_ch_magb[0][(symbol*frame_parms->N_RB_DL*12)];
#endif
......@@ -862,7 +872,8 @@ void mch_64qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
xmm1 = _mm_subs_epi16(ch_mag[i],xmm1);
xmm2 = _mm_abs_epi16(xmm1);
xmm2 = _mm_subs_epi16(ch_magb[i],xmm2);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
xmm1 = vabsq_s16(rxF[i]);
xmm1 = vsubq_s16(ch_mag[i],xmm1);
xmm2 = vabsq_s16(xmm1);
......@@ -893,7 +904,8 @@ void mch_64qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
llr2[3] = _mm_extract_epi16(xmm1,1);//((short *)&xmm1)[j+1];
llr2[4] = _mm_extract_epi16(xmm2,0);//((short *)&xmm2)[j];
llr2[5] = _mm_extract_epi16(xmm2,1);//((short *)&xmm2)[j+1];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
llr2[2] = vgetq_lane_s16(xmm1,0);
llr2[3] = vgetq_lane_s16(xmm1,1);//((short *)&xmm1)[j+1];
llr2[4] = vgetq_lane_s16(xmm2,0);//((short *)&xmm2)[j];
......@@ -908,7 +920,8 @@ void mch_64qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
llr2[3] = _mm_extract_epi16(xmm1,3);//((short *)&xmm1)[j+1];
llr2[4] = _mm_extract_epi16(xmm2,2);//((short *)&xmm2)[j];
llr2[5] = _mm_extract_epi16(xmm2,3);//((short *)&xmm2)[j+1];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
llr2[2] = vgetq_lane_s16(xmm1,2);
llr2[3] = vgetq_lane_s16(xmm1,3);//((short *)&xmm1)[j+1];
llr2[4] = vgetq_lane_s16(xmm2,2);//((short *)&xmm2)[j];
......@@ -922,7 +935,8 @@ void mch_64qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
llr2[3] = _mm_extract_epi16(xmm1,5);//((short *)&xmm1)[j+1];
llr2[4] = _mm_extract_epi16(xmm2,4);//((short *)&xmm2)[j];
llr2[5] = _mm_extract_epi16(xmm2,5);//((short *)&xmm2)[j+1];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
llr2[2] = vgetq_lane_s16(xmm1,4);
llr2[3] = vgetq_lane_s16(xmm1,5);//((short *)&xmm1)[j+1];
llr2[4] = vgetq_lane_s16(xmm2,4);//((short *)&xmm2)[j];
......@@ -936,7 +950,8 @@ void mch_64qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
llr2[3] = _mm_extract_epi16(xmm1,7);//((short *)&xmm1)[j+1];
llr2[4] = _mm_extract_epi16(xmm2,6);//((short *)&xmm2)[j];
llr2[5] = _mm_extract_epi16(xmm2,7);//((short *)&xmm2)[j+1];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
llr2[2] = vgetq_lane_s16(xmm1,6);
llr2[3] = vgetq_lane_s16(xmm1,7);//((short *)&xmm1)[j+1];
llr2[4] = vgetq_lane_s16(xmm2,6);//((short *)&xmm2)[j];
......
......@@ -1074,7 +1074,7 @@ unsigned int ulsch_decoding(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc,
((__m256i*)cseq)[i2++] = ((__m256i*)unscrambling_lut)[s&65535];
((__m256i*)cseq)[i2++] = ((__m256i*)unscrambling_lut)[(s>>16)&65535];
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
((int16x8_t*)cseq)[i2++] = ((int16x8_t*)unscrambling_lut)[(s&65535)<<1];
((int16x8_t*)cseq)[i2++] = ((int16x8_t*)unscrambling_lut)[1+((s&65535)<<1)];
s>>=16;
......@@ -1170,7 +1170,7 @@ unsigned int ulsch_decoding(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc,
i2=j<<2;
for (r=0; r<Rmux_prime; r++) {
/*
#if defined(__arm__) || defined(__aarch64__)
c = cseq[i];
y[i2++] = c*ulsch_llr[i++];
c = cseq[i];
......@@ -1180,9 +1180,10 @@ unsigned int ulsch_decoding(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc,
c = cseq[i];
y[i2] = c*ulsch_llr[i++];
i2=(i2+(Cmux<<2)-3);
*/
#elif defined(__x86_64__) || defined(__i386__)
// slightly more optimized version (equivalent to above) for 16QAM to improve computational performance
*(__m64 *)&y[i2] = _mm_sign_pi16(*(__m64*)&ulsch_llr[i],*(__m64*)&cseq[i]);i+=4;i2+=(Cmux<<2);
#endif
}
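
The x86 fast path applies the scrambling sequence with _mm_sign_pi16 (negate lanes where the sign operand is negative). NEON has no sign intrinsic, but since cseq holds exactly +1/-1 a plain lane multiply is equivalent; a sketch with a contiguous store for clarity (the real loop scatters with the Cmux<<2 stride above):

    #include <arm_neon.h>
    /* descramble 8 LLRs: y = llr * c, c in {+1,-1}  (matches _mm_sign_pi16 here) */
    static inline void descramble8(int16_t *y, const int16_t *llr, const int16_t *c)
    {
      vst1q_s16(y, vmulq_s16(vld1q_s16(llr), vld1q_s16(c)));
    }
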
......@@ -1437,7 +1438,7 @@ unsigned int ulsch_decoding(PHY_VARS_eNB *eNB,eNB_rxtx_proc_t *proc,
for (iprime=0; iprime<G;iprime+=16,j2+=16)
*((__m256i *)&ulsch_harq->e[iprime]) = *((__m256i *)&y[j2]);
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
for (iprime=0; iprime<G;iprime+=8,j2+=8)
*((int16x8_t *)&ulsch_harq->e[iprime]) = *((int16x8_t *)&y[j2]);
#endif
......
......@@ -52,7 +52,7 @@ void lte_idft(LTE_DL_FRAME_PARMS *frame_parms,uint32_t *z, uint16_t Msc_PUSCH)
#if defined(__x86_64__) || defined(__i386__)
__m128i idft_in128[3][1200],idft_out128[3][1200];
__m128i norm128;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t idft_in128[3][1200],idft_out128[3][1200];
int16x8_t norm128;
#endif
......@@ -119,7 +119,7 @@ void lte_idft(LTE_DL_FRAME_PARMS *frame_parms,uint32_t *z, uint16_t Msc_PUSCH)
*&(((__m128i*)z10)[i])=_mm_sign_epi16(*&(((__m128i*)z10)[i]),*(__m128i*)&conjugate2[0]);
*&(((__m128i*)z11)[i])=_mm_sign_epi16(*&(((__m128i*)z11)[i]),*(__m128i*)&conjugate2[0]);
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
*&(((int16x8_t*)z0)[i])=vmulq_s16(*&(((int16x8_t*)z0)[i]),*(int16x8_t*)&conjugate2[0]);
*&(((int16x8_t*)z1)[i])=vmulq_s16(*&(((int16x8_t*)z1)[i]),*(int16x8_t*)&conjugate2[0]);
*&(((int16x8_t*)z2)[i])=vmulq_s16(*&(((int16x8_t*)z2)[i]),*(int16x8_t*)&conjugate2[0]);
......@@ -167,7 +167,7 @@ void lte_idft(LTE_DL_FRAME_PARMS *frame_parms,uint32_t *z, uint16_t Msc_PUSCH)
#if defined(__x86_64__)||defined(__i386__)
norm128 = _mm_set1_epi16(9459);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
norm128 = vdupq_n_s16(9459);
#endif
for (i=0; i<12; i++) {
......@@ -175,7 +175,7 @@ void lte_idft(LTE_DL_FRAME_PARMS *frame_parms,uint32_t *z, uint16_t Msc_PUSCH)
((__m128i*)idft_out0)[i] = _mm_slli_epi16(_mm_mulhi_epi16(((__m128i*)idft_out0)[i],norm128),1);
((__m128i*)idft_out1)[i] = _mm_slli_epi16(_mm_mulhi_epi16(((__m128i*)idft_out1)[i],norm128),1);
((__m128i*)idft_out2)[i] = _mm_slli_epi16(_mm_mulhi_epi16(((__m128i*)idft_out2)[i],norm128),1);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
((int16x8_t*)idft_out0)[i] = vqdmulhq_s16(((int16x8_t*)idft_out0)[i],norm128);
((int16x8_t*)idft_out1)[i] = vqdmulhq_s16(((int16x8_t*)idft_out1)[i],norm128);
((int16x8_t*)idft_out2)[i] = vqdmulhq_s16(((int16x8_t*)idft_out2)[i],norm128);
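
The constant 9459 is approximately (1/sqrt(12))*2^15, the normalization for the 12-subcarrier transform. x86 applies it as multiply-high followed by a shift left of one; vqdmulhq_s16 folds both into a single saturating doubling multiply-high, (2*a*b)>>16, identical up to the low bit the x86 ordering discards. Side by side:

    #include <arm_neon.h>
    /* x86:  _mm_slli_epi16(_mm_mulhi_epi16(x, n), 1)  ~  ((x*n)>>16)<<1
       NEON: vqdmulhq_s16(x, n)                        =  sat((2*x*n)>>16) */
    static inline int16x8_t normalize_q15(int16x8_t x)
    {
      return vqdmulhq_s16(x, vdupq_n_s16(9459)); /* 9459/32768 ~= 1/sqrt(12) */
    }
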
......@@ -427,7 +427,7 @@ void lte_idft(LTE_DL_FRAME_PARMS *frame_parms,uint32_t *z, uint16_t Msc_PUSCH)
((__m128i*)z10)[i]=_mm_sign_epi16(((__m128i*)z10)[i],*(__m128i*)&conjugate2[0]);
((__m128i*)z11)[i]=_mm_sign_epi16(((__m128i*)z11)[i],*(__m128i*)&conjugate2[0]);
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
*&(((int16x8_t*)z0)[i])=vmulq_s16(*&(((int16x8_t*)z0)[i]),*(int16x8_t*)&conjugate2[0]);
*&(((int16x8_t*)z1)[i])=vmulq_s16(*&(((int16x8_t*)z1)[i]),*(int16x8_t*)&conjugate2[0]);
*&(((int16x8_t*)z2)[i])=vmulq_s16(*&(((int16x8_t*)z2)[i]),*(int16x8_t*)&conjugate2[0]);
......@@ -470,7 +470,7 @@ int32_t ulsch_qpsk_llr(LTE_DL_FRAME_PARMS *frame_parms,
#if defined(__x86_64__) || defined(__i386__)
__m128i *rxF=(__m128i*)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
__m128i **llrp128 = (__m128i **)llrp;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *rxF= (int16x8_t*)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
int16x8_t **llrp128 = (int16x8_t **)llrp;
#endif
......@@ -511,7 +511,7 @@ int i;
__m128i mmtmpU0;
__m128i **llrp128=(__m128i **)llrp;
ch_mag =(__m128i*)&ul_ch_mag[0][(symbol*frame_parms->N_RB_DL*12)];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *rxF=(int16x8_t*)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
int16x8_t *ch_mag;
int16x8_t xmm0;
......@@ -530,7 +530,7 @@ int i;
(*llrp128)[0] = _mm_unpacklo_epi32(rxF[i],mmtmpU0);
(*llrp128)[1] = _mm_unpackhi_epi32(rxF[i],mmtmpU0);
(*llrp128)+=2;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
xmm0 = vabsq_s16(rxF[i]);
xmm0 = vqsubq_s16(ch_mag[i],xmm0);
(*llrp16)[0] = vgetq_lane_s16(rxF[i],0);
......@@ -583,7 +583,7 @@ void ulsch_64qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
ch_mag =(__m128i*)&ul_ch_mag[0][(symbol*frame_parms->N_RB_DL*12)];
ch_magb =(__m128i*)&ul_ch_magb[0][(symbol*frame_parms->N_RB_DL*12)];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *rxF=(int16x8_t*)&rxdataF_comp[0][(symbol*frame_parms->N_RB_DL*12)];
int16x8_t *ch_mag,*ch_magb;
int16x8_t mmtmpU1,mmtmpU2;
......@@ -615,7 +615,7 @@ void ulsch_64qam_llr(LTE_DL_FRAME_PARMS *frame_parms,
(*llrp32)[9] = _mm_extract_epi32(rxF[i],3);
(*llrp32)[10] = _mm_extract_epi32(mmtmpU1,3);
(*llrp32)[11] = _mm_extract_epi32(mmtmpU2,3);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
mmtmpU1 = vabsq_s16(rxF[i]);
mmtmpU1 = vqsubq_s16(ch_mag[i],mmtmpU1);
......@@ -658,7 +658,7 @@ void ulsch_detection_mrc(LTE_DL_FRAME_PARMS *frame_parms,
__m128i *rxdataF_comp128_0,*ul_ch_mag128_0,*ul_ch_mag128_0b;
__m128i *rxdataF_comp128_1,*ul_ch_mag128_1,*ul_ch_mag128_1b;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *rxdataF_comp128_0,*ul_ch_mag128_0,*ul_ch_mag128_0b;
int16x8_t *rxdataF_comp128_1,*ul_ch_mag128_1,*ul_ch_mag128_1b;
......@@ -682,7 +682,7 @@ void ulsch_detection_mrc(LTE_DL_FRAME_PARMS *frame_parms,
ul_ch_mag128_0b[i] = _mm_adds_epi16(_mm_srai_epi16(ul_ch_mag128_0b[i],1),_mm_srai_epi16(ul_ch_mag128_1b[i],1));
rxdataF_comp128_0[i] = _mm_add_epi16(rxdataF_comp128_0[i],(*(__m128i*)&jitterc[0]));
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rxdataF_comp128_0 = (int16x8_t *)&rxdataF_comp[0][symbol*frame_parms->N_RB_DL*12];
rxdataF_comp128_1 = (int16x8_t *)&rxdataF_comp[1][symbol*frame_parms->N_RB_DL*12];
ul_ch_mag128_0 = (int16x8_t *)&ul_ch_mag[0][symbol*frame_parms->N_RB_DL*12];
......@@ -801,7 +801,7 @@ void ulsch_channel_compensation(int32_t **rxdataF_ext,
uint8_t aarx;//,symbol_mod;
__m128i mmtmpU0,mmtmpU1,mmtmpU2,mmtmpU3;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x4_t *ul_ch128,*rxdataF128;
int16x8_t *ul_ch_mag128,*ul_ch_mag128b,*rxdataF_comp128;
......@@ -825,7 +825,7 @@ void ulsch_channel_compensation(int32_t **rxdataF_ext,
rxdataF128 = (__m128i *)&rxdataF_ext[aarx][symbol*frame_parms->N_RB_DL*12];
rxdataF_comp128 = (__m128i *)&rxdataF_comp[aarx][symbol*frame_parms->N_RB_DL*12];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
ul_ch128 = (int16x4_t *)&ul_ch_estimates_ext[aarx][symbol*frame_parms->N_RB_DL*12];
......@@ -862,7 +862,7 @@ void ulsch_channel_compensation(int32_t **rxdataF_ext,
// printf("comp: symbol %d rb %d => %d,%d,%d (output_shift %d)\n",symbol,rb,*((int16_t*)&ul_ch_mag128[0]),*((int16_t*)&ul_ch_mag128[1]),*((int16_t*)&ul_ch_mag128[2]),output_shift);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
mmtmpU0 = vmull_s16(ul_ch128[0], ul_ch128[0]);
mmtmpU0 = vqshlq_s32(vqaddq_s32(mmtmpU0,vrev64q_s32(mmtmpU0)),-output_shift128);
mmtmpU1 = vmull_s16(ul_ch128[1], ul_ch128[1]);
......@@ -953,7 +953,7 @@ void ulsch_channel_compensation(int32_t **rxdataF_ext,
ul_ch_mag128b+=3;
rxdataF128+=3;
rxdataF_comp128+=3;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
mmtmpU0 = vmull_s16(ul_ch128[0], rxdataF128[0]);
//mmtmpU0 = [Re(ch[0])Re(rx[0]) Im(ch[0])Im(ch[0]) Re(ch[1])Re(rx[1]) Im(ch[1])Im(ch[1])]
mmtmpU1 = vmull_s16(ul_ch128[1], rxdataF128[1]);
......@@ -1047,7 +1047,7 @@ void ulsch_channel_level(int32_t **drs_ch_estimates_ext,
#if defined(__x86_64__) || defined(__i386__)
__m128i avg128U;
__m128i *ul_ch128;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x4_t *ul_ch128;
int32x4_t avg128U;
#endif
......@@ -1069,7 +1069,7 @@ void ulsch_channel_level(int32_t **drs_ch_estimates_ext,
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
avg128U = vdupq_n_s32(0);
ul_ch128=(int16x4_t *)drs_ch_estimates_ext[aarx];
......
......@@ -47,7 +47,7 @@ void dft_lte(int32_t *z,int32_t *d, int32_t Msc_PUSCH, uint8_t Nsymb)
#if defined(__x86_64__) || defined(__i386__)
__m128i dft_in128[4][1200],dft_out128[4][1200];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t dft_in128[4][1200],dft_out128[4][1200];
#endif
uint32_t *dft_in0=(uint32_t*)dft_in128[0],*dft_out0=(uint32_t*)dft_out128[0];
......@@ -61,7 +61,8 @@ void dft_lte(int32_t *z,int32_t *d, int32_t Msc_PUSCH, uint8_t Nsymb)
uint32_t i,ip;
#if defined(__x86_64__) || defined(__i386__)
__m128i norm128;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t norm128;
#endif
// printf("Doing lte_dft for Msc_PUSCH %d\n",Msc_PUSCH);
......@@ -119,7 +120,7 @@ void dft_lte(int32_t *z,int32_t *d, int32_t Msc_PUSCH, uint8_t Nsymb)
*/
#if defined(__x86_64__) || defined(__i386__)
norm128 = _mm_set1_epi16(9459);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
norm128 = vdupq_n_s16(9459);
#endif
for (i=0; i<12; i++) {
......@@ -127,7 +128,7 @@ void dft_lte(int32_t *z,int32_t *d, int32_t Msc_PUSCH, uint8_t Nsymb)
((__m128i*)dft_out0)[i] = _mm_slli_epi16(_mm_mulhi_epi16(((__m128i*)dft_out0)[i],norm128),1);
((__m128i*)dft_out1)[i] = _mm_slli_epi16(_mm_mulhi_epi16(((__m128i*)dft_out1)[i],norm128),1);
((__m128i*)dft_out2)[i] = _mm_slli_epi16(_mm_mulhi_epi16(((__m128i*)dft_out2)[i],norm128),1);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
((int16x8_t*)dft_out0)[i] = vqdmulhq_s16(((int16x8_t*)dft_out0)[i],norm128);
((int16x8_t*)dft_out1)[i] = vqdmulhq_s16(((int16x8_t*)dft_out1)[i],norm128);
((int16x8_t*)dft_out2)[i] = vqdmulhq_s16(((int16x8_t*)dft_out2)[i],norm128);
......
......@@ -38,7 +38,7 @@ void apply_7_5_kHz(PHY_VARS_UE *ue,int32_t*txdata,uint8_t slot)
uint32_t *kHz7_5ptr;
#if defined(__x86_64__) || defined(__i386__)
__m128i *txptr128,*kHz7_5ptr128,mmtmp_re,mmtmp_im,mmtmp_re2,mmtmp_im2;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *txptr128,*kHz7_5ptr128;
int32x4_t mmtmp_re,mmtmp_im;
int32x4_t mmtmp0,mmtmp1;
......@@ -85,7 +85,7 @@ void apply_7_5_kHz(PHY_VARS_UE *ue,int32_t*txdata,uint8_t slot)
#if defined(__x86_64__) || defined(__i386__)
txptr128 = (__m128i *)&txdata[slot_offset];
kHz7_5ptr128 = (__m128i *)kHz7_5ptr;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
txptr128 = (int16x8_t*)&txdata[slot_offset];
kHz7_5ptr128 = (int16x8_t*)kHz7_5ptr;
#endif
......@@ -107,7 +107,7 @@ void apply_7_5_kHz(PHY_VARS_UE *ue,int32_t*txdata,uint8_t slot)
txptr128[0] = _mm_packs_epi32(mmtmp_re2,mmtmp_im2);
txptr128++;
kHz7_5ptr128++;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
mmtmp0 = vmull_s16(((int16x4_t*)txptr128)[0],((int16x4_t*)kHz7_5ptr128)[0]);
//mmtmp0 = [Re(ch[0])Re(rx[0]) Im(ch[0])Im(rx[0]) Re(ch[1])Re(rx[1]) Im(ch[1])Im(rx[1])]
......@@ -145,7 +145,7 @@ void remove_7_5_kHz(RU_t *ru,uint8_t slot)
uint32_t *kHz7_5ptr;
#if defined(__x86_64__) || defined(__i386__)
__m128i *rxptr128,*rxptr128_7_5kHz,*kHz7_5ptr128,kHz7_5_2,mmtmp_re,mmtmp_im,mmtmp_re2,mmtmp_im2;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *rxptr128,*kHz7_5ptr128,*rxptr128_7_5kHz;
int32x4_t mmtmp_re,mmtmp_im;
int32x4_t mmtmp0,mmtmp1;
......@@ -199,7 +199,7 @@ void remove_7_5_kHz(RU_t *ru,uint8_t slot)
rxptr128 = (__m128i *)&rxdata[aa][slot_offset];
rxptr128_7_5kHz = (__m128i *)&rxdata_7_5kHz[aa][slot_offset2];
kHz7_5ptr128 = (__m128i *)kHz7_5ptr;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
rxptr128 = (int16x8_t *)&rxdata[aa][slot_offset];
rxptr128_7_5kHz = (int16x8_t *)&rxdata_7_5kHz[aa][slot_offset2];
kHz7_5ptr128 = (int16x8_t *)kHz7_5ptr;
......@@ -227,7 +227,7 @@ void remove_7_5_kHz(RU_t *ru,uint8_t slot)
rxptr128_7_5kHz++;
kHz7_5ptr128++;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
kHz7_5ptr128[0] = vmulq_s16(kHz7_5ptr128[0],((int16x8_t*)conjugate75_2)[0]);
mmtmp0 = vmull_s16(((int16x4_t*)rxptr128)[0],((int16x4_t*)kHz7_5ptr128)[0]);
......
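In the NEON branch above, the 7.5 kHz tone is first multiplied lane-wise by conjugate75_2, presumably flipping the sign of the imaginary lanes (i.e. conjugating the tone), before the widening complex multiply with the rx samples. A scalar model of one complex sample, as a sketch in Q15:

/* Sketch (illustrative only): removing the 7.5 kHz offset is a complex
   multiply of each rx sample (xr,xi) by the conjugated tone (tr,-ti). */
#include <stdint.h>
static inline void cmul_conj_sketch(int16_t xr, int16_t xi,
                                    int16_t tr, int16_t ti,
                                    int16_t *yr, int16_t *yi)
{
  *yr = (int16_t)(((int32_t)xr * tr + (int32_t)xi * ti) >> 15);
  *yi = (int16_t)(((int32_t)xi * tr - (int32_t)xr * ti) >> 15);
}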
......@@ -20,6 +20,36 @@
*/
#include "defs.h"
#if defined(__x86_64__) || defined(__i386__)
#define simd_q15_t __m128i
#define simd_q31_t __m128i
#define simd_q63_t __m128i
#define simd_q15_short_t __m64
#define simd_q15_add(a,b) _mm_adds_epi16(a,b)
#define simd_q63_add(a,b) _mm_add_epi64(a,b)
#define simd_q15_sub(a,b) _mm_subs_epi16(a,b)
#define simd_q63_sub(a,b) _mm_sub_epi64(a,b)
#define simd_q15_add_short(a,b) _mm_adds_pi16(a,b)
#define simd_q31_add(a,b) _mm_adds_epi32(a,b)
#ifdef __AVX2__
#define simd256_q15_t __m256i
#endif
#elif defined(__arm__) || defined(__aarch64__)
#define simd_q15_t int16x8_t
#define simd_q31_t int32x4_t
#define simd_q63_t int64x2_t
#define simd_q15_short_t int16x4_t
#define simd_q15_add(a,b) vqaddq_s16(a,b)
#define simd_q63_add(a,b) vqaddq_s64(a,b)
#define simd_q15_sub(a,b) vqsubq_s16(a,b)
#define simd_q63_sub(a,b) vqsubq_s64(a,b)
#define simd_q15_add_short(a,b) vqadd_s16(a,b)
#define simd_q31_add(a,b) vqaddq_s32(a,b)
#define _mm_empty()
#define _m_empty()
#endif
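These macros let the vector helpers below share one body across SSE and NEON. One asymmetry worth noting: simd_q63_add/simd_q63_sub map to the non-saturating _mm_add_epi64/_mm_sub_epi64 on x86 but to the saturating vqaddq_s64/vqsubq_s64 on NEON. A minimal usage sketch, assuming the macros above are in scope:

/* Sketch: the same body compiles on both architectures. */
static void vadd_q15_sketch(short *x, short *y, short *z, unsigned int n)
{
  simd_q15_t *x128 = (simd_q15_t *)x, *y128 = (simd_q15_t *)y, *z128 = (simd_q15_t *)z;
  for (unsigned int i = 0; i < (n >> 3); i++)  /* 8 q15 lanes per vector */
    z128[i] = simd_q15_add(x128[i], y128[i]);
}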
int add_vector16(short *x,
......@@ -29,13 +59,13 @@ int add_vector16(short *x,
{
unsigned int i; // loop counter
__m128i *x_128;
__m128i *y_128;
__m128i *z_128;
simd_q15_t *x_128;
simd_q15_t *y_128;
simd_q15_t *z_128;
x_128 = (__m128i *)&x[0];
y_128 = (__m128i *)&y[0];
z_128 = (__m128i *)&z[0];
x_128 = (simd_q15_t *)&x[0];
y_128 = (simd_q15_t *)&y[0];
z_128 = (simd_q15_t *)&z[0];
for(i=0; i<(N>>5); i++) {
......@@ -45,7 +75,7 @@ int add_vector16(short *x,
print_shorts(y_128[0],"y[0]=");
*/
z_128[0] = _mm_adds_epi16(x_128[0],y_128[0]);
z_128[0] = simd_q15_add(x_128[0],y_128[0]);
/*
print_shorts(z_128[0],"z[0]=");
......@@ -53,7 +83,7 @@ int add_vector16(short *x,
print_shorts(y_128[1],"y[1]=");
*/
z_128[1] = _mm_adds_epi16(x_128[1],y_128[1]);
z_128[1] = simd_q15_add(x_128[1],y_128[1]);
/*
print_shorts(z_128[1],"z[1]=");
......@@ -61,7 +91,7 @@ int add_vector16(short *x,
print_shorts(y_128[2],"y[2]=");
*/
z_128[2] = _mm_adds_epi16(x_128[2],y_128[2]);
z_128[2] = simd_q15_add(x_128[2],y_128[2]);
/*
print_shorts(z_128[2],"z[2]=");
......@@ -69,7 +99,7 @@ int add_vector16(short *x,
print_shorts(y_128[3],"y[3]=");
*/
z_128[3] = _mm_adds_epi16(x_128[3],y_128[3]);
z_128[3] = simd_q15_add(x_128[3],y_128[3]);
/*
print_shorts(z_128[3],"z[3]=");
*/
......@@ -106,13 +136,13 @@ int add_vector16_64(short *x,
{
unsigned int i; // loop counter
__m64 *x_64;
__m64 *y_64;
__m64 *z_64;
simd_q15_short_t *x_64;
simd_q15_short_t *y_64;
simd_q15_short_t *z_64;
x_64 = (__m64 *)&x[0];
y_64 = (__m64 *)&y[0];
z_64 = (__m64 *)&z[0];
x_64 = (simd_q15_short_t *)&x[0];
y_64 = (simd_q15_short_t *)&y[0];
z_64 = (simd_q15_short_t *)&z[0];
for(i=0; i<(N>>2); i++) {
......@@ -122,7 +152,7 @@ int add_vector16_64(short *x,
print_shorts64(y_64[i],"y[i]=");
*/
z_64[i] = _mm_adds_pi16(x_64[i],y_64[i]);
z_64[i] = simd_q15_add_short(x_64[i],y_64[i]);
/*
print_shorts64(z_64[i],"z[i]=");
*/
......@@ -142,20 +172,20 @@ int add_cpx_vector32(short *x,
{
unsigned int i; // loop counter
__m128i *x_128;
__m128i *y_128;
__m128i *z_128;
simd_q31_t *x_128;
simd_q31_t *y_128;
simd_q31_t *z_128;
x_128 = (__m128i *)&x[0];
y_128 = (__m128i *)&y[0];
z_128 = (__m128i *)&z[0];
x_128 = (simd_q31_t *)&x[0];
y_128 = (simd_q31_t *)&y[0];
z_128 = (simd_q31_t *)&z[0];
for(i=0; i<(N>>3); i++) {
z_128[0] = _mm_add_epi32(x_128[0],y_128[0]);
z_128[1] = _mm_add_epi32(x_128[1],y_128[1]);
z_128[2] = _mm_add_epi32(x_128[2],y_128[2]);
z_128[3] = _mm_add_epi32(x_128[3],y_128[3]);
z_128[0] = simd_q31_add(x_128[0],y_128[0]);
z_128[1] = simd_q31_add(x_128[1],y_128[1]);
z_128[2] = simd_q31_add(x_128[2],y_128[2]);
z_128[3] = simd_q31_add(x_128[3],y_128[3]);
x_128+=4;
......@@ -176,16 +206,16 @@ int32_t sub_cpx_vector16(int16_t *x,
{
unsigned int i; // loop counter
__m128i *x_128;
__m128i *y_128;
__m128i *z_128;
simd_q15_t *x_128;
simd_q15_t *y_128;
simd_q15_t *z_128;
x_128 = (__m128i *)&x[0];
y_128 = (__m128i *)&y[0];
z_128 = (__m128i *)&z[0];
x_128 = (simd_q15_t *)&x[0];
y_128 = (simd_q15_t *)&y[0];
z_128 = (simd_q15_t *)&z[0];
for(i=0; i<(N>>3); i++) {
z_128[0] = _mm_subs_epi16(x_128[0],y_128[0]);
z_128[0] = simd_q15_sub(x_128[0],y_128[0]);
x_128++;
y_128++;
......@@ -207,19 +237,19 @@ int add_real_vector64(short *x,
{
unsigned int i; // loop counter
__m128i *x_128;
__m128i *y_128;
__m128i *z_128;
simd_q63_t *x_128;
simd_q63_t *y_128;
simd_q63_t *z_128;
x_128 = (__m128i *)&x[0];
y_128 = (__m128i *)&y[0];
z_128 = (__m128i *)&z[0];
x_128 = (simd_q63_t *)&x[0];
y_128 = (simd_q63_t *)&y[0];
z_128 = (simd_q63_t *)&z[0];
for(i=0; i<(N>>3); i++) {
z_128[0] = _mm_add_epi64(x_128[0], y_128[0]);
z_128[1] = _mm_add_epi64(x_128[1], y_128[1]);
z_128[2] = _mm_add_epi64(x_128[2], y_128[2]);
z_128[3] = _mm_add_epi64(x_128[3], y_128[3]);
z_128[0] = simd_q63_add(x_128[0], y_128[0]);
z_128[1] = simd_q63_add(x_128[1], y_128[1]);
z_128[2] = simd_q63_add(x_128[2], y_128[2]);
z_128[3] = simd_q63_add(x_128[3], y_128[3]);
x_128+=4;
......@@ -240,20 +270,19 @@ int sub_real_vector64(short *x,
{
unsigned int i; // loop counter
__m128i *x_128;
__m128i *y_128;
__m128i *z_128;
simd_q63_t *x_128;
simd_q63_t *y_128;
simd_q63_t *z_128;
x_128 = (__m128i *)&x[0];
y_128 = (__m128i *)&y[0];
z_128 = (__m128i *)&z[0];
x_128 = (simd_q63_t *)&x[0];
y_128 = (simd_q63_t *)&y[0];
z_128 = (simd_q63_t *)&z[0];
for(i=0; i<(N>>3); i++) {
z_128[0] = _mm_sub_epi64(x_128[0], y_128[0]);
z_128[1] = _mm_sub_epi64(x_128[1], y_128[1]);
z_128[2] = _mm_sub_epi64(x_128[2], y_128[2]);
z_128[3] = _mm_sub_epi64(x_128[3], y_128[3]);
z_128[0] = simd_q63_sub(x_128[0], y_128[0]);
z_128[1] = simd_q63_sub(x_128[1], y_128[1]);
z_128[2] = simd_q63_sub(x_128[2], y_128[2]);
z_128[3] = simd_q63_sub(x_128[3], y_128[3]);
x_128+=4;
y_128+=4;
......
......@@ -31,7 +31,7 @@
#define mulhi_s1_int16(a,b) _mm_slli_epi16(_mm_mulhi_epi16(a,b),2)
#define adds_int16(a,b) _mm_adds_epi16(a,b)
#define mullo_int16(a,b) _mm_mullo_epi16(a,b)
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#define simd_q15_t int16x8_t
#define simdshort_q15_t int16x4_t
#define shiftright_int16(a,shift) vshrq_n_s16(a,shift)
......@@ -103,7 +103,7 @@ void multadd_real_vector_complex_scalar(int16_t *x,
j++;
y_128[j] = _mm_adds_epi16(y_128[j],_mm_unpackhi_epi16(yr,yi));
j++;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8x2_t yint;
yint = vzipq_s16(yr,yi);
y_128[j] = adds_int16(y_128[j],yint.val[0]);
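Here yr and yi hold the real and imaginary accumulators in separate vectors; vzipq_s16 interleaves them back into [re im re im ...] order before the saturating accumulate into y. A self-contained sketch of that step (function name is illustrative):

/* Sketch: interleave separate re/im accumulators and add into y. */
#include <arm_neon.h>
static inline void zip_accumulate_sketch(int16x8_t yr, int16x8_t yi, int16x8_t *y)
{
  int16x8x2_t yint = vzipq_s16(yr, yi);  /* [re0 im0 re1 im1 ...] */
  y[0] = vqaddq_s16(y[0], yint.val[0]);  /* low half  */
  y[1] = vqaddq_s16(y[1], yint.val[1]);  /* high half */
}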
......@@ -354,7 +354,7 @@ int rotate_cpx_vector(int16_t *x,
((int16_t *)&alpha_128)[5] = -alpha[1];
((int16_t *)&alpha_128)[6] = alpha[1];
((int16_t *)&alpha_128)[7] = alpha[0];
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int32x4_t shift;
int32x4_t ab_re0,ab_re1,ab_im0,ab_im1,re32,im32;
int16_t reflip[8] __attribute__((aligned(16))) = {1,-1,1,-1,1,-1,1,-1};
......@@ -386,7 +386,7 @@ int rotate_cpx_vector(int16_t *x,
y_128[0] = _mm_packs_epi32(m2,m3); // pack in 16bit integers with saturation [re im re im re im re im]
//print_ints("y_128[0]=", &y_128[0]);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
ab_re0 = vmull_s16(((int16x4_t*)xd)[0],((int16x4_t*)&bconj)[0]);
ab_re1 = vmull_s16(((int16x4_t*)xd)[1],((int16x4_t*)&bconj)[1]);
......
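rotate_cpx_vector multiplies each complex q15 sample by the scalar alpha; the interleaved alpha_128 layout on x86 and the reflip sign mask on NEON both realize the sign pattern of a complex product. The per-sample operation, as a scalar sketch (the actual function scales by a caller-supplied shift, assumed 15 here):

/* Sketch: y = alpha * x for one complex q15 sample. */
#include <stdint.h>
static inline void crot_sketch(int16_t xr, int16_t xi,
                               int16_t ar, int16_t ai,
                               int16_t *yr, int16_t *yi)
{
  *yr = (int16_t)(((int32_t)ar * xr - (int32_t)ai * xi) >> 15);
  *yi = (int16_t)(((int32_t)ar * xi + (int32_t)ai * xr) >> 15);
}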
......@@ -30,7 +30,7 @@ int16_t conjug2[8]__attribute__((aligned(16))) = {1,-1,1,-1,1,-1,1,-1} ;
#define simdshort_q15_t __m64
#define set1_int16(a) _mm_set1_epi16(a)
#define setr_int16(a0, a1, a2, a3, a4, a5, a6, a7) _mm_setr_epi16(a0, a1, a2, a3, a4, a5, a6, a7 )
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16_t conjug[4]__attribute__((aligned(16))) = {-1,1,-1,1} ;
#define simd_q15_t int16x8_t
#define simdshort_q15_t int16x4_t
......@@ -69,7 +69,7 @@ int mult_cpx_conj_vector(int16_t *x1,
simd_q15_t tmp_re,tmp_im;
simd_q15_t tmpy0,tmpy1;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int32x4_t tmp_re,tmp_im;
int32x4_t tmp_re1,tmp_im1;
int16x4x2_t tmpy;
......@@ -98,7 +98,7 @@ int mult_cpx_conj_vector(int16_t *x1,
else
*y_128 += _mm_packs_epi32(tmpy0,tmpy1);
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
tmp_re = vmull_s16(((simdshort_q15_t *)x1_128)[0], ((simdshort_q15_t*)x2_128)[0]);
//tmp_re = [Re(x1[0])Re(x2[0]) Im(x1[0])Im(x2[0]) Re(x1[1])Re(x2[1]) Im(x1[1])Im(x2[1])]
......@@ -166,6 +166,7 @@ int mult_cpx_vector(int16_t *x1, //Q15
//print_shorts("x1_128:",&x1_128[0]);
// print_shorts("x2_128:",&x2_128[0]);
#if defined(__x86_64__) || defined(__i386__)
//right shift by 13 while p_a * x0 and 15 while
// we compute 4 cpx multiply for each loop
for(i=0; i<(N>>2); i++) {
......@@ -195,6 +196,9 @@ int mult_cpx_vector(int16_t *x1, //Q15
}
_mm_empty();
_m_empty();
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"To be done for ARM\n");
#endif
return(0);
}
......@@ -226,7 +230,7 @@ int multadd_cpx_vector(int16_t *x1,
#if defined(__x86_64__) || defined(__i386__)
simd_q15_t tmp_re,tmp_im;
simd_q15_t tmpy0,tmpy1;
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int32x4_t tmp_re,tmp_im;
int32x4_t tmp_re1,tmp_im1;
int16x4x2_t tmpy;
......@@ -254,8 +258,8 @@ int multadd_cpx_vector(int16_t *x1,
else
*y_128 = _mm_adds_epi16(*y_128,_mm_packs_epi32(tmpy0,tmpy1));
//print_shorts("*y_128:",&y_128[i]);
#elif defined(__arm__)
msg("mult_cpx_vector not implemented for __arm__");
#elif defined(__arm__) || defined(__aarch64__)
AssertFatal(1==0,"multadd_cpx_vector not implemented for ARM");
#endif
x1_128++;
x2_128++;
......
......@@ -309,7 +309,7 @@ static inline __m256i packed_cmult2_256(__m256i a,__m256i b,__m256i b2)
}
#endif
#elif defined (__arm__)
#elif defined (__arm__) || defined(__aarch64__)
static inline void cmac(int16x8_t a,int16x8_t b, int32x4_t *re32, int32x4_t *im32) __attribute__((always_inline));
static inline void cmac(int16x8_t a,int16x8_t b, int32x4_t *re32, int32x4_t *im32)
{
......@@ -494,7 +494,7 @@ const __m256i *W35_256 = (__m256i *)W35s;
const __m256i *W45_256 = (__m256i *)W45s;
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *W0 = (int16x8_t *)W0s;
int16x8_t *W13 = (int16x8_t *)W13s;
int16x8_t *W23 = (int16x8_t *)W23s;
......@@ -579,7 +579,7 @@ static inline void bfly2_256(__m256i *x0, __m256i *x1,__m256i *y0, __m256i *y1,_
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void bfly2(int16x8_t *x0, int16x8_t *x1,int16x8_t *y0, int16x8_t *y1,int16x8_t *tw)__attribute__((always_inline));
......@@ -614,7 +614,7 @@ static inline void bfly2_tw1(__m128i *x0, __m128i *x1, __m128i *y0, __m128i *y1)
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void bfly2_tw1(int16x8_t *x0, int16x8_t *x1, int16x8_t *y0, int16x8_t *y1)__attribute__((always_inline));
......@@ -678,7 +678,7 @@ static inline void bfly2_16_256(__m256i *x0, __m256i *x1, __m256i *y0, __m256i *
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void bfly2_16(int16x8_t *x0, int16x8_t *x1, int16x8_t *y0, int16x8_t *y1, int16x8_t *tw, int16x8_t *twb)__attribute__((always_inline));
......@@ -746,7 +746,7 @@ static inline void ibfly2_256(__m256i *x0, __m256i *x1,__m256i *y0, __m256i *y1,
}
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void ibfly2(int16x8_t *x0, int16x8_t *x1,int16x8_t *y0, int16x8_t *y1,int16x8_t *tw)
{
......@@ -825,7 +825,7 @@ static inline void bfly3_256(__m256i *x0,__m256i *x1,__m256i *x2,
}
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void bfly3(int16x8_t *x0,int16x8_t *x1,int16x8_t *x2,
int16x8_t *y0,int16x8_t *y1,int16x8_t *y2,
int16x8_t *tw1,int16x8_t *tw2) __attribute__((always_inline));
......@@ -905,7 +905,7 @@ static inline void ibfly3_256(__m256i *x0,__m256i *x1,__m256i *x2,
}
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void ibfly3(int16x8_t *x0,int16x8_t *x1,int16x8_t *x2,
int16x8_t *y0,int16x8_t *y1,int16x8_t *y2,
int16x8_t *tw1,int16x8_t *tw2) __attribute__((always_inline));
......@@ -976,7 +976,7 @@ static inline void bfly3_tw1_256(__m256i *x0,__m256i *x1,__m256i *x2,
}
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void bfly3_tw1(int16x8_t *x0,int16x8_t *x1,int16x8_t *x2,
int16x8_t *y0,int16x8_t *y1,int16x8_t *y2) __attribute__((always_inline));
......@@ -1084,7 +1084,7 @@ static inline void bfly4_256(__m256i *x0,__m256i *x1,__m256i *x2,__m256i *x3,
*(y3) = _mm256_add_epi16(*(x0),cpack_256(dy3r,dy3i));
}
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void bfly4(int16x8_t *x0,int16x8_t *x1,int16x8_t *x2,int16x8_t *x3,
int16x8_t *y0,int16x8_t *y1,int16x8_t *y2,int16x8_t *y3,
int16x8_t *tw1,int16x8_t *tw2,int16x8_t *tw3)__attribute__((always_inline));
......@@ -1192,7 +1192,7 @@ static inline void ibfly4_256(__m256i *x0,__m256i *x1,__m256i *x2,__m256i *x3,
}
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void ibfly4(int16x8_t *x0,int16x8_t *x1,int16x8_t *x2,int16x8_t *x3,
int16x8_t *y0,int16x8_t *y1,int16x8_t *y2,int16x8_t *y3,
......@@ -1288,7 +1288,7 @@ static inline void bfly4_tw1_256(__m256i *x0,__m256i *x1,__m256i *x2,__m256i *x3
}
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void bfly4_tw1(int16x8_t *x0,int16x8_t *x1,int16x8_t *x2,int16x8_t *x3,
int16x8_t *y0,int16x8_t *y1,int16x8_t *y2,int16x8_t *y3)__attribute__((always_inline));
......@@ -1336,7 +1336,7 @@ static inline void ibfly4_tw1(__m128i *x0,__m128i *x1,__m128i *x2,__m128i *x3,
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void ibfly4_tw1(int16x8_t *x0,int16x8_t *x1,int16x8_t *x2,int16x8_t *x3,
int16x8_t *y0,int16x8_t *y1,int16x8_t *y2,int16x8_t *y3)__attribute__((always_inline));
......@@ -1447,7 +1447,7 @@ static inline void bfly4_16_256(__m256i *x0,__m256i *x1,__m256i *x2,__m256i *x3,
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void bfly4_16(int16x8_t *x0,int16x8_t *x1,int16x8_t *x2,int16x8_t *x3,
int16x8_t *y0,int16x8_t *y1,int16x8_t *y2,int16x8_t *y3,
......@@ -1572,7 +1572,7 @@ static inline void ibfly4_16_256(__m256i *x0,__m256i *x1,__m256i *x2,__m256i *x3
}
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void ibfly4_16(int16x8_t *x0,int16x8_t *x1,int16x8_t *x2,int16x8_t *x3,
int16x8_t *y0,int16x8_t *y1,int16x8_t *y2,int16x8_t *y3,
int16x8_t *tw1,int16x8_t *tw2,int16x8_t *tw3,
......@@ -1709,7 +1709,7 @@ static inline void bfly5_256(__m256i *x0, __m256i *x1, __m256i *x2, __m256i *x3,
}
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void bfly5(int16x8_t *x0, int16x8_t *x1, int16x8_t *x2, int16x8_t *x3,int16x8_t *x4,
int16x8_t *y0, int16x8_t *y1, int16x8_t *y2, int16x8_t *y3,int16x8_t *y4,
int16x8_t *tw1,int16x8_t *tw2,int16x8_t *tw3,int16x8_t *tw4)__attribute__((always_inline));
......@@ -1838,7 +1838,7 @@ static inline void bfly5_tw1_256(__m256i *x0, __m256i *x1, __m256i *x2, __m256i
*(y4) = _mm256_adds_epi16(*(x0),*(y4));
}
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void bfly5_tw1(int16x8_t *x0, int16x8_t *x1, int16x8_t *x2, int16x8_t *x3,int16x8_t *x4,
int16x8_t *y0, int16x8_t *y1, int16x8_t *y2, int16x8_t *y3,int16x8_t *y4) __attribute__((always_inline));
......@@ -1895,7 +1895,7 @@ static inline void transpose16(__m128i *x,__m128i *y)
y[3] = _mm_unpackhi_epi64(ytmp1,ytmp3);
}
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void transpose16(int16x8_t *x,int16x8_t *y) __attribute__((always_inline));
static inline void transpose16(int16x8_t *x,int16x8_t *y)
{
......@@ -1961,7 +1961,7 @@ static inline void transpose16_ooff_simd256(__m256i *x,__m256i *y,int off)
}
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void transpose16_ooff(int16x8_t *x,int16x8_t *y,int off) __attribute__((always_inline));
static inline void transpose16_ooff(int16x8_t *x,int16x8_t *y,int off)
......@@ -2012,7 +2012,7 @@ static inline void transpose4_ooff_simd256(__m256i *x,__m256i *y,int off)
y[off] = _mm256_insertf128_si256(perm_tmp1,_mm256_extracti128_si256(perm_tmp0,1),0);
}
#endif
#elif (__arm__)
#elif defined(__arm__) || defined(__aarch64__)
static inline void transpose4_ooff(int16x4_t *x,int16x4_t *y,int off)__attribute__((always_inline));
static inline void transpose4_ooff(int16x4_t *x,int16x4_t *y,int off)
......@@ -2142,7 +2142,7 @@ static inline void dft16(int16_t *x,int16_t *y)
y128[1] = _mm_adds_epi16(x02t,x13t); // x0 + x1f - x2 - x3f
y128[3] = _mm_subs_epi16(x02t,x13t); // x0 - x1f - x2 + x3f
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *tw16a_128=(int16x8_t *)tw16a,*tw16b_128=(int16x8_t *)tw16b,*x128=(int16x8_t *)x,*y128=(int16x8_t *)y;
......@@ -2360,7 +2360,7 @@ static inline void idft16(int16_t *x,int16_t *y)
y128[3] = _mm_adds_epi16(x02t,x13t); // x0 + x1f - x2 - x3f
y128[1] = _mm_subs_epi16(x02t,x13t); // x0 - x1f - x2 + x3f
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
int16x8_t *tw16a_128=(int16x8_t *)tw16,*tw16b_128=(int16x8_t *)tw16c,*x128=(int16x8_t *)x,*y128=(int16x8_t *)y;
/* This is the original version before unrolling
......@@ -2642,7 +2642,7 @@ const static int16_t tw64crep[192] __attribute__((aligned(32))) = {
#define mulhi_int16_simd256(a,b) _mm256_slli_epi16(_mm256_mulhi_epi16(a,b),1);
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#define simd_q15_t int16x8_t
#define simdshort_q15_t int16x4_t
#define shiftright_int16(a,shift) vshrq_n_s16(a,shift)
......@@ -18647,7 +18647,7 @@ int main(int argc, char**argv)
x[i] = _mm256_set1_epi32(taus());
x[i] = _mm256_srai_epi16(x[i],4);
#endif
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
x[i] = (int16x8_t)vdupq_n_s32(taus());
x[i] = vshrq_n_s16(x[i],4);
#endif
......@@ -47,6 +47,17 @@ typedef struct {
int trials;
int meas_flag;
} time_stats_t;
#elif defined(__aarch64__)
typedef struct {
uint64_t in;
uint64_t diff_now;
uint64_t diff;
uint64_t p_time; /*!< \brief absolute process duration */
uint64_t diff_square; /*!< \brief process duration square */
uint64_t max;
int trials;
int meas_flag;
} time_stats_t;
#elif defined(__arm__)
typedef struct {
uint32_t in;
......@@ -56,6 +67,7 @@ typedef struct {
uint32_t diff_square; /*!< \brief process duration square */
uint32_t max;
int trials;
int meas_flag;
} time_stats_t;
#endif
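On all three architectures the struct now carries the same fields (meas_flag included). A sketch of the intended usage, pairing the struct with rdtsc_oai() below; the project's real measurement helpers live elsewhere in the tree:

/* Sketch (assumed usage): sample the counter on entry, accumulate on exit. */
static inline void start_meas_sketch(time_stats_t *ts)
{
  ts->in = rdtsc_oai();
  ts->trials++;
}
static inline void stop_meas_sketch(time_stats_t *ts)
{
  ts->diff_now = rdtsc_oai() - ts->in;
  ts->diff += ts->diff_now;
  if (ts->diff_now > ts->max) ts->max = ts->diff_now;
}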
......@@ -84,7 +96,14 @@ static inline unsigned long long rdtsc_oai(void)
__asm__ volatile ("rdtsc" : "=a" (a), "=d" (d));
return (d<<32) | a;
}
#elif defined(__aarch64__)
static inline uint64_t rdtsc_oai(void) __attribute__((always_inline));
static inline uint64_t rdtsc_oai(void)
{
uint64_t r = 0;
asm volatile("mrs %0, cntvct_el0" : "=r"(r));
return r;
}
#elif defined(__arm__)
static inline uint32_t rdtsc_oai(void) __attribute__((always_inline));
static inline uint32_t rdtsc_oai(void)
......
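Unlike x86's TSC, cntvct_el0 ticks at the architected generic-timer frequency, readable from cntfrq_el0, so raw counts are not directly comparable between the two builds. A conversion sketch (the multiply does not overflow for short intervals):

/* Sketch (aarch64 only): convert cntvct_el0 ticks to nanoseconds. */
#include <stdint.h>
static inline uint64_t ticks_to_ns_sketch(uint64_t ticks)
{
  uint64_t freq;
  asm volatile("mrs %0, cntfrq_el0" : "=r"(freq));  /* ticks per second */
  return ticks * 1000000000ull / freq;
}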
......@@ -123,6 +123,7 @@ static inline void* malloc16_clear( size_t size )
#define cmax3(a,b,c) ((cmax(a,b)>c) ? (cmax(a,b)) : (c))
/// suppress compiler warning for unused arguments
#define UNUSED(x) (void)x;
......
......@@ -345,7 +345,7 @@ static inline __m128i ssp_cvtepi16_epi32_SSE2 ( __m128i a)
}
#endif // __SSE4_1__
#elif defined(__arm__)
#elif defined(__arm__) || defined(__aarch64__)
#include <arm_neon.h>
#endif // x86_64 || i386
......
......@@ -32,8 +32,11 @@
#include "UTIL/LOG/log.h"
//#define DEBUG_CH
#if defined(__i386__) || defined(__x86_64__)
extern void print_shorts(char *s,__m128i *x);
#elif defined(__arm__) || defined(__aarch64__)
extern void print_shorts(char *s,int16x8_t *x);
#endif
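The NEON-side print_shorts is only declared here; a minimal sketch of such a helper (assumption: the real definition lives elsewhere in the tree):

/* Sketch: dump the eight q15 lanes of a NEON vector for debugging. */
#include <stdio.h>
#include <arm_neon.h>
static void print_shorts_sketch(char *s, int16x8_t *x)
{
  int16_t v[8];
  vst1q_s16(v, *x);
  printf("%s %d %d %d %d %d %d %d %d\n", s,
         v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
}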
void fill_channel_desc(channel_desc_t *chan_desc,
uint8_t nb_tx,
uint8_t nb_rx,
......