Commit bbddc46e authored by Tsung-Yu Chan

refactor/rewrite the inner_rx_XX with 128-bit SIMD

parent 38dd2a60
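
Every rewritten kernel below repeats the same fixed-point step: multiply the channel estimate with the received sample, four complex int16 values per 128-bit vector, then shift and saturate back to int16. A minimal standalone sketch of that step (the helper name is hypothetical; the kernels inline this sequence rather than call a function):

#include <simde/x86/ssse3.h>

// One 128-bit step of H*R for four complex int16 samples (sketch).
static inline simde__m128i cmult128(simde__m128i ch, simde__m128i rx, int output_shift)
{
  // byte shuffle that swaps re/im within each complex, and sign mask for the real part
  const simde__m128i complex_shuffle = simde_mm_set_epi8(13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
  const simde__m128i conj = simde_mm_set_epi16(-1,1,-1,1,-1,1,-1,1);
  // Re = ch_re*rx_re - ch_im*rx_im : negate the imaginary parts of ch, then multiply-add pairs
  simde__m128i re = simde_mm_madd_epi16(simde_mm_sign_epi16(ch, conj), rx);
  // Im = ch_im*rx_re + ch_re*rx_im : swap re/im of ch, then multiply-add pairs
  simde__m128i im = simde_mm_madd_epi16(simde_mm_shuffle_epi8(ch, complex_shuffle), rx);
  re = simde_mm_srai_epi32(re, output_shift);
  im = simde_mm_srai_epi32(im, output_shift);
  // interleave the 32-bit Re/Im results back into (re,im) order, saturating to int16
  return simde_mm_packs_epi32(simde_mm_unpacklo_epi32(re, im), simde_mm_unpackhi_epi32(re, im));
}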
@@ -54,13 +54,6 @@ int64_t time_now_us(void)
return micros;
}
void nr_idft(int32_t *z, uint32_t Msc_PUSCH)
{
@@ -363,7 +356,8 @@ void nr_ulsch_extract_rbs0(c16_t *rxdataF,
}
memcpy((void*)ul_ch0_ext,(void*)ul_ch0,nb_re_pusch*sizeof(int32_t));
}
else if (pusch_pdu->dmrs_config_type == pusch_dmrs_type1) { // 6 REs / PRB
else if (pusch_pdu->dmrs_config_type == pusch_dmrs_type1) // 6 REs / PRB
{
AssertFatal(delta==0||delta==1,"Illegal delta %d\n",delta);
int32_t *rxF32 = &rxF[start_re];
int32_t *rxF_ext32 = rxF_ext;
@@ -551,8 +545,8 @@ void nr_ulsch_extract_rbs(c16_t **rxdataF,
unsigned char symbol,
uint8_t is_dmrs_symbol,
nfapi_nr_pusch_pdu_t *pusch_pdu,
NR_DL_FRAME_PARMS *frame_parms) {
NR_DL_FRAME_PARMS *frame_parms)
{
unsigned char aarx;
int16_t *rxF,*rxF_ext;
int *ul_ch0,*ul_ch0_ext;
@@ -568,58 +562,65 @@ void nr_ulsch_extract_rbs(c16_t **rxdataF,
int nb_re_pusch2 = nb_re_pusch + (nb_re_pusch&7);
for (aarx = 0; aarx < frame_parms->nb_antennas_rx; aarx++) {
for (aarx = 0; aarx < frame_parms->nb_antennas_rx; aarx++)
{
rxF = (int16_t *)&rxdataF[aarx][soffset+(symbol * frame_parms->ofdm_symbol_size)];
rxF_ext = (int16_t *)&pusch_vars->rxdataF_ext[aarx][symbol * nb_re_pusch2]; // [hna] rxdataF_ext isn't contiguous in order to solve an alignment problem in llr computation in case of mod_order = 4, 6
ul_ch0 = &pusch_vars->ul_ch_estimates[aarx][pusch_vars->dmrs_symbol*frame_parms->ofdm_symbol_size]; // update channel estimates if new dmrs symbols are available
ul_ch0_ext = &pusch_vars->ul_ch_estimates_ext[aarx][symbol*nb_re_pusch2];
if (is_dmrs_symbol == 0) {
if (is_dmrs_symbol == 0)
{
if (start_re + nb_re_pusch <= frame_parms->ofdm_symbol_size) {
memcpy((void*)rxF_ext, (void*)&rxF[start_re*2], nb_re_pusch*sizeof(int32_t));
} else {
int neg_length = frame_parms->ofdm_symbol_size-start_re;
int pos_length = nb_re_pusch-neg_length;
memcpy((void*)rxF_ext,(void*)&rxF[start_re*2],neg_length*sizeof(int32_t));
memcpy((void*)rxF_ext, (void*) &rxF[start_re*2], neg_length*sizeof(int32_t));
memcpy((void*)&rxF_ext[2*neg_length],(void*)rxF,pos_length*sizeof(int32_t));
}
memcpy((void*)ul_ch0_ext,(void*)ul_ch0,nb_re_pusch*sizeof(int32_t));
}
else if (pusch_pdu->dmrs_config_type == pusch_dmrs_type1) { // 6 REs / PRB
else if (pusch_pdu->dmrs_config_type == pusch_dmrs_type1) // 6 REs / PRB
{
AssertFatal(delta==0||delta==1,"Illegal delta %d\n",delta);
int32_t *rxF32 = (int32_t*)&rxF[start_re*2];
int32_t *rxF32 = (int32_t*)&rxF[start_re*2]; // treat int16 re/im pairs as int32
int32_t *rxF_ext32 =(int32_t*)rxF_ext;
int32_t *ul_ch032 = (int32_t*)ul_ch0;
int32_t *ul_ch0_ext32 =(int32_t*)ul_ch0_ext;
int idx,idx2,idx3;
if (start_re + nb_re_pusch < frame_parms->ofdm_symbol_size) {
for (idx=1-delta,idx2=0;idx<nb_re_pusch;idx+=2,idx2++) {
int idx, idx2, idx3;
if (start_re + nb_re_pusch < frame_parms->ofdm_symbol_size)
{
for (idx=1-delta, idx2=0; idx<nb_re_pusch; idx+=2, idx2++)
{
rxF_ext32[idx2] = rxF32[idx];
ul_ch0_ext32[idx2]= ul_ch032[idx];
}
}
else { // handle the two pieces around DC
else // handle the two pieces around DC
{
LOG_D(PHY,"Running extraction with DMRS for config 1, allocation around DC, start_re %d\n",start_re);
int neg_length = frame_parms->ofdm_symbol_size-start_re;
int pos_length = nb_re_pusch-neg_length;
for (idx=1-delta,idx2=0;idx<neg_length;idx+=2,idx2++) {
for (idx=1-delta, idx2=0; idx<neg_length; idx+=2, idx2++)
{
rxF_ext32[idx2] = rxF32[idx];
ul_ch0_ext32[idx2]= ul_ch032[idx];
}
rxF32=(int32_t*)rxF;
idx3=idx;
for (idx=1-delta;idx<pos_length;idx+=2,idx2++,idx3++) {
for (idx=1-delta; idx<pos_length; idx+=2, idx2++, idx3++)
{
rxF_ext32[idx2] = rxF32[idx];
ul_ch0_ext32[idx2]= ul_ch032[idx3];
}
}
}
else if (pusch_pdu->dmrs_config_type == pusch_dmrs_type2) { // 8 REs / PRB
else if (pusch_pdu->dmrs_config_type == pusch_dmrs_type2) // 8 REs / PRB
{
AssertFatal(delta==0||delta==2||delta==4,"Illegal delta %d\n",delta);
if (start_re + nb_re_pusch < frame_parms->ofdm_symbol_size) {
int64_t *rxF64 = (int64_t*)&rxF[start_re*2];
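
For readability, scalar sketches of the two extraction patterns handled above (helper names hypothetical):

#include <stdint.h>
#include <string.h>

// DMRS type-1 symbols: data REs occupy the comb offset opposite the DMRS
// (delta is 0 or 1), i.e. every second 32-bit RE.
static void extract_type1(const int32_t *src, int32_t *dst, int nb_re, int delta)
{
  for (int idx = 1 - delta, idx2 = 0; idx < nb_re; idx += 2, idx2++)
    dst[idx2] = src[idx];
}

// Allocation straddling DC: the first piece sits at the top of the OFDM symbol
// buffer, the remainder wraps around to its start.
static void extract_wrapped(const int32_t *rxF, int32_t *dst, int start_re, int nb_re, int ofdm_symbol_size)
{
  int neg_length = ofdm_symbol_size - start_re; // REs before the wrap point
  int pos_length = nb_re - neg_length;          // REs after the wrap point
  memcpy(dst, &rxF[start_re], neg_length * sizeof(int32_t));
  memcpy(&dst[neg_length], rxF, pos_length * sizeof(int32_t));
}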
@@ -903,43 +904,33 @@ void nr_ulsch_channel_level(int **ul_ch_estimates_ext,
uint8_t nrOfLayers,
unsigned short nb_rb)
{
short rb;
unsigned char aatx, aarx;
simde__m128i *ul_ch128, avg128U;
int16_t x = factor2(len);
int16_t y = (len)>>x;
uint32_t nb_rb_0 = len/12 + ((len%12)?1:0);
int off = ((nb_rb_0&1) == 1)? 4:0;
for (aatx = 0; aatx < nrOfLayers; aatx++) {
for (aarx = 0; aarx < frame_parms->nb_antennas_rx; aarx++) {
//clear average level
avg128U = simde_mm_setzero_si128();
ul_ch128=(simde__m128i *)&ul_ch_estimates_ext[aatx*frame_parms->nb_antennas_rx+aarx][symbol*(off+(nb_rb*12))];
avg128U = simde_mm_setzero_si128(); // set all values to zero
ul_ch128 = (simde__m128i *)&ul_ch_estimates_ext[aatx*frame_parms->nb_antennas_rx+aarx][symbol*(off+(nb_rb*12))];
for (rb = 0; rb < nb_rb_0; rb++) {
avg128U = simde_mm_add_epi32(avg128U, simde_mm_srai_epi32(simde_mm_madd_epi16(ul_ch128[0], ul_ch128[0]), x));
avg128U = simde_mm_add_epi32(avg128U, simde_mm_srai_epi32(simde_mm_madd_epi16(ul_ch128[1], ul_ch128[1]), x));
avg128U = simde_mm_add_epi32(avg128U, simde_mm_srai_epi32(simde_mm_madd_epi16(ul_ch128[2], ul_ch128[2]), x));
ul_ch128+=3;
ul_ch128 += 3;
}
avg[aatx*frame_parms->nb_antennas_rx+aarx] = (((int32_t*)&avg128U)[0] +
((int32_t*)&avg128U)[1] +
((int32_t*)&avg128U)[2] +
((int32_t*)&avg128U)[3]) / y;
}
}
simde_mm_empty();
simde_m_empty();
}
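
A scalar equivalent of the averaging loop, for reference (assuming factor2(len) returns the exponent of the largest power of two dividing len, so that len >> x is exact):

#include <stdint.h>

// Mean of |h|^2 over len REs: each squared magnitude is pre-shifted by x so the
// 32-bit accumulator cannot overflow; dividing by y = len >> x restores the scale.
static int32_t channel_level_scalar(const int16_t *ul_ch, int len, int x)
{
  int32_t acc = 0;
  for (int i = 0; i < len; i++) {
    int32_t e = (int32_t)ul_ch[2 * i] * ul_ch[2 * i] + (int32_t)ul_ch[2 * i + 1] * ul_ch[2 * i + 1];
    acc += e >> x;
  }
  return acc / (len >> x);
}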
static simde__m128i a_mult_conjb(simde__m128i a, simde__m128i b, unsigned char output_shift)
@@ -1340,28 +1331,23 @@ void nr_ulsch_detection_mrc(NR_DL_FRAME_PARMS *frame_parms,
}
for (int aa=1;aa<frame_parms->nb_antennas_rx;aa++) {
// MRC on each re of rb, both on MF output and magnitude (for 16QAM/64QAM llr computation)
for (i=0; i<nb_re>>3; i++) {
for (i=0; i<nb_re>>3; i++)
{
rxdataF_comp256[0][i] = simde_mm256_adds_epi16(rxdataF_comp256[0][i],rxdataF_comp256[aa][i]);
ul_ch_mag256[0][i] = simde_mm256_adds_epi16(ul_ch_mag256[0][i], ul_ch_mag256[aa][i]);
ul_ch_mag256b[0][i] = simde_mm256_adds_epi16(ul_ch_mag256b[0][i],ul_ch_mag256b[aa][i]);
ul_ch_mag256c[0][i] = simde_mm256_adds_epi16(ul_ch_mag256c[0][i],ul_ch_mag256c[aa][i]);
}
}
}
simde_mm_empty();
simde_m_empty();
}
void inner_rx_qpsk(int *rxF,
int *ul_ch,
int16_t *llr,
int aarx,
int length,
int output_shift) {
register simde__m256i xmmtmpD0,xmmtmpD1,xmmtmpD2,xmmtmpD3,xmmtmpD4;
void inner_rx_qpsk(int *rxF, int *ul_ch, int16_t *llr, int aarx, int length, int output_shift)
{
#if !USE_128BIT
register simde__m256i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4;
register simde__m256i complex_shuffle256 = simde_mm256_set_epi8(29,28,31,30,25,24,27,26,21,20,23,22,17,16,19,18,13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
register simde__m256i conj256 = simde_mm256_set_epi16(1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1);
@@ -1369,154 +1355,160 @@ void inner_rx_qpsk(int *rxF,
simde__m256i *ulch256 = (simde__m256i*)ul_ch;
// need to use simde__m64 because llr output is not necessarily aligned to 256 bits, but it is always aligned to 64 bits
simde__m64 *llr64 = (simde__m64 *)llr;
for (int i=0; i<((length>>3)+((length&7)>0?1:0)); i++) {
xmmp0 = simde_mm256_madd_epi16(ulch256[i], rxF256[i]);
// xmmp0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp1 = simde_mm256_shuffle_epi8(ulch256[i], complex_shuffle256);
xmmp1 = simde_mm256_sign_epi16(xmmp1, conj256);
xmmp1 = simde_mm256_madd_epi16(xmmp1, rxF256[i]);
// xmmp1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp0 = simde_mm256_srai_epi32(xmmp0, output_shift);
xmmp1 = simde_mm256_srai_epi32(xmmp1, output_shift);
xmmp2 = simde_mm256_unpacklo_epi32(xmmp0, xmmp1);
xmmp3 = simde_mm256_unpackhi_epi32(xmmp0, xmmp1);
xmmp4 = simde_mm256_packs_epi32(xmmp2,xmmp3);
if (aarx == 0)
{
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmp4,0); llr64++;
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmp4,1); llr64++;
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmp4,2); llr64++;
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmp4,3); llr64++;
}
else
{
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmp4,0))); llr64++;
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmp4,1))); llr64++;
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmp4,2))); llr64++;
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmp4,3))); llr64++;
}
}
#else
register simde__m128i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4;
register simde__m128i complex_shuffle128 = simde_mm_set_epi8(13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
register simde__m128i conj128 = simde_mm_set_epi16(-1, 1, -1, 1, -1, 1, -1, 1);
if (aarx==0)
for (int i=0;i<((length>>3)+((length&7)>0?1:0));i++) {
xmmtmpD0 = simde_mm256_madd_epi16(ulch256[i],rxF256[i]);
// xmmtmpD0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD1 = simde_mm256_shuffle_epi8(ulch256[i],complex_shuffle256);
xmmtmpD1 = simde_mm256_sign_epi16(xmmtmpD1,conj256);
xmmtmpD1 = simde_mm256_madd_epi16(xmmtmpD1,rxF256[i]);
// xmmtmpD1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD0 = simde_mm256_srai_epi32(xmmtmpD0,output_shift);
xmmtmpD1 = simde_mm256_srai_epi32(xmmtmpD1,output_shift);
xmmtmpD2 = simde_mm256_unpacklo_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD3 = simde_mm256_unpackhi_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD4 = simde_mm256_packs_epi32(xmmtmpD2,xmmtmpD3);
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmtmpD4,0); llr64++;
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmtmpD4,1); llr64++;
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmtmpD4,2); llr64++;
*llr64 = (simde__m64)simde_mm256_extract_epi64(xmmtmpD4,3); llr64++;
simde__m128i *rxF128 = (simde__m128i*)rxF;
simde__m128i *ulch128 = (simde__m128i*)ul_ch;
simde__m128i *llr128 = (simde__m128i*)llr;
for (int i = 0; i < (length >> 2); i++) {
xmmp0 = simde_mm_sign_epi16(ulch128[i], conj128);
xmmp0 = simde_mm_madd_epi16(xmmp0, rxF128[i]);
// xmmp0 contains real part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmmp1 = simde_mm_shuffle_epi8(ulch128[i], complex_shuffle128);
xmmp1 = simde_mm_madd_epi16(xmmp1, rxF128[i]);
// xmmp1 contains imag part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmmp0 = simde_mm_srai_epi32(xmmp0, output_shift);
xmmp1 = simde_mm_srai_epi32(xmmp1, output_shift);
xmmp2 = simde_mm_unpacklo_epi32(xmmp0, xmmp1);
xmmp3 = simde_mm_unpackhi_epi32(xmmp0, xmmp1);
xmmp4 = simde_mm_packs_epi32(xmmp2, xmmp3);
if (aarx == 0)
*llr128 = xmmp4;
else
*llr128 = simde_mm_add_epi16(*llr128, xmmp4);
llr128++;
}
if (length & 3)
{
int i = (length>>1) - 1;
simde__m64* llr64 = (simde__m64*)llr128;
simde__m64 xmm0, xmm1, xmm2, xmm3, xmm4;
simde__m64 complex_shuffle64 = simde_mm_set_pi8(5,4,7,6,1,0,3,2);
simde__m64 conj64 = simde_mm_set_pi16(-1,1,-1,1);
simde__m64 *rxF64 = (simde__m64*)rxF;
simde__m64 *ulch64 = (simde__m64*)ul_ch;
xmm0 = simde_mm_sign_pi16(ulch64[i], conj64);
xmm0 = simde_mm_madd_pi16(xmm0, rxF64[i]);
// xmm0 contains real part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmm1 = simde_mm_shuffle_pi8(ulch64[i], complex_shuffle64);
xmm1 = simde_mm_madd_pi16(xmm1, rxF64[i]);
// xmm1 contains imag part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmm0 = simde_mm_srai_pi32(xmm0, output_shift);
xmm1 = simde_mm_srai_pi32(xmm1, output_shift);
xmm2 = simde_mm_unpacklo_pi32(xmm0, xmm1);
xmm3 = simde_mm_unpackhi_pi32(xmm0, xmm1);
xmm4 = simde_mm_packs_pi32(xmm2, xmm3);
if (aarx == 0)
*llr64 = xmm4;
else
for (int i=0;i<((length>>3)+((length&7)>0?1:0));i++) {
xmmtmpD0 = simde_mm256_madd_epi16(ulch256[i],rxF256[i]);
// xmmtmpD0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD1 = simde_mm256_shuffle_epi8(ulch256[i],complex_shuffle256);
xmmtmpD1 = simde_mm256_sign_epi16(xmmtmpD1,conj256);
xmmtmpD1 = simde_mm256_madd_epi16(xmmtmpD1,rxF256[i]);
// xmmtmpD1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD0 = simde_mm256_srai_epi32(xmmtmpD0,output_shift);
xmmtmpD1 = simde_mm256_srai_epi32(xmmtmpD1,output_shift);
xmmtmpD2 = simde_mm256_unpacklo_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD3 = simde_mm256_unpackhi_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD4 = simde_mm256_packs_epi32(xmmtmpD2,xmmtmpD3);
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmtmpD4,0))); llr64++;
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmtmpD4,1))); llr64++;
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmtmpD4,2))); llr64++;
*llr64 = simde_mm_adds_pi16(*llr64,(simde__m64)(simde_mm256_extract_epi64(xmmtmpD4,3))); llr64++;
*llr64 = simde_mm_add_pi16(*llr64, xmm4);
}
#endif
}
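
For QPSK the channel-compensated (re,im) pair is already the two soft bits, so combining across receive antennas is just a 16-bit add with saturation (the simde_mm_adds_* calls above). A scalar sketch of that accumulation:

#include <stdint.h>

// Saturating 16-bit LLR accumulation across receive antennas (sketch).
static inline int16_t sat_add_llr16(int16_t a, int16_t b)
{
  int32_t s = (int32_t)a + (int32_t)b;
  if (s > INT16_MAX) return INT16_MAX;
  if (s < INT16_MIN) return INT16_MIN;
  return (int16_t)s;
}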
void inner_rx_256qam(int *rxF, int *ul_ch, int16_t *llr, int aarx, int length,int output_shift) {
#ifndef USE_128BIT
register simde__m256i xmmtmpD0,xmmtmpD1,xmmtmpD2,xmmtmpD3,xmmtmpD4,xmmtmpD5,xmmtmpD6,xmmtmpD7,xmmtmpD8,xmmtmpD9;
#if !USE_128BIT
register simde__m256i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4, xmmp5, xmmp6, xmmp7, xmmp8, xmmp9;
register simde__m256i complex_shuffle256 = simde_mm256_set_epi8(29,28,31,30,25,24,27,26,21,20,23,22,17,16,19,18,13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
register simde__m256i conj256 = simde_mm256_set_epi16(1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1);
register simde__m256i QAM_amp256 = simde_mm256_set1_epi16(QAM256_n1); // 2/sqrt(10)
register simde__m256i QAM_amp256 = simde_mm256_set1_epi16(QAM256_n1);
register simde__m256i QAM_amp256b = simde_mm256_set1_epi16(QAM256_n2);
register simde__m256i QAM_amp256c = simde_mm256_set1_epi16(QAM256_n3);
simde__m256i *rxF256 = (simde__m256i*)rxF;
simde__m256i *ulch256 = (simde__m256i*)ul_ch;
simde__m256i *llr256 = (simde__m256i *)llr;
if (aarx==0) {
for (int i=0;i<(length>>3);i++) {
xmmtmpD0 = simde_mm256_madd_epi16(ulch256[i],rxF256[i]);
// xmmtmpD0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD1 = simde_mm256_shuffle_epi8(ulch256[i],complex_shuffle256);
xmmtmpD1 = simde_mm256_sign_epi16(xmmtmpD1,conj256);
xmmtmpD1 = simde_mm256_madd_epi16(xmmtmpD1,rxF256[i]);
// xmmtmpD1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD0 = simde_mm256_srai_epi32(xmmtmpD0,output_shift);
xmmtmpD1 = simde_mm256_srai_epi32(xmmtmpD1,output_shift);
xmmtmpD2 = simde_mm256_unpacklo_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD3 = simde_mm256_unpackhi_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD4 = simde_mm256_packs_epi32(xmmtmpD2,xmmtmpD3);
for (int i = 0; i < ((length >> 3) + (( length & 7) > 0 ? 1 : 0)); i++)
{
xmmp0 = simde_mm256_madd_epi16(ulch256[i],rxF256[i]);
// xmmp0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp1 = simde_mm256_shuffle_epi8(ulch256[i],complex_shuffle256);
xmmp1 = simde_mm256_sign_epi16(xmmp1,conj256);
xmmp1 = simde_mm256_madd_epi16(xmmp1,rxF256[i]);
// xmmp1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp0 = simde_mm256_srai_epi32(xmmp0,output_shift);
xmmp1 = simde_mm256_srai_epi32(xmmp1,output_shift);
xmmp2 = simde_mm256_unpacklo_epi32(xmmp0,xmmp1);
xmmp3 = simde_mm256_unpackhi_epi32(xmmp0,xmmp1);
xmmp4 = simde_mm256_packs_epi32(xmmp2,xmmp3);
// compute channel amplitude for LLR
xmmtmpD0 = simde_mm256_madd_epi16(ulch256[i],ulch256[i]);
xmmtmpD0 = simde_mm256_srai_epi32(xmmtmpD0,output_shift);
xmmtmpD0 = simde_mm256_packs_epi32(xmmtmpD0,xmmtmpD0); // contains 16 LLRs
xmmtmpD2 = simde_mm256_unpacklo_epi16(xmmtmpD0,xmmtmpD0);
xmmtmpD1 = simde_mm256_mulhrs_epi16(xmmtmpD2,QAM_amp256);
xmmtmpD6 = simde_mm256_mulhrs_epi16(xmmtmpD2,QAM_amp256b);
xmmtmpD8 = simde_mm256_mulhrs_epi16(xmmtmpD2,QAM_amp256c);
xmmtmpD2 = simde_mm256_abs_epi16(xmmtmpD4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmmtmpD2 = simde_mm256_subs_epi16(xmmtmpD1,xmmtmpD2); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
// xmmtmpD2 contains 16 LLRs
xmmtmpD7 = simde_mm256_abs_epi16(xmmtmpD2);
xmmtmpD7 = simde_mm256_subs_epi16(xmmtmpD6,xmmtmpD7); // contains 16 LLRs
xmmtmpD9 = simde_mm256_abs_epi16(xmmtmpD7);
xmmtmpD9 = simde_mm256_subs_epi16(xmmtmpD8,xmmtmpD9); // contains 16 LLRs
// xmmtmpD4 A0 A1 A2 A3 A4 A5 A6 A7
// xmmtmpD2 B0 B1 B2 B3 B4 B5 B6 B7
// xmmtmpD7 C0 C1 C2 C3 C4 C5 C6 C7
// xmmtmpD9 D0 D1 D2 D3 D4 D5 D6 D7
xmmtmpD1 = simde_mm256_unpacklo_epi32(xmmtmpD4,xmmtmpD2); // A0 B0 A1 B1 A4 B4 A5 B5
xmmtmpD3 = simde_mm256_unpackhi_epi32(xmmtmpD4,xmmtmpD2); // A2 B2 A3 B3 A6 B6 A7 B7
xmmtmpD5 = simde_mm256_unpacklo_epi32(xmmtmpD7,xmmtmpD9); // C0 D0 C1 D1 C4 D4 C5 D5
xmmtmpD6 = simde_mm256_unpackhi_epi32(xmmtmpD7,xmmtmpD9); // C2 D2 C3 D3 C6 D6 C7 D7
xmmtmpD2 = simde_mm256_unpacklo_epi64(xmmtmpD1,xmmtmpD5); // A0 B0 C0 D0 A4 B4 C4 D4
xmmtmpD4 = simde_mm256_unpackhi_epi64(xmmtmpD1,xmmtmpD5); // A1 B1 C1 D1 A5 B5 C5 D5
xmmtmpD1 = simde_mm256_unpacklo_epi64(xmmtmpD3,xmmtmpD6); // A2 B2 C2 D2 A6 B6 C6 D6
xmmtmpD5 = simde_mm256_unpackhi_epi64(xmmtmpD3,xmmtmpD6); // A3 B3 C3 D3 A7 B7 C7 D7
llr256[0] = simde_mm256_permute2x128_si256(xmmtmpD2, xmmtmpD4, 0x20); // A0 B0 C0 D0 A1 B1 C1 D1
llr256[1] = simde_mm256_permute2x128_si256(xmmtmpD1, xmmtmpD5, 0x20); // A2 B2 C2 D2 A3 B3 C3 D3
llr256[2] = simde_mm256_permute2x128_si256(xmmtmpD2, xmmtmpD4, 0x31); // A4 B4 C4 D4 A5 B5 C5 D5
llr256[3] = simde_mm256_permute2x128_si256(xmmtmpD1, xmmtmpD5, 0x31); // A6 B6 C6 D6 A7 B7 C7 D7
xmmp0 = simde_mm256_madd_epi16(ulch256[i],ulch256[i]);
xmmp0 = simde_mm256_srai_epi32(xmmp0,output_shift);
xmmp0 = simde_mm256_packs_epi32(xmmp0,xmmp0); // contains 16 LLRs
xmmp2 = simde_mm256_unpacklo_epi16(xmmp0,xmmp0);
xmmp1 = simde_mm256_mulhrs_epi16(xmmp2,QAM_amp256);
xmmp6 = simde_mm256_mulhrs_epi16(xmmp2,QAM_amp256b);
xmmp8 = simde_mm256_mulhrs_epi16(xmmp2,QAM_amp256c);
xmmp2 = simde_mm256_abs_epi16(xmmp4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmmp2 = simde_mm256_subs_epi16(xmmp1,xmmp2); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
// xmmp2 contains 16 LLRs
xmmp7 = simde_mm256_abs_epi16(xmmp2);
xmmp7 = simde_mm256_subs_epi16(xmmp6,xmmp7); // contains 16 LLRs
xmmp9 = simde_mm256_abs_epi16(xmmp7);
xmmp9 = simde_mm256_subs_epi16(xmmp8,xmmp9); // contains 16 LLRs
// xmmp4 A0 A1 A2 A3 A4 A5 A6 A7
// xmmp2 B0 B1 B2 B3 B4 B5 B6 B7
// xmmp7 C0 C1 C2 C3 C4 C5 C6 C7
// xmmp9 D0 D1 D2 D3 D4 D5 D6 D7
xmmp1 = simde_mm256_unpacklo_epi32(xmmp4,xmmp2); // A0 B0 A1 B1 A4 B4 A5 B5
xmmp3 = simde_mm256_unpackhi_epi32(xmmp4,xmmp2); // A2 B2 A3 B3 A6 B6 A7 B7
xmmp5 = simde_mm256_unpacklo_epi32(xmmp7,xmmp9); // C0 D0 C1 D1 C4 D4 C5 D5
xmmp6 = simde_mm256_unpackhi_epi32(xmmp7,xmmp9); // C2 D2 C3 D3 C6 D6 C7 D7
xmmp2 = simde_mm256_unpacklo_epi64(xmmp1,xmmp5); // A0 B0 C0 D0 A4 B4 C4 D4
xmmp4 = simde_mm256_unpackhi_epi64(xmmp1,xmmp5); // A1 B1 C1 D1 A5 B5 C5 D5
xmmp1 = simde_mm256_unpacklo_epi64(xmmp3,xmmp6); // A2 B2 C2 D2 A6 B6 C6 D6
xmmp5 = simde_mm256_unpackhi_epi64(xmmp3,xmmp6); // A3 B3 C3 D3 A7 B7 C7 D7
if (aarx == 0)
{
llr256[0] = simde_mm256_permute2x128_si256(xmmp2, xmmp4, 0x20); // A0 B0 C0 D0 A1 B1 C1 D1
llr256[1] = simde_mm256_permute2x128_si256(xmmp1, xmmp5, 0x20); // A2 B2 C2 D2 A3 B3 C3 D3
llr256[2] = simde_mm256_permute2x128_si256(xmmp2, xmmp4, 0x31); // A4 B4 C4 D4 A5 B5 C5 D5
llr256[3] = simde_mm256_permute2x128_si256(xmmp1, xmmp5, 0x31); // A6 B6 C6 D6 A7 B7 C7 D7
llr256+=4;
}
} else {
for (int i=0;i<(length>>3);i++) {
xmmtmpD0 = simde_mm256_madd_epi16(ulch256[i],rxF256[i]);
// xmmtmpD0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD1 = simde_mm256_shuffle_epi8(ulch256[i],complex_shuffle256);
xmmtmpD1 = simde_mm256_sign_epi16(xmmtmpD1,conj256);
xmmtmpD1 = simde_mm256_madd_epi16(xmmtmpD1,rxF256[i]);
// xmmtmpD1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD0 = simde_mm256_srai_epi32(xmmtmpD0,output_shift);
xmmtmpD1 = simde_mm256_srai_epi32(xmmtmpD1,output_shift);
xmmtmpD2 = simde_mm256_unpacklo_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD3 = simde_mm256_unpackhi_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD4 = simde_mm256_packs_epi32(xmmtmpD2,xmmtmpD3);
// compute channel amplitude for LLR
xmmtmpD0 = simde_mm256_madd_epi16(ulch256[i],ulch256[i]);
xmmtmpD0 = simde_mm256_srai_epi32(xmmtmpD0,output_shift);
xmmtmpD0 = simde_mm256_packs_epi32(xmmtmpD0,xmmtmpD0);
xmmtmpD2 = simde_mm256_unpacklo_epi16(xmmtmpD0,xmmtmpD0);
xmmtmpD1 = simde_mm256_mulhrs_epi16(xmmtmpD2,QAM_amp256);
xmmtmpD6 = simde_mm256_mulhrs_epi16(xmmtmpD2,QAM_amp256b);
xmmtmpD8 = simde_mm256_mulhrs_epi16(xmmtmpD2,QAM_amp256c);
xmmtmpD2 = simde_mm256_abs_epi16(xmmtmpD4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmmtmpD2 = simde_mm256_subs_epi16(xmmtmpD1,xmmtmpD2); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
xmmtmpD7 = simde_mm256_abs_epi16(xmmtmpD2);
xmmtmpD7 = simde_mm256_subs_epi16(xmmtmpD6,xmmtmpD7);
xmmtmpD9 = simde_mm256_abs_epi16(xmmtmpD7);
xmmtmpD9 = simde_mm256_subs_epi16(xmmtmpD8,xmmtmpD9);
// xmmtmpD4 A0 A1 A2 A3 A4 A5 A6 A7
// xmmtmpD2 B0 B1 B2 B3 B4 B5 B6 B7
// xmmtmpD7 C0 C1 C2 C3 C4 C5 C6 C7
// xmmtmpD9 D0 D1 D2 D3 D4 D5 D6 D7
xmmtmpD1 = simde_mm256_unpacklo_epi32(xmmtmpD4,xmmtmpD2); // A0 B0 A1 B1 A4 B4 A5 B5
xmmtmpD3 = simde_mm256_unpackhi_epi32(xmmtmpD4,xmmtmpD2); // A2 B2 A3 B3 A6 B6 A7 B7
xmmtmpD5 = simde_mm256_unpacklo_epi32(xmmtmpD7,xmmtmpD9); // C0 D0 C1 D1 C4 D4 C5 D5
xmmtmpD6 = simde_mm256_unpackhi_epi32(xmmtmpD7,xmmtmpD9); // C2 D2 C3 D3 C6 D6 C7 D7
xmmtmpD2 = simde_mm256_unpacklo_epi64(xmmtmpD1,xmmtmpD5); // A0 B0 C0 D0 A4 B4 C4 D4
xmmtmpD4 = simde_mm256_unpackhi_epi64(xmmtmpD1,xmmtmpD5); // A1 B1 C1 D1 A5 B5 C5 D5
xmmtmpD1 = simde_mm256_unpacklo_epi64(xmmtmpD3,xmmtmpD6); // A2 B2 C2 D2 A6 B6 C6 D6
xmmtmpD5 = simde_mm256_unpackhi_epi64(xmmtmpD3,xmmtmpD6); // A3 B3 C3 D3 A7 B7 C7 D7
llr256[0] = simde_mm256_adds_epi16(llr256[0],simde_mm256_permute2x128_si256(xmmtmpD2, xmmtmpD4, 0x20)); // A0 B0 C0 D0 A1 B1 C1 D1
llr256[1] = simde_mm256_adds_epi16(llr256[1],simde_mm256_permute2x128_si256(xmmtmpD1, xmmtmpD5, 0x20)); // A2 B2 C2 D2 A3 B3 C3 D3
llr256[2] = simde_mm256_adds_epi16(llr256[2],simde_mm256_permute2x128_si256(xmmtmpD2, xmmtmpD4, 0x31)); // A4 B4 C4 D4 A5 B5 C5 D5
llr256[3] = simde_mm256_adds_epi16(llr256[3],simde_mm256_permute2x128_si256(xmmtmpD1, xmmtmpD5, 0x31)); // A6 B6 C6 D6 A7 B7 C7 D7
else
{
llr256[0] = simde_mm256_adds_epi16(llr256[0],simde_mm256_permute2x128_si256(xmmp2, xmmp4, 0x20)); // A0 B0 C0 D0 A1 B1 C1 D1
llr256[1] = simde_mm256_adds_epi16(llr256[1],simde_mm256_permute2x128_si256(xmmp1, xmmp5, 0x20)); // A2 B2 C2 D2 A3 B3 C3 D3
llr256[2] = simde_mm256_adds_epi16(llr256[2],simde_mm256_permute2x128_si256(xmmp2, xmmp4, 0x31)); // A4 B4 C4 D4 A5 B5 C5 D5
llr256[3] = simde_mm256_adds_epi16(llr256[3],simde_mm256_permute2x128_si256(xmmp1, xmmp5, 0x31)); // A6 B6 C6 D6 A7 B7 C7 D7
llr256+=4;
}
}
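
The four 256QAM soft bits per component come from a nested absolute-value rule: each lower bit folds the previous LLR with abs() against a smaller amplitude threshold. A scalar sketch (a1 > a2 > a3 are |h|^2 scaled by QAM256_n1..n3 via mulhrs; the vector code uses saturating subtracts, which this sketch omits):

#include <stdint.h>
#include <stdlib.h>

// Per-component 256QAM soft-bit recursion vectorized above (sketch).
static void llr256_component(int16_t y, int16_t a1, int16_t a2, int16_t a3, int16_t llr[4])
{
  llr[0] = y;                          // MSB: sign of the compensated component
  llr[1] = (int16_t)(a1 - abs(llr[0]));
  llr[2] = (int16_t)(a2 - abs(llr[1]));
  llr[3] = (int16_t)(a3 - abs(llr[2]));
}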
@@ -1554,7 +1546,7 @@ void inner_rx_256qam(int *rxF, int *ul_ch, int16_t *llr, int aarx, int length,in
xmm6 = simde_mm_mulhrs_epi16(xmm2,QAM_ampc);
xmm0 = simde_mm_abs_epi16(xmm4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmm0 = simde_mm_subs_epi16(xmm1,xmm0); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
// xmmtmpD2 contains 8 LLRs
// xmmp2 contains 8 LLRs
xmm1 = simde_mm_abs_epi16(xmm0);
xmm1 = simde_mm_subs_epi16(xmm5,xmm1); // contains 8 LLRs
xmm2 = simde_mm_abs_epi16(xmm1);
@@ -1574,7 +1566,8 @@ void inner_rx_256qam(int *rxF, int *ul_ch, int16_t *llr, int aarx, int length,in
llr128[3] = simde_mm_unpackhi_epi64(xmm4,xmm6); // A3 B3 C3 D3
llr128+=4;
}
else {
else
{
llr128[0] = simde_mm_adds_epi16(llr128[0],simde_mm_unpacklo_epi64(xmm3,xmm5)); // A0 B0 C0 D0
llr128[1] = simde_mm_adds_epi16(llr128[1],simde_mm_unpackhi_epi64(xmm3,xmm5)); // A1 B1 C1 D1
llr128[2] = simde_mm_adds_epi16(llr128[2],simde_mm_unpacklo_epi64(xmm4,xmm6)); // A2 B2 C2 D2
@@ -1583,134 +1576,136 @@ void inner_rx_256qam(int *rxF, int *ul_ch, int16_t *llr, int aarx, int length,in
}
}
#else
int nb_re128 = length>>2;
simde__m128i xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6;
simde__m128i complex_shuffle128 = simde_mm_set_epi8(13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
simde__m128i conj128 = simde_mm_set_epi16(1,-1,1,-1,1,-1,1,-1);
simde__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6;
simde__m128i complex_shuffle128 = simde_mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
simde__m128i conj128 = simde_mm_set_epi16(-1, 1, -1, 1, -1, 1, -1, 1);
simde__m128i *rxF128 = (simde__m128i*)rxF;
simde__m128i *ulch128 = (simde__m128i*)ul_ch;
simde__m128i QAM_amp = simde_mm_set1_epi16(QAM256_n1); // 2/sqrt(10)
simde__m128i QAM_amp = simde_mm_set1_epi16(QAM256_n1);
simde__m128i QAM_ampb = simde_mm_set1_epi16(QAM256_n2);
simde__m128i QAM_ampc = simde_mm_set1_epi16(QAM256_n3);
simde__m128i *llr128 = (simde__m128i*)llr;
for (int i=0;i<nb_re128;i++) {
xmm0 = simde_mm_madd_epi16(ulch128[i],rxF128[i]);
// xmm0 contains real part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmm1 = simde_mm_shuffle_epi8(ulch128[i],complex_shuffle128);
xmm1 = simde_mm_sign_epi16(xmm1,conj128);
xmm1 = simde_mm_madd_epi16(xmm1,rxF128[i]);
// xmm1 contains imag part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmm0 = simde_mm_srai_epi32(xmm0,output_shift);
xmm1 = simde_mm_srai_epi32(xmm1,output_shift);
xmm2 = simde_mm_unpacklo_epi32(xmm0,xmm1);
xmm3 = simde_mm_unpackhi_epi32(xmm0,xmm1);
xmm4 = simde_mm_packs_epi32(xmm2,xmm3);
for (int i = 0; i < (length >> 2); i++)
{
xmm0 = simde_mm_sign_epi16(ulch128[i], conj128);
xmm0 = simde_mm_madd_epi16(xmm0, rxF128[i]);
// xmm0 contains real part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmm1 = simde_mm_shuffle_epi8(ulch128[i], complex_shuffle128);
xmm1 = simde_mm_madd_epi16(xmm1, rxF128[i]);
// xmm1 contains imag part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmm0 = simde_mm_srai_epi32(xmm0, output_shift);
xmm1 = simde_mm_srai_epi32(xmm1, output_shift);
xmm2 = simde_mm_unpacklo_epi32(xmm0, xmm1);
xmm3 = simde_mm_unpackhi_epi32(xmm0, xmm1);
xmm4 = simde_mm_packs_epi32(xmm2, xmm3);
// compute channel amplitude for LLR
xmm0 = simde_mm_madd_epi16(ulch128[i],ulch128[i]);
xmm0 = simde_mm_srai_epi32(xmm0,output_shift);
xmm0 = simde_mm_packs_epi32(xmm0,xmm0); // contains 16 LLRs
xmm2 = simde_mm_unpacklo_epi16(xmm0,xmm0);
xmm1 = simde_mm_mulhrs_epi16(xmm2,QAM_amp);
xmm5 = simde_mm_mulhrs_epi16(xmm2,QAM_ampb);
xmm6 = simde_mm_mulhrs_epi16(xmm2,QAM_ampc);
xmm0 = simde_mm_madd_epi16(ulch128[i], ulch128[i]); // |h|^2
xmm0 = simde_mm_srai_epi32(xmm0, output_shift);
xmm0 = simde_mm_packs_epi32(xmm0, xmm0);
xmm2 = simde_mm_unpacklo_epi16(xmm0, xmm0);
xmm1 = simde_mm_mulhrs_epi16(xmm2, QAM_amp);
xmm5 = simde_mm_mulhrs_epi16(xmm2, QAM_ampb);
xmm6 = simde_mm_mulhrs_epi16(xmm2, QAM_ampc);
xmm0 = simde_mm_abs_epi16(xmm4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmm0 = simde_mm_subs_epi16(xmm1,xmm0); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
// xmmtmpD2 contains 8 LLRs
xmm0 = simde_mm_subs_epi16(xmm1, xmm0); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
xmm1 = simde_mm_abs_epi16(xmm0);
xmm1 = simde_mm_subs_epi16(xmm5,xmm1); // contains 8 LLRs
xmm1 = simde_mm_subs_epi16(xmm5, xmm1); // contains 8 LLRs
xmm2 = simde_mm_abs_epi16(xmm1);
xmm2 = simde_mm_subs_epi16(xmm6,xmm2); // contains 8 LLRs
xmm2 = simde_mm_subs_epi16(xmm6, xmm2); // contains 8 LLRs
// rxF[i] A0 A1 A2 A3
// xmm0 B0 B1 B2 B3
// xmm1 C0 C1 C2 C3
// xmm2 D0 D1 D2 D3
xmm3 = simde_mm_unpacklo_epi32(rxF128[i],xmm0); // A0 B0 A1 B1
xmm4 = simde_mm_unpackhi_epi32(rxF128[i],xmm0); // A2 B2 A3 B3
xmm5 = simde_mm_unpacklo_epi32(xmm1,xmm2); // C0 D0 C1 D1
xmm6 = simde_mm_unpackhi_epi32(xmm1,xmm2); // C2 D2 C3 D3
xmm3 = simde_mm_unpacklo_epi32(xmm4, xmm0); // A0 B0 A1 B1
xmm4 = simde_mm_unpackhi_epi32(xmm4, xmm0); // A2 B2 A3 B3
xmm5 = simde_mm_unpacklo_epi32(xmm1, xmm2); // C0 D0 C1 D1
xmm6 = simde_mm_unpackhi_epi32(xmm1, xmm2); // C2 D2 C3 D3
if (aarx == 0) {
llr128[0] = simde_mm_unpacklo_epi64(xmm3,xmm5); // A0 B0 C0 D0
llr128[1] = simde_mm_unpackhi_epi64(xmm3,xmm5); // A1 B1 C1 D1
llr128[2] = simde_mm_unpacklo_epi64(xmm4,xmm6); // A2 B2 C2 D2
llr128[3] = simde_mm_unpackhi_epi64(xmm4,xmm6); // A3 B3 C3 D3
llr128+=4;
llr128[0] = simde_mm_unpacklo_epi64(xmm3, xmm5); // A0 B0 C0 D0
llr128[1] = simde_mm_unpackhi_epi64(xmm3, xmm5); // A1 B1 C1 D1
llr128[2] = simde_mm_unpacklo_epi64(xmm4, xmm6); // A2 B2 C2 D2
llr128[3] = simde_mm_unpackhi_epi64(xmm4, xmm6); // A3 B3 C3 D3
}
else {
llr128[0] = simde_mm_adds_epi16(llr128[0],simde_mm_unpacklo_epi64(xmm3,xmm5)); // A0 B0 C0 D0
llr128[1] = simde_mm_adds_epi16(llr128[1],simde_mm_unpackhi_epi64(xmm3,xmm5)); // A1 B1 C1 D1
llr128[2] = simde_mm_adds_epi16(llr128[2],simde_mm_unpacklo_epi64(xmm4,xmm6)); // A2 B2 C2 D2
llr128[3] = simde_mm_adds_epi16(llr128[3],simde_mm_unpackhi_epi64(xmm4,xmm6)); // A3 B3 C3 D3
llr128+=4;
llr128[0] = simde_mm_adds_epi16(llr128[0], simde_mm_unpacklo_epi64(xmm3, xmm5)); // A0 B0 C0 D0
llr128[1] = simde_mm_adds_epi16(llr128[1], simde_mm_unpackhi_epi64(xmm3, xmm5)); // A1 B1 C1 D1
llr128[2] = simde_mm_adds_epi16(llr128[2], simde_mm_unpacklo_epi64(xmm4, xmm6)); // A2 B2 C2 D2
llr128[3] = simde_mm_adds_epi16(llr128[3], simde_mm_unpackhi_epi64(xmm4, xmm6)); // A3 B3 C3 D3
}
llr128+=4;
}
#endif
simde__m64 *llr64 = (simde__m64*)llr128;
if ((length&7) == 6) { // there is a single 64-bit input element remaining
int nb_re64 = length>>1;
simde__m64 xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6;
if (length & 3)
{
simde__m64 *llr64 = (simde__m64*) llr128;
int i = (length>>1) - 1;
simde__m64 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6;
simde__m64 complex_shuffle64 = simde_mm_set_pi8(5,4,7,6,1,0,3,2);
simde__m64 conj64 = simde_mm_set_pi16(1,-1,1,-1);
simde__m64 conj64 = simde_mm_set_pi16(-1, 1, -1, 1);
simde__m64 *rxF64 = (simde__m64*)rxF;
simde__m64 *ulch64 = (simde__m64*)ul_ch;
simde__m64 QAM_amp = simde_mm_set1_pi16(QAM256_n1); // 2/sqrt(10)
simde__m64 QAM_amp = simde_mm_set1_pi16(QAM256_n1);
simde__m64 QAM_ampb = simde_mm_set1_pi16(QAM256_n2);
simde__m64 QAM_ampc = simde_mm_set1_pi16(QAM256_n3);
xmm0 = simde_mm_madd_pi16(ulch64[nb_re64-1],rxF64[nb_re64-1]);
// xmm0 contains real part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmm1 = simde_mm_shuffle_pi8(ulch64[nb_re64-1],complex_shuffle64);
xmm1 = simde_mm_sign_pi16(xmm1,conj64);
xmm1 = simde_mm_madd_pi16(xmm1,rxF64[nb_re64-1]);
// xmm1 contains imag part of 4 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmm0 = simde_mm_srai_pi32(xmm0,output_shift);
xmm1 = simde_mm_srai_pi32(xmm1,output_shift);
xmm2 = simde_mm_unpacklo_pi32(xmm0,xmm1);
xmm3 = simde_mm_unpackhi_pi32(xmm0,xmm1);
xmm4 = simde_mm_packs_pi32(xmm2,xmm3);
xmm0 = simde_mm_sign_pi16(ulch64[i], conj64);
xmm0 = simde_mm_madd_pi16(xmm0, rxF64[i]);
// xmm0 contains real part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmm1 = simde_mm_shuffle_pi8(ulch64[i], complex_shuffle64);
xmm1 = simde_mm_madd_pi16(xmm1, rxF64[i]);
// xmm1 contains imag part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmm0 = simde_mm_srai_pi32(xmm0, output_shift);
xmm1 = simde_mm_srai_pi32(xmm1, output_shift);
xmm2 = simde_mm_unpacklo_pi32(xmm0, xmm1);
xmm3 = simde_mm_unpackhi_pi32(xmm0, xmm1);
xmm4 = simde_mm_packs_pi32(xmm2, xmm3);
// compute channel amplitude for LLR
xmm0 = simde_mm_madd_pi16(ulch64[nb_re64-1],ulch64[nb_re64-1]);
xmm0 = simde_mm_srai_pi32(xmm0,output_shift);
xmm0 = simde_mm_packs_pi32(xmm0,xmm0); // contains 16 LLRs
xmm2 = simde_mm_unpacklo_pi16(xmm0,xmm0);
xmm1 = simde_mm_mulhrs_pi16(xmm2,QAM_amp);
xmm5 = simde_mm_mulhrs_pi16(xmm2,QAM_ampb);
xmm6 = simde_mm_mulhrs_pi16(xmm2,QAM_ampc);
xmm0 = simde_mm_madd_pi16(ulch64[i], ulch64[i]); // |h|^2
xmm0 = simde_mm_srai_pi32(xmm0, output_shift);
xmm0 = simde_mm_packs_pi32(xmm0, xmm0);
xmm2 = simde_mm_unpacklo_pi16(xmm0, xmm0);
xmm1 = simde_mm_mulhrs_pi16(xmm2, QAM_amp);
xmm5 = simde_mm_mulhrs_pi16(xmm2, QAM_ampb);
xmm6 = simde_mm_mulhrs_pi16(xmm2, QAM_ampc);
xmm0 = simde_mm_abs_pi16(xmm4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmm0 = simde_mm_subs_pi16(xmm1,xmm0); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
// xmmtmpD2 contains 8 LLRs
xmm0 = simde_mm_subs_pi16(xmm1, xmm0); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
xmm1 = simde_mm_abs_pi16(xmm0);
xmm1 = simde_mm_subs_pi16(xmm5,xmm1); // contains 8 LLRs
xmm1 = simde_mm_subs_pi16(xmm5, xmm1); // contains 8 LLRs
xmm2 = simde_mm_abs_pi16(xmm1);
xmm2 = simde_mm_subs_pi16(xmm6,xmm2); // contains 8 LLRs
// rxF[i] A0 A1
// xmm0 B0 B1
// xmm1 C0 C1
// xmm2 D0 D1
if (aarx==0) {
llr64[0] = simde_m_punpckldq(rxF64[nb_re64-1],xmm0); // A0 B0
llr64[2] = simde_m_punpckhdq(rxF64[nb_re64-1],xmm0); // A1 B1
llr64[1] = simde_m_punpckldq(xmm1,xmm2); // C0 D0
llr64[3] = simde_m_punpckhdq(xmm1,xmm2); // C1 D1
} else {
llr64[0] = simde_mm_adds_pi16(llr64[0],simde_m_punpckldq(rxF64[nb_re64-1],xmm0)); // A0 B0
llr64[2] = simde_mm_adds_pi16(llr64[1],simde_m_punpckhdq(rxF64[nb_re64-1],xmm0)); // A1 B1
llr64[1] = simde_mm_adds_pi16(llr64[2],simde_m_punpckldq(xmm1,xmm2)); // C0 D0
llr64[3] = simde_mm_adds_pi16(llr64[3],simde_m_punpckhdq(xmm1,xmm2)); // C1 D1
xmm2 = simde_mm_subs_pi16(xmm6, xmm2); // contains 8 LLRs
xmm3 = simde_mm_unpacklo_pi32(xmm4, xmm0);
xmm4 = simde_mm_unpackhi_pi32(xmm4, xmm0);
xmm5 = simde_mm_unpacklo_pi32(xmm1, xmm2);
xmm6 = simde_mm_unpackhi_pi32(xmm1, xmm2);
if (aarx == 0) {
llr64[0] = simde_m_punpckldq(xmm3, xmm5);
llr64[1] = simde_m_punpckhdq(xmm3, xmm5);
llr64[2] = simde_m_punpckldq(xmm4, xmm6);
llr64[3] = simde_m_punpckhdq(xmm4, xmm6);
}
else
{
llr64[0] = simde_mm_adds_pi16(llr64[0], simde_m_punpckldq(xmm3, xmm5));
llr64[1] = simde_mm_adds_pi16(llr64[1], simde_m_punpckhdq(xmm3, xmm5));
llr64[2] = simde_mm_adds_pi16(llr64[2], simde_m_punpckldq(xmm4, xmm6));
llr64[3] = simde_mm_adds_pi16(llr64[3], simde_m_punpckhdq(xmm4, xmm6));
}
}
#endif
}
void inner_rx_64qam(int * restrict rxF, int * restrict ul_ch, int16_t *restrict llr, int aarx, int length,int output_shift) {
register simde__m256i xmmtmpD0,xmmtmpD1,xmmtmpD2,xmmtmpD3,xmmtmpD4,xmmtmpD6,xmmtmpD7;
register simde__m256i complex_shuffle256 = simde_mm256_set_epi8(29,28,31,30,25,24,27,26,21,20,23,22,17,16,19,18,13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
register simde__m256i conj256 = simde_mm256_set_epi16(1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1);
void inner_rx_64qam(int * restrict rxF, int * restrict ul_ch, int16_t *restrict llr, int aarx, int length,int output_shift)
{
#if !USE_128BIT
register simde__m256i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4, xmmp6, xmmp7;
register simde__m256i complex_shuffle256 = simde_mm256_set_epi8(29, 28, 31, 30, 25, 24, 27, 26, 21, 20, 23, 22, 17, 16, 19, 18, 13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
register simde__m256i conj256 = simde_mm256_set_epi16(1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1);
register simde__m256i QAM_amp256 = simde_mm256_set1_epi16(QAM64_n1); // 4/sqrt(42)
register simde__m256i QAM_amp256b = simde_mm256_set1_epi16(QAM64_n2);
@@ -1718,104 +1713,177 @@ void inner_rx_64qam(int * restrict rxF, int * restrict ul_ch, int16_t *restrict
simde__m256i *ulch256 = (simde__m256i*)ul_ch;
// need to use simde__m64 because llr output is not necessarily aligned to 256 bits, but it is always aligned to 64 bits
if (aarx==0) {
uint32_t *llr32 = (uint32_t *)llr;
simde__m64 *llr64 = (simde__m64 *)llr;
for (int i=0;i<((length>>3)+((length&7)>0?1:0));i++) {
xmmtmpD0 = simde_mm256_madd_epi16(ulch256[i],rxF256[i]);
// xmmtmpD0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD1 = simde_mm256_shuffle_epi8(ulch256[i],complex_shuffle256);
xmmtmpD1 = simde_mm256_sign_epi16(xmmtmpD1,conj256);
xmmtmpD1 = simde_mm256_madd_epi16(xmmtmpD1,rxF256[i]);
// xmmtmpD1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD0 = simde_mm256_srai_epi32(xmmtmpD0,output_shift);
xmmtmpD1 = simde_mm256_srai_epi32(xmmtmpD1,output_shift);
xmmtmpD2 = simde_mm256_unpacklo_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD3 = simde_mm256_unpackhi_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD4 = simde_mm256_packs_epi32(xmmtmpD2,xmmtmpD3);
xmmp0 = simde_mm256_madd_epi16(ulch256[i],rxF256[i]);
// xmmp0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp1 = simde_mm256_shuffle_epi8(ulch256[i],complex_shuffle256);
xmmp1 = simde_mm256_sign_epi16(xmmp1,conj256);
xmmp1 = simde_mm256_madd_epi16(xmmp1,rxF256[i]);
// xmmp1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp0 = simde_mm256_srai_epi32(xmmp0,output_shift);
xmmp1 = simde_mm256_srai_epi32(xmmp1,output_shift);
xmmp2 = simde_mm256_unpacklo_epi32(xmmp0,xmmp1);
xmmp3 = simde_mm256_unpackhi_epi32(xmmp0,xmmp1);
xmmp4 = simde_mm256_packs_epi32(xmmp2,xmmp3);
// compute channel amplitude for LLR
xmmtmpD0 = simde_mm256_madd_epi16(ulch256[i],ulch256[i]);
xmmtmpD0 = simde_mm256_srai_epi32(xmmtmpD0,output_shift);
xmmtmpD0 = simde_mm256_packs_epi32(xmmtmpD0,xmmtmpD0);
xmmtmpD2 = simde_mm256_unpacklo_epi16(xmmtmpD0,xmmtmpD0);
xmmtmpD1 = simde_mm256_mulhrs_epi16(xmmtmpD2,QAM_amp256);
xmmtmpD6 = simde_mm256_mulhrs_epi16(xmmtmpD2,QAM_amp256b);
xmmtmpD2 = simde_mm256_abs_epi16(xmmtmpD4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmmtmpD2 = simde_mm256_subs_epi16(xmmtmpD1,xmmtmpD2); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
xmmtmpD7 = simde_mm256_abs_epi16(xmmtmpD2);
xmmtmpD7 = simde_mm256_subs_epi16(xmmtmpD6,xmmtmpD7);
llr32[0] = simde_mm256_extract_epi32(xmmtmpD4,0);
llr32[1] = simde_mm256_extract_epi32(xmmtmpD2,0);
llr32[2] = simde_mm256_extract_epi32(xmmtmpD7,0);
llr32[3] = simde_mm256_extract_epi32(xmmtmpD4,1);
llr32[4] = simde_mm256_extract_epi32(xmmtmpD2,1);
llr32[5] = simde_mm256_extract_epi32(xmmtmpD7,1);
llr32[6] = simde_mm256_extract_epi32(xmmtmpD4,2);
llr32[7] = simde_mm256_extract_epi32(xmmtmpD2,2);
llr32[8] = simde_mm256_extract_epi32(xmmtmpD7,2);
llr32[9] = simde_mm256_extract_epi32(xmmtmpD4,3);
llr32[10] = simde_mm256_extract_epi32(xmmtmpD2,3);
llr32[11] = simde_mm256_extract_epi32(xmmtmpD7,3);
llr32[12] = simde_mm256_extract_epi32(xmmtmpD4,4);
llr32[13] = simde_mm256_extract_epi32(xmmtmpD2,4);
llr32[14] = simde_mm256_extract_epi32(xmmtmpD7,4);
llr32[15] = simde_mm256_extract_epi32(xmmtmpD4,5);
llr32[16] = simde_mm256_extract_epi32(xmmtmpD2,5);
llr32[17] = simde_mm256_extract_epi32(xmmtmpD7,5);
llr32[18] = simde_mm256_extract_epi32(xmmtmpD4,6);
llr32[19] = simde_mm256_extract_epi32(xmmtmpD2,6);
llr32[20] = simde_mm256_extract_epi32(xmmtmpD7,6);
llr32[21] = simde_mm256_extract_epi32(xmmtmpD4,7);
llr32[22] = simde_mm256_extract_epi32(xmmtmpD2,7);
llr32[23] = simde_mm256_extract_epi32(xmmtmpD7,7);
llr32+=24;
xmmp0 = simde_mm256_madd_epi16(ulch256[i],ulch256[i]);
xmmp0 = simde_mm256_srai_epi32(xmmp0,output_shift);
xmmp0 = simde_mm256_packs_epi32(xmmp0,xmmp0);
xmmp2 = simde_mm256_unpacklo_epi16(xmmp0,xmmp0);
xmmp1 = simde_mm256_mulhrs_epi16(xmmp2,QAM_amp256);
xmmp6 = simde_mm256_mulhrs_epi16(xmmp2,QAM_amp256b);
xmmp2 = simde_mm256_abs_epi16(xmmp4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmmp2 = simde_mm256_subs_epi16(xmmp1,xmmp2); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
xmmp7 = simde_mm256_abs_epi16(xmmp2);
xmmp7 = simde_mm256_subs_epi16(xmmp6,xmmp7);
if (aarx == 0)
{
llr64[0] = simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp2,0),simde_mm256_extract_epi32(xmmp4,0));
llr64[1] = simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp4,1),simde_mm256_extract_epi32(xmmp7,0));
llr64[2] = simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp7,1),simde_mm256_extract_epi32(xmmp2,1));
llr64[3] = simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp2,2),simde_mm256_extract_epi32(xmmp4,2));
llr64[4] = simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp4,3),simde_mm256_extract_epi32(xmmp7,2));
llr64[5] = simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp7,3),simde_mm256_extract_epi32(xmmp2,3));
llr64[6] = simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp2,4),simde_mm256_extract_epi32(xmmp4,4));
llr64[7] = simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp4,5),simde_mm256_extract_epi32(xmmp7,4));
llr64[8] = simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp7,5),simde_mm256_extract_epi32(xmmp2,5));
llr64[9] = simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp2,6),simde_mm256_extract_epi32(xmmp4,6));
llr64[10] = simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp4,7),simde_mm256_extract_epi32(xmmp7,6));
llr64[11] = simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp7,7),simde_mm256_extract_epi32(xmmp2,7));
llr64+=12;
}
} else {
else
{
llr64[0] = simde_mm_adds_pi16(llr64[0],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp2,0),simde_mm256_extract_epi32(xmmp4,0)));
llr64[1] = simde_mm_adds_pi16(llr64[1],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp4,1),simde_mm256_extract_epi32(xmmp7,0)));
llr64[2] = simde_mm_adds_pi16(llr64[2],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp7,1),simde_mm256_extract_epi32(xmmp2,1)));
llr64[3] = simde_mm_adds_pi16(llr64[3],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp2,2),simde_mm256_extract_epi32(xmmp4,2)));
llr64[4] = simde_mm_adds_pi16(llr64[4],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp4,3),simde_mm256_extract_epi32(xmmp7,2)));
llr64[5] = simde_mm_adds_pi16(llr64[5],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp7,3),simde_mm256_extract_epi32(xmmp2,3)));
llr64[6] = simde_mm_adds_pi16(llr64[6],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp2,4),simde_mm256_extract_epi32(xmmp4,4)));
llr64[7] = simde_mm_adds_pi16(llr64[7],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp4,5),simde_mm256_extract_epi32(xmmp7,4)));
llr64[8] = simde_mm_adds_pi16(llr64[8],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp7,5),simde_mm256_extract_epi32(xmmp2,5)));
llr64[9] = simde_mm_adds_pi16(llr64[9],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp2,6),simde_mm256_extract_epi32(xmmp4,6)));
llr64[10] = simde_mm_adds_pi16(llr64[10],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp4,7),simde_mm256_extract_epi32(xmmp7,6)));
llr64[11] = simde_mm_adds_pi16(llr64[11],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmp7,7),simde_mm256_extract_epi32(xmmp2,7)));
llr64+=12;
}
}
#else
register simde__m128i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4, xmmp6, xmmp7;
register simde__m128i complex_shuffle128 = simde_mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
register simde__m128i conj128 = simde_mm_set_epi16(-1, 1, -1, 1, -1, 1, -1, 1);
// register simde__m128i conj128 = simde_mm_set_epi16(1, -1, 1, -1, 1, -1, 1, -1);
register simde__m128i QAM_amp128 = simde_mm_set1_epi16(QAM64_n1); // 4/sqrt(42)
register simde__m128i QAM_amp128b = simde_mm_set1_epi16(QAM64_n2); // 2/sqrt(42)
simde__m128i *rxF128 = (simde__m128i*) rxF;
simde__m128i *ulch128 = (simde__m128i*) ul_ch;
// need to use simde__m64 because llr output is not necessarily aligned to 256 bits, but it is always aligned to 64 bits
simde__m64 *llr64 = (simde__m64 *)llr;
for (int i=0;i<((length>>3)+((length&7)>0?1:0));i++) {
xmmtmpD0 = simde_mm256_madd_epi16(ulch256[i],rxF256[i]);
// xmmtmpD0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD1 = simde_mm256_shuffle_epi8(ulch256[i],complex_shuffle256);
xmmtmpD1 = simde_mm256_sign_epi16(xmmtmpD1,conj256);
xmmtmpD1 = simde_mm256_madd_epi16(xmmtmpD1,rxF256[i]);
// xmmtmpD1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD0 = simde_mm256_srai_epi32(xmmtmpD0,output_shift);
xmmtmpD1 = simde_mm256_srai_epi32(xmmtmpD1,output_shift);
xmmtmpD2 = simde_mm256_unpacklo_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD3 = simde_mm256_unpackhi_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD4 = simde_mm256_packs_epi32(xmmtmpD2,xmmtmpD3);
for (int i = 0; i < (length>>2); i++)
{
xmmp0 = simde_mm_sign_epi16(ulch128[i], conj128);
xmmp0 = simde_mm_madd_epi16(xmmp0, rxF128[i]);
// xmmp0 = simde_mm_madd_epi16(ulch128[i], rxF128[i]);
// xmmp0 contains real part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
// xmmp1 = simde_mm_sign_epi16(xmmp1, conj128);
xmmp1 = simde_mm_shuffle_epi8(ulch128[i], complex_shuffle128);
xmmp1 = simde_mm_madd_epi16(xmmp1, rxF128[i]);
// xmmp1 contains imag part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmmp0 = simde_mm_srai_epi32(xmmp0, output_shift);
xmmp1 = simde_mm_srai_epi32(xmmp1, output_shift);
xmmp2 = simde_mm_unpacklo_epi32(xmmp0, xmmp1);
xmmp3 = simde_mm_unpackhi_epi32(xmmp0, xmmp1);
xmmp4 = simde_mm_packs_epi32(xmmp2, xmmp3);
// compute channel amplitude for LLR
xmmtmpD0 = simde_mm256_madd_epi16(ulch256[i],ulch256[i]);
xmmtmpD0 = simde_mm256_srai_epi32(xmmtmpD0,output_shift);
xmmtmpD0 = simde_mm256_packs_epi32(xmmtmpD0,xmmtmpD0);
xmmtmpD2 = simde_mm256_unpacklo_epi16(xmmtmpD0,xmmtmpD0);
xmmtmpD1 = simde_mm256_mulhrs_epi16(xmmtmpD2,QAM_amp256);
xmmtmpD6 = simde_mm256_mulhrs_epi16(xmmtmpD2,QAM_amp256b);
xmmp0 = simde_mm_madd_epi16(ulch128[i], ulch128[i]);
xmmp0 = simde_mm_srai_epi32(xmmp0, output_shift);
xmmp0 = simde_mm_packs_epi32(xmmp0, xmmp0);
xmmp2 = simde_mm_unpacklo_epi16(xmmp0, xmmp0);
xmmp1 = simde_mm_mulhrs_epi16(xmmp2, QAM_amp128);
xmmp6 = simde_mm_mulhrs_epi16(xmmp2, QAM_amp128b);
xmmp2 = simde_mm_abs_epi16(xmmp4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmmp2 = simde_mm_subs_epi16(xmmp1, xmmp2); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
xmmp7 = simde_mm_abs_epi16(xmmp2);
xmmp7 = simde_mm_subs_epi16(xmmp6, xmmp7);
if (aarx == 0)
{
llr64[0] = simde_mm_set_pi32(simde_mm_extract_epi32(xmmp2, 0), simde_mm_extract_epi32(xmmp4, 0));
llr64[1] = simde_mm_set_pi32(simde_mm_extract_epi32(xmmp4, 1), simde_mm_extract_epi32(xmmp7, 0));
llr64[2] = simde_mm_set_pi32(simde_mm_extract_epi32(xmmp7, 1), simde_mm_extract_epi32(xmmp2, 1));
llr64[3] = simde_mm_set_pi32(simde_mm_extract_epi32(xmmp2, 2), simde_mm_extract_epi32(xmmp4, 2));
llr64[4] = simde_mm_set_pi32(simde_mm_extract_epi32(xmmp4, 3), simde_mm_extract_epi32(xmmp7, 2));
llr64[5] = simde_mm_set_pi32(simde_mm_extract_epi32(xmmp7, 3), simde_mm_extract_epi32(xmmp2, 3));
llr64 += 6;
}
else
{
llr64[0] = simde_mm_adds_pi16(llr64[0], simde_mm_set_pi32(simde_mm_extract_epi32(xmmp2, 0),simde_mm_extract_epi32(xmmp4, 0)));
llr64[1] = simde_mm_adds_pi16(llr64[1], simde_mm_set_pi32(simde_mm_extract_epi32(xmmp4, 1),simde_mm_extract_epi32(xmmp7, 0)));
llr64[2] = simde_mm_adds_pi16(llr64[2], simde_mm_set_pi32(simde_mm_extract_epi32(xmmp7, 1),simde_mm_extract_epi32(xmmp2, 1)));
llr64[3] = simde_mm_adds_pi16(llr64[3], simde_mm_set_pi32(simde_mm_extract_epi32(xmmp2, 2),simde_mm_extract_epi32(xmmp4, 2)));
llr64[4] = simde_mm_adds_pi16(llr64[4], simde_mm_set_pi32(simde_mm_extract_epi32(xmmp4, 3),simde_mm_extract_epi32(xmmp7, 2)));
llr64[5] = simde_mm_adds_pi16(llr64[5], simde_mm_set_pi32(simde_mm_extract_epi32(xmmp7, 3),simde_mm_extract_epi32(xmmp2, 3)));
llr64 += 6;
}
}
if (length & 3)
{
int i = (length>>1) - 1;
simde__m64 xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6;
simde__m64 complex_shuffle64 = simde_mm_set_pi8(5,4,7,6,1,0,3,2);
simde__m64 conj64 = simde_mm_set_pi16(-1,1,-1,1);
simde__m64 *rxF64 = (simde__m64*)rxF;
simde__m64 *ulch64 = (simde__m64*)ul_ch;
simde__m64 QAM_amp = simde_mm_set1_pi16(QAM64_n1);
simde__m64 QAM_ampb = simde_mm_set1_pi16(QAM64_n2);
xmm0 = simde_mm_sign_pi16(ulch64[i], conj64);
xmm0 = simde_mm_madd_pi16(xmm0, rxF64[i]);
// xmm0 contains real part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmm1 = simde_mm_shuffle_pi8(ulch64[i], complex_shuffle64);
xmm1 = simde_mm_madd_pi16(xmm1, rxF64[i]);
// xmm1 contains imag part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmm0 = simde_mm_srai_pi32(xmm0, output_shift);
xmm1 = simde_mm_srai_pi32(xmm1, output_shift);
xmm2 = simde_mm_unpacklo_pi32(xmm0, xmm1);
xmm3 = simde_mm_unpackhi_pi32(xmm0, xmm1);
xmm4 = simde_mm_packs_pi32(xmm2, xmm3);
xmmtmpD2 = simde_mm256_abs_epi16(xmmtmpD4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmmtmpD2 = simde_mm256_subs_epi16(xmmtmpD1,xmmtmpD2); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
xmmtmpD7 = simde_mm256_abs_epi16(xmmtmpD2);
xmmtmpD7 = simde_mm256_subs_epi16(xmmtmpD6,xmmtmpD7);
llr64[0] = simde_mm_adds_pi16(llr64[0],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmtmpD2,0),simde_mm256_extract_epi32(xmmtmpD4,0)));
llr64[1] = simde_mm_adds_pi16(llr64[1],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmtmpD4,1),simde_mm256_extract_epi32(xmmtmpD7,0)));
llr64[2] = simde_mm_adds_pi16(llr64[2],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmtmpD7,1),simde_mm256_extract_epi32(xmmtmpD2,1)));
llr64[3] = simde_mm_adds_pi16(llr64[3],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmtmpD2,2),simde_mm256_extract_epi32(xmmtmpD4,2)));
llr64[4] = simde_mm_adds_pi16(llr64[4],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmtmpD4,3),simde_mm256_extract_epi32(xmmtmpD7,2)));
llr64[5] = simde_mm_adds_pi16(llr64[5],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmtmpD7,3),simde_mm256_extract_epi32(xmmtmpD2,3)));
llr64[6] = simde_mm_adds_pi16(llr64[6],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmtmpD2,4),simde_mm256_extract_epi32(xmmtmpD4,4)));
llr64[7] = simde_mm_adds_pi16(llr64[7],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmtmpD4,5),simde_mm256_extract_epi32(xmmtmpD7,4)));
llr64[8] = simde_mm_adds_pi16(llr64[8],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmtmpD7,5),simde_mm256_extract_epi32(xmmtmpD2,5)));
llr64[9] = simde_mm_adds_pi16(llr64[9],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmtmpD2,6),simde_mm256_extract_epi32(xmmtmpD4,6)));
llr64[10] = simde_mm_adds_pi16(llr64[10],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmtmpD4,7),simde_mm256_extract_epi32(xmmtmpD7,6)));
llr64[11] = simde_mm_adds_pi16(llr64[11],simde_mm_set_pi32(simde_mm256_extract_epi32(xmmtmpD7,7),simde_mm256_extract_epi32(xmmtmpD2,7)));
llr64+=12;
// compute channel amplitude for LLR
xmm0 = simde_mm_madd_pi16(ulch64[i], ulch64[i]); // |h|^2
xmm0 = simde_mm_srai_pi32(xmm0, output_shift);
xmm0 = simde_mm_packs_pi32(xmm0, xmm0);
xmm2 = simde_mm_unpacklo_pi16(xmm0, xmm0);
xmm1 = simde_mm_mulhrs_pi16(xmm2, QAM_amp);
xmm5 = simde_mm_mulhrs_pi16(xmm2, QAM_ampb);
xmm0 = simde_mm_abs_pi16(xmm4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmm0 = simde_mm_subs_pi16(xmm1, xmm0); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
xmm1 = simde_mm_abs_pi16(xmm0);
xmm1 = simde_mm_subs_pi16(xmm5, xmm1); // contains 8 LLRs
// pack the 12 tail LLRs as [y0 b1_0] [b2_0 y1] [b1_1 b2_1] (32-bit re/im pairs),
// matching the main-loop ordering; MMX has no 32-bit extract, so use dword unpacks
xmm2 = simde_m_punpckldq(xmm4, xmm0); // y0_re y0_im b1_0re b1_0im
xmm3 = simde_m_punpckldq(xmm1, simde_m_punpckhdq(xmm4, xmm4)); // b2_0re b2_0im y1_re y1_im
xmm5 = simde_m_punpckhdq(xmm0, xmm1); // b1_1re b1_1im b2_1re b2_1im
if (aarx == 0)
{
llr64[0] = xmm2;
llr64[1] = xmm3;
llr64[2] = xmm5;
}
else
{
llr64[0] = simde_mm_adds_pi16(llr64[0], xmm2);
llr64[1] = simde_mm_adds_pi16(llr64[1], xmm3);
llr64[2] = simde_mm_adds_pi16(llr64[2], xmm5);
}
}
#endif
}
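
inner_rx_64qam follows the same recursion as the 256QAM case with one fewer level, three soft bits per component (a1, a2 being |h|^2 scaled by QAM64_n1/QAM64_n2); a scalar sketch:

#include <stdint.h>
#include <stdlib.h>

// Per-component 64QAM soft-bit recursion (sketch).
static void llr64qam_component(int16_t y, int16_t a1, int16_t a2, int16_t llr[3])
{
  llr[0] = y;                          // MSB: sign of the compensated component
  llr[1] = (int16_t)(a1 - abs(llr[0]));
  llr[2] = (int16_t)(a2 - abs(llr[1]));
}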
@@ -1882,128 +1950,200 @@ void inner_rx_64qam(int* rxF, int* ul_ch, int16_t * llr, int aarx, int length,
void inner_rx_16qam( int * rxF, int * ul_ch, int16_t * llr, int aarx, int length,int output_shift) {
register simde__m256i xmmtmpD0,xmmtmpD1,xmmtmpD2,xmmtmpD3,xmmtmpD4,xmmtmpD5;
#if !USE_128BIT
register simde__m256i xmmp0,xmmp1,xmmp2,xmmp3,xmmp4,xmmp5;
register simde__m256i complex_shuffle256 = simde_mm256_set_epi8(29,28,31,30,25,24,27,26,21,20,23,22,17,16,19,18,13,12,15,14,9,8,11,10,5,4,7,6,1,0,3,2);
register simde__m256i conj256 = simde_mm256_set_epi16(1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1);
register simde__m256i QAM_amp256 = simde_mm256_set1_epi16(QAM64_n1); // 2/sqrt(10)
register simde__m256i QAM_amp256 = simde_mm256_set1_epi16(QAM16_n1); // 2/sqrt(10)
simde__m256i *rxF256 = (simde__m256i*)rxF;
simde__m256i *ulch256 = (simde__m256i*)ul_ch;
// need to use simde__m64 because llr output is not necessarily aligned to 256 bits, but it is always aligned to 64 bits
simde__m64 *llr64 = (simde__m64 *)llr;
if (aarx==0)
for (int i=0;i<((length>>3)+((length&7)>0?1:0));i++) {
xmmtmpD0 = simde_mm256_madd_epi16(ulch256[i],rxF256[i]);
// xmmtmpD0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD1 = simde_mm256_shuffle_epi8(ulch256[i],complex_shuffle256);
xmmtmpD1 = simde_mm256_sign_epi16(xmmtmpD1,conj256);
xmmtmpD1 = simde_mm256_madd_epi16(xmmtmpD1,rxF256[i]);
// xmmtmpD1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmtmpD0 = simde_mm256_srai_epi32(xmmtmpD0,output_shift);
xmmtmpD1 = simde_mm256_srai_epi32(xmmtmpD1,output_shift);
xmmtmpD2 = simde_mm256_unpacklo_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD3 = simde_mm256_unpackhi_epi32(xmmtmpD0,xmmtmpD1);
xmmtmpD4 = simde_mm256_packs_epi32(xmmtmpD2,xmmtmpD3);
for (int i = 0; i < ((length >> 3) + ((length & 7) > 0 ? 1 : 0)); i++)
{
xmmp0 = simde_mm256_madd_epi16(ulch256[i], rxF256[i]);
// xmmp0 contains real part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp1 = simde_mm256_shuffle_epi8(ulch256[i], complex_shuffle256);
xmmp1 = simde_mm256_sign_epi16(xmmp1, conj256);
xmmp1 = simde_mm256_madd_epi16(xmmp1, rxF256[i]);
// xmmp1 contains imag part of 8 consecutive outputs (32-bit) of conj(H_m[i])*R_m[i]
xmmp0 = simde_mm256_srai_epi32(xmmp0, output_shift);
xmmp1 = simde_mm256_srai_epi32(xmmp1, output_shift);
xmmp2 = simde_mm256_unpacklo_epi32(xmmp0, xmmp1);
xmmp3 = simde_mm256_unpackhi_epi32(xmmp0, xmmp1);
xmmp4 = simde_mm256_packs_epi32(xmmp2, xmmp3);
// compute channel amplitude for LLR
xmmtmpD0 = simde_mm256_madd_epi16(ulch256[i],ulch256[i]);
xmmtmpD0 = simde_mm256_srai_epi32(xmmtmpD0,output_shift);
xmmtmpD0 = simde_mm256_packs_epi32(xmmtmpD0,xmmtmpD0);
xmmtmpD1 = simde_mm256_unpacklo_epi16(xmmtmpD0,xmmtmpD0);
xmmtmpD1 = simde_mm256_mulhrs_epi16(xmmtmpD1,QAM_amp256);
xmmtmpD2 = simde_mm256_abs_epi16(xmmtmpD4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmmtmpD2 = simde_mm256_subs_epi16(xmmtmpD1,xmmtmpD2); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
xmmtmpD3 = simde_mm256_unpacklo_epi32(xmmtmpD4,xmmtmpD2); // llr128[0] contains the llrs of the 1st,2nd,5th and 6th REs
xmmtmpD5 = simde_mm256_unpackhi_epi32(xmmtmpD4,xmmtmpD2); // llr128[1] contains the llrs of the 3rd, 4th, 7th and 8th REs
xmmp0 = simde_mm256_madd_epi16(ulch256[i], ulch256[i]);
xmmp0 = simde_mm256_srai_epi32(xmmp0, output_shift);
xmmp0 = simde_mm256_packs_epi32(xmmp0, xmmp0);
xmmp1 = simde_mm256_unpacklo_epi16(xmmp0, xmmp0);
xmmp1 = simde_mm256_mulhrs_epi16(xmmp1, QAM_amp256);
xmmp2 = simde_mm256_abs_epi16(xmmp4); // registers of even index in xmm0-> |y_R|, registers of odd index in xmm0-> |y_I|
xmmp2 = simde_mm256_subs_epi16(xmmp1,xmmp2); // registers of even index in xmm0-> |y_R|-|h|^2, registers of odd index in xmm0-> |y_I|-|h|^2
xmmp3 = simde_mm256_unpacklo_epi32(xmmp4,xmmp2); // llr128[0] contains the llrs of the 1st,2nd,5th and 6th REs
xmmp5 = simde_mm256_unpackhi_epi32(xmmp4,xmmp2); // llr128[1] contains the llrs of the 3rd, 4th, 7th and 8th REs
if (aarx == 0)
{
// 1st/2nd RE
llr64[0] = (simde__m64)simde_mm256_extract_epi64(xmmp3,0); // llr32[0] low 16 bits-> y_R , high 16 bits-> y_I
// 3rd/4th RE
llr64[1] = (simde__m64)simde_mm256_extract_epi64(xmmp3,1); // llr32[2] low 16 bits-> y_R , high 16 bits-> y_I
// 5th/6th RE
llr64[2] = (simde__m64)simde_mm256_extract_epi64(xmmp5,0); // llr32[4] low 16 bits-> y_R , high 16 bits-> y_I
// 7th/8th RE
llr64[3] = (simde__m64)simde_mm256_extract_epi64(xmmp5,1); // llr32[6] low 16 bits-> y_R , high 16 bits-> y_I
// 9th/10th RE
llr64[4] = (simde__m64)simde_mm256_extract_epi64(xmmp3,2); // llr32[8] low 16 bits-> y_R , high 16 bits-> y_I
// 11th/12th RE
llr64[5] = (simde__m64)simde_mm256_extract_epi64(xmmp3,3); // llr32[10] low 16 bits-> y_R , high 16 bits-> y_I
// 13th/14th RE
llr64[6] = (simde__m64)simde_mm256_extract_epi64(xmmp5,2); // llr32[12] low 16 bits-> y_R , high 16 bits-> y_I
// 15th/16th RE
llr64[7] = (simde__m64)simde_mm256_extract_epi64(xmmp5,3); // llr32[14] low 16 bits-> y_R , high 16 bits-> y_I
llr64 += 8;
}
else
{
llr64[0] = simde_mm_adds_pi16(llr64[0],(simde__m64)simde_mm256_extract_epi64(xmmp3,0));
llr64[1] = simde_mm_adds_pi16(llr64[1],(simde__m64)simde_mm256_extract_epi64(xmmp3,1));
llr64[2] = simde_mm_adds_pi16(llr64[2],(simde__m64)simde_mm256_extract_epi64(xmmp5,0));
llr64[3] = simde_mm_adds_pi16(llr64[3],(simde__m64)simde_mm256_extract_epi64(xmmp5,1));
llr64[4] = simde_mm_adds_pi16(llr64[4],(simde__m64)simde_mm256_extract_epi64(xmmp3,2));
llr64[5] = simde_mm_adds_pi16(llr64[5],(simde__m64)simde_mm256_extract_epi64(xmmp3,3));
llr64[6] = simde_mm_adds_pi16(llr64[6],(simde__m64)simde_mm256_extract_epi64(xmmp5,2));
llr64[7] = simde_mm_adds_pi16(llr64[7],(simde__m64)simde_mm256_extract_epi64(xmmp5,3));
llr64 += 8;
}
}
#else
register simde__m128i xmmp0, xmmp1, xmmp2, xmmp3, xmmp4, xmmp5;
register simde__m128i complex_shuffle128 = simde_mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
register simde__m128i conj128 = simde_mm_set_epi16(-1, 1, -1, 1, -1, 1, -1, 1);
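// complex_shuffle128 swaps the re/im 16-bit halves of every 32-bit complex lane;
// conj128 negates the imaginary lanes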
register simde__m128i QAM_amp128 = simde_mm_set1_epi16(QAM16_n1); // 2/sqrt(10)
simde__m128i *rxF128 = (simde__m128i*)rxF;
simde__m128i *ulch128 = (simde__m128i*)ul_ch;
// need to use simde__m64 because llr output is not necessarily aligned to 256 bits, but it is always to 64 bits
simde__m64 *llr64 = (simde__m64 *)llr;
for (int i = 0; i < (length >> 2); i++)
{
xmmp0 = simde_mm_sign_epi16(ulch128[i], conj128);
xmmp0 = simde_mm_madd_epi16(xmmp0, rxF128[i]);
// xmmp0 contains real part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmmp1 = simde_mm_shuffle_epi8(ulch128[i], complex_shuffle128);
xmmp1 = simde_mm_madd_epi16(xmmp1, rxF128[i]);
// xmmp1 contains imag part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmmp0 = simde_mm_srai_epi32(xmmp0, output_shift);
xmmp1 = simde_mm_srai_epi32(xmmp1, output_shift);
xmmp2 = simde_mm_unpacklo_epi32(xmmp0, xmmp1);
xmmp3 = simde_mm_unpackhi_epi32(xmmp0, xmmp1);
xmmp4 = simde_mm_packs_epi32(xmmp2, xmmp3);
// compute channel amplitude for LLR
xmmp0 = simde_mm_madd_epi16(ulch128[i], ulch128[i]); // |h|^2
xmmp0 = simde_mm_srai_epi32(xmmp0, output_shift);
xmmp0 = simde_mm_packs_epi32(xmmp0, xmmp0);
xmmp1 = simde_mm_unpacklo_epi16(xmmp0, xmmp0);
xmmp1 = simde_mm_mulhrs_epi16(xmmp1, QAM_amp128);
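// xmmp1 now holds a*|h|^2 per RE, duplicated into the re and im lanes
// (a = 2/sqrt(10), applied with mulhrs rounding)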
xmmp2 = simde_mm_abs_epi16(xmmp4); // even 16-bit lanes -> |y_R|, odd lanes -> |y_I|
xmmp2 = simde_mm_subs_epi16(xmmp1, xmmp2); // even lanes -> a*|h|^2 - |y_R|, odd lanes -> a*|h|^2 - |y_I|
xmmp3 = simde_mm_unpacklo_epi32(xmmp4, xmmp2); // llr128[0] contains the llrs of the 1st and 2nd REs
xmmp5 = simde_mm_unpackhi_epi32(xmmp4, xmmp2); // llr128[1] contains the llrs of the 3rd and 4th REs
if (aarx == 0)
{
llr64[0] = (simde__m64)simde_mm_extract_epi64(xmmp3, 0); // llr32[0] low 16 bits-> y_R, high 16 bits-> y_I
llr64[1] = (simde__m64)simde_mm_extract_epi64(xmmp3, 1); // llr32[2] low 16 bits-> y_R, high 16 bits-> y_I
llr64[2] = (simde__m64)simde_mm_extract_epi64(xmmp5, 0); // llr32[4] low 16 bits-> y_R, high 16 bits-> y_I
llr64[3] = (simde__m64)simde_mm_extract_epi64(xmmp5, 1); // llr32[6] low 16 bits-> y_R, high 16 bits-> y_I
}
else
{
llr64[0] = simde_mm_adds_pi16(llr64[0], (simde__m64)simde_mm_extract_epi64(xmmp3, 0));
llr64[1] = simde_mm_adds_pi16(llr64[1], (simde__m64)simde_mm_extract_epi64(xmmp3, 1));
llr64[2] = simde_mm_adds_pi16(llr64[2], (simde__m64)simde_mm_extract_epi64(xmmp5, 0));
llr64[3] = simde_mm_adds_pi16(llr64[3], (simde__m64)simde_mm_extract_epi64(xmmp5, 1));
}
llr64 += 4;
}
if (length & 3)
{
int i = (length>>1) - 1;
simde__m64 xmm0, xmm1, xmm2, xmm3, xmm4;
simde__m64 complex_shuffle64 = simde_mm_set_pi8(5,4,7,6,1,0,3,2);
simde__m64 conj64 = simde_mm_set_pi16(-1,1,-1,1);
simde__m64 *rxF64 = (simde__m64*)rxF;
simde__m64 *ulch64 = (simde__m64*)ul_ch;
simde__m64 QAM_amp = simde_mm_set1_pi16(QAM16_n1);
xmm0 = simde_mm_sign_pi16(ulch64[i], conj64);
xmm0 = simde_mm_madd_pi16(xmm0, rxF64[i]);
// xmm0 contains real part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmm1 = simde_mm_shuffle_pi8(ulch64[i], complex_shuffle64);
xmm1 = simde_mm_madd_pi16(xmm1, rxF64[i]);
// xmm1 contains imag part of 4 consecutive outputs (32-bit) of H_m[i]*R_m[i]
xmm0 = simde_mm_srai_pi32(xmm0, output_shift);
xmm1 = simde_mm_srai_pi32(xmm1, output_shift);
xmm2 = simde_mm_unpacklo_pi32(xmm0, xmm1);
xmm3 = simde_mm_unpackhi_pi32(xmm0, xmm1);
xmm4 = simde_mm_packs_pi32(xmm2, xmm3);
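// xmm4: the two remaining compensated REs as interleaved 16-bit re/im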
// compute channel amplitude for LLR
xmm0 = simde_mm_madd_pi16(ulch64[i], ulch64[i]); // |h|^2
xmm0 = simde_mm_srai_pi32(xmm0, output_shift);
xmm0 = simde_mm_packs_pi32(xmm0, xmm0);
xmm2 = simde_mm_unpacklo_pi16(xmm0, xmm0);
xmm1 = simde_mm_mulhrs_pi16(xmm2, QAM_amp);
xmm0 = simde_mm_abs_pi16(xmm4); // even 16-bit lanes -> |y_R|, odd lanes -> |y_I|
xmm0 = simde_mm_subs_pi16(xmm1, xmm0); // even lanes -> a*|h|^2 - |y_R|, odd lanes -> a*|h|^2 - |y_I|
if (aarx == 0)
{
// pack the tail LLRs exactly like the 128-bit path above (unpacklo/unpackhi of y and a*|h|^2-|y|)
llr64[0] = simde_mm_unpacklo_pi32(xmm4, xmm0); // LLRs of the 1st remaining RE: y_R, y_I, a*|h|^2-|y_R|, a*|h|^2-|y_I|
llr64[1] = simde_mm_unpackhi_pi32(xmm4, xmm0); // LLRs of the 2nd remaining RE
}
else
{
llr64[0] = simde_mm_adds_pi16(llr64[0], simde_mm_unpacklo_pi32(xmm4, xmm0));
llr64[1] = simde_mm_adds_pi16(llr64[1], simde_mm_unpackhi_pi32(xmm4, xmm0));
}
}
#endif
}
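/* Scalar reference for the 16QAM kernel above -- a sketch for checking the
 * SIMD path, not called anywhere; the function and helper names are ours.
 * It follows the aarx == 0 branch: y = conj(h)*r (as documented for the
 * 256-bit path), scaled by output_shift, then per RE the four LLRs
 * { y_R, y_I, a*|h|^2 - |y_R|, a*|h|^2 - |y_I| } with a = 2/sqrt(10)
 * (QAM16_n1) applied through the same mulhrs rounding as the vector code. */
static inline int16_t sat16_ref(int32_t x) // mirrors the saturating packs
{
  return x > 32767 ? (int16_t)32767 : (x < -32768 ? (int16_t)-32768 : (int16_t)x);
}

static void inner_rx_16qam_scalar_ref(const c16_t *ul_ch, const c16_t *rxF,
                                      int16_t *llr, int length, int output_shift)
{
  for (int i = 0; i < length; i++) {
    // channel compensation: conj(h) * r, scaled back towards 16 bits
    int16_t y_re = sat16_ref(((int32_t)ul_ch[i].r * rxF[i].r + (int32_t)ul_ch[i].i * rxF[i].i) >> output_shift);
    int16_t y_im = sat16_ref(((int32_t)ul_ch[i].r * rxF[i].i - (int32_t)ul_ch[i].i * rxF[i].r) >> output_shift);
    // channel amplitude: |h|^2 scaled, then * QAM16_n1 with mulhrs rounding
    int16_t h2 = sat16_ref(((int32_t)ul_ch[i].r * ul_ch[i].r + (int32_t)ul_ch[i].i * ul_ch[i].i) >> output_shift);
    int16_t a = (int16_t)(((int32_t)h2 * QAM16_n1 + (1 << 14)) >> 15);
    llr[4 * i + 0] = y_re;
    llr[4 * i + 1] = y_im;
    llr[4 * i + 2] = sat16_ref(a - (y_re < 0 ? -y_re : y_re));
    llr[4 * i + 3] = sat16_ref(a - (y_im < 0 ? -y_im : y_im));
  }
}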
void nr_pusch_symbol_processing_noprecoding(void *arg)
{
// int64_t now = time_now_us();
// printf("Into nr_pusch_symbol_processing_noprecoding %ld \n ", now);
puschSymbolProc_t *rdata = (puschSymbolProc_t*)arg;
PHY_VARS_gNB *gNB = rdata->gNB;
NR_DL_FRAME_PARMS *frame_parms = rdata->frame_parms;
nfapi_nr_pusch_pdu_t *rel15_ul = rdata->rel15_ul;
int ulsch_id = rdata->ulsch_id;
int slot = rdata->slot;
int16_t *llr = rdata->llr;
int16_t *s = rdata->s;
for (int symbol = rdata->startSymbol; symbol < rdata->startSymbol+rdata->numSymbols; symbol++)
{
int dmrs_symbol_flag = (rel15_ul->ul_dmrs_symb_pos >> symbol) & 0x01;
// printf("symbol: %d, dmrs_symbol_flag: %d\n", symbol, dmrs_symbol_flag);
int nb_re_pusch = gNB->pusch_vars[ulsch_id].ul_valid_re_per_slot[symbol];
// this needs to be reworked for parallelization: we need a table giving the DMRS symbol locations
// used for channel estimation, since these tasks run in parallel!
if (dmrs_symbol_flag == 1)
{
if ((rel15_ul->ul_dmrs_symb_pos >> ((symbol + 1) % frame_parms->symbols_per_slot)) & 0x01)
AssertFatal(1==0,"Double DMRS configuration is not yet supported\n");
......@@ -2014,9 +2154,7 @@ void nr_pusch_symbol_processing_noprecoding(void *arg)
if (nb_re_pusch == 0) continue;
void (*inner_rx)(int *,int *,int16_t *,int,int,int);
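// select the modulation-specific SIMD kernel once per symbol instead of branching per RE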
if (rel15_ul->qam_mod_order == 2) inner_rx = inner_rx_qpsk;
else if (rel15_ul->qam_mod_order == 4) inner_rx = inner_rx_16qam;
else if (rel15_ul->qam_mod_order == 6) inner_rx = inner_rx_64qam;
......@@ -2039,7 +2177,8 @@ void nr_pusch_symbol_processing_noprecoding(void *arg)
dmrs_symbol_flag,
rel15_ul,
frame_parms);
// demodulation
inner_rx(rxFext, chFext, llr16, aa, nb_re_pusch, gNB->pusch_vars[ulsch_id].log2_maxh);
}
// unscrambling
simde__m64 *llr64 = (simde__m64 *) llr;
......@@ -2050,8 +2189,8 @@ void nr_pusch_symbol_processing_noprecoding(void *arg)
llr+=(nb_re_pusch*rel15_ul->qam_mod_order);
}
// int64_t end = time_now_us();
// printf("Elapsed time = %ld tstamp %ld id %lu \n", end - now, end, pthread_self());
}
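/* Minimal scalar sketch of the unscrambling step above, assuming the
 * sequence written by nr_codeword_unscrambling_init() holds +/-1 per soft
 * bit (the SIMD code consumes it 4 LLRs at a time through llr64); the
 * sequence format and the helper name are assumptions, illustrative only. */
static inline void unscramble_llr_scalar_ref(int16_t *llr, const int16_t *s, int n)
{
  for (int i = 0; i < n; i++)
    llr[i] = (int16_t)(llr[i] * s[i]); // flips the LLR sign where the scrambler emitted a 1
}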
/*
......@@ -2918,15 +3057,16 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
int max_ch = 0;
for (uint8_t symbol = rel15_ul->start_symbol_index; symbol < (rel15_ul->start_symbol_index + rel15_ul->nr_of_symbols); symbol++)
{
uint8_t dmrs_symbol_flag = (rel15_ul->ul_dmrs_symb_pos >> symbol) & 0x01;
LOG_D(PHY, "symbol %d, dmrs_symbol_flag :%d\n", symbol, dmrs_symbol_flag);
if (dmrs_symbol_flag == 1) {
if (pusch_vars->dmrs_symbol == INVALID_VALUE)
pusch_vars->dmrs_symbol = symbol;
for (int nl=0; nl<rel15_ul->nrOfLayers; nl++)
{
nr_pusch_channel_estimation(gNB,
slot,
get_dmrs_port(nl,rel15_ul->dmrs_ports),
......@@ -2939,8 +3079,8 @@ int nr_rx_pusch(PHY_VARS_gNB *gNB,
nr_gnb_measurements(gNB, ulsch, pusch_vars, symbol,rel15_ul->nrOfLayers);
for (aarx = 0; aarx < frame_parms->nb_antennas_rx; aarx++)
{
if (symbol == rel15_ul->start_symbol_index) {
pusch_vars->ulsch_power[aarx] = 0;
pusch_vars->ulsch_noise_power[aarx] = 0;
......@@ -3164,8 +3304,6 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
NR_DL_FRAME_PARMS *frame_parms = &gNB->frame_parms;
nfapi_nr_pusch_pdu_t *rel15_ul = &gNB->ulsch[ulsch_id].harq_process->ulsch_pdu;
NR_gNB_PUSCH *pusch_vars = &gNB->pusch_vars[ulsch_id];
pusch_vars->dmrs_symbol = INVALID_VALUE;
pusch_vars->cl_done = 0;
......@@ -3180,8 +3318,8 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
//----------------------------------------------------------
start_meas(&gNB->ulsch_channel_estimation_stats);
int max_ch = 0;
for(uint8_t symbol = rel15_ul->start_symbol_index; symbol < (rel15_ul->start_symbol_index + rel15_ul->nr_of_symbols); symbol++)
{
uint8_t dmrs_symbol_flag = (rel15_ul->ul_dmrs_symb_pos >> symbol) & 0x01;
LOG_D(PHY, "symbol %d, dmrs_symbol_flag :%d\n", symbol, dmrs_symbol_flag);
if (dmrs_symbol_flag == 1) {
......@@ -3198,10 +3336,8 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
bwp_start_subcarrier,
rel15_ul,
&max_ch);
// measure the SNR from the channel estimation
nr_gnb_measurements(gNB, &gNB->ulsch[ulsch_id], pusch_vars, symbol, rel15_ul->nrOfLayers);
for (aarx = 0; aarx < frame_parms->nb_antennas_rx; aarx++) {
if (symbol == rel15_ul->start_symbol_index) {
pusch_vars->ulsch_power[aarx] = 0;
......@@ -3211,12 +3347,10 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
&pusch_vars->ul_ch_estimates[aarx][symbol * frame_parms->ofdm_symbol_size],
rel15_ul->rb_size * 12);
for (int rb = 0; rb < rel15_ul->rb_size; rb++) {
pusch_vars->ulsch_noise_power[aarx] += gNB->measurements.n0_subband_power[aarx][rel15_ul->bwp_start + rel15_ul->rb_start + rb] / rel15_ul->rb_size;
}
}
}
}
if (gNB->chest_time == 1) { // averaging time domain channel estimates
......@@ -3239,25 +3373,24 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
int number_dmrs_symbols=0;
for (int l = rel15_ul->start_symbol_index; l < rel15_ul->start_symbol_index + rel15_ul->nr_of_symbols; l++)
number_dmrs_symbols += ((rel15_ul->ul_dmrs_symb_pos)>>l)&0x01;
int nb_re_dmrs;
if (rel15_ul->dmrs_config_type==pusch_dmrs_type1)
nb_re_dmrs = 6*rel15_ul->num_dmrs_cdm_grps_no_data;
else
nb_re_dmrs = 4*rel15_ul->num_dmrs_cdm_grps_no_data;
// get how many bits are in a slot //
int G = nr_get_G(rel15_ul->rb_size,
rel15_ul->nr_of_symbols,
nb_re_dmrs,
number_dmrs_symbols, // number of dmrs symbols irrespective of single or double symbol dmrs
rel15_ul->qam_mod_order,
rel15_ul->nrOfLayers);
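// Sanity check on G (a sketch of the usual bookkeeping; nr_get_G may handle
// corner cases differently): data REs = (12*nr_of_symbols - nb_re_dmrs*number_dmrs_symbols)*rb_size,
// and G = data REs * qam_mod_order * nrOfLayers. E.g. rb_size=100, 14 symbols, one DMRS
// symbol with nb_re_dmrs=6, 16QAM, 1 layer: (12*14 - 6*1)*100 = 16200 REs -> G = 64800 bits.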
// initialize scrambling sequence //
int16_t s[G+96] __attribute__((aligned(32)));
nr_codeword_unscrambling_init(s,G,0,rel15_ul->data_scrambling_id,rel15_ul->rnti);
void (*nr_pusch_symbol_processing_ptr)(void*) = &nr_pusch_symbol_processing_noprecoding;
// void (*nr_pusch_symbol_processing_ptr)(void*) = &nr_pusch_symbol_processing;
// first the computation of channel levels
......@@ -3266,12 +3399,16 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
for(meas_symbol = rel15_ul->start_symbol_index;
meas_symbol < (rel15_ul->start_symbol_index + rel15_ul->nr_of_symbols);
meas_symbol++)
if ((nb_re_pusch = get_nb_re_pusch(frame_parms,rel15_ul,meas_symbol)) > 0) {
break;
}
AssertFatal(nb_re_pusch>0 && meas_symbol>=0,"nb_re_pusch %d cannot be 0 or meas_symbol %d cannot be negative here\n",nb_re_pusch,meas_symbol);
start_meas(&gNB->ulsch_rbs_extraction_stats);
// only for the DMRS symbol //
// extract the allocated REs of the OFDM symbol to the start of the array
nr_ulsch_extract_rbs(gNB->common_vars.rxdataF,
pusch_vars,
slot,
......@@ -3287,8 +3424,8 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
nr_ulsch_channel_level(pusch_vars->ul_ch_estimates_ext,
frame_parms,
avg,
meas_symbol, // first symbol carrying PUSCH data, used for the measurement
nb_re_pusch, // number of REs in the PUSCH allocation
rel15_ul->nrOfLayers,
rel15_ul->rb_size);
......@@ -3304,8 +3441,8 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
stop_meas(&gNB->rx_pusch_init_stats);
// int64_t start = time_now_us();
// printf("Tasks started %ld \n", start );
start_meas(&gNB->rx_pusch_symbol_processing_stats);
int numSymbols=gNB->num_pusch_symbols_per_thread;
......@@ -3330,16 +3467,19 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
for(uint8_t symbol = rel15_ul->start_symbol_index;
symbol < (rel15_ul->start_symbol_index + rel15_ul->nr_of_symbols);
symbol+=numSymbols)
{
int total_res=0;
for (int s = 0; s<numSymbols;s++)
{
pusch_vars->ul_valid_re_per_slot[symbol+s] = get_nb_re_pusch(frame_parms,rel15_ul,symbol+s);
pusch_vars->llr_offset[symbol+s] = ((symbol+s)==rel15_ul->start_symbol_index) ?
0 :
pusch_vars->llr_offset[symbol+s-1] + pusch_vars->ul_valid_re_per_slot[symbol+s-1] * rel15_ul->qam_mod_order;
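// llr_offset[] is a running prefix sum: each symbol's LLRs start right after the
// previous symbol's (valid REs x bits per RE)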
total_res+=pusch_vars->ul_valid_re_per_slot[symbol+s];
}
if (total_res > 0)
{
#ifdef TASK_MANAGER
puschSymbolProc_t *rdata = &arr[idx_arr];
idx_arr++;
......@@ -3349,8 +3489,8 @@ int nr_rx_pusch_tp(PHY_VARS_gNB *gNB,
#else
union puschSymbolReqUnion id = {.s={ulsch_id,frame,slot,0}};
id.p=1+symbol;
notifiedFIFO_elt_t *req = newNotifiedFIFO_elt(sizeof(puschSymbolProc_t), id.p, gNB->respPuschSymb, nr_pusch_symbol_processing_ptr); // create a job for Tpool
puschSymbolProc_t *rdata = (puschSymbolProc_t*)NotifiedFifoData(req); // data for the job
#endif
rdata->gNB = gNB;
rdata->frame_parms=frame_parms;
......@@ -3382,7 +3522,7 @@ nr_pusch_symbol_processing_noprecoding(rdata);
#endif
// printf("Waiting %ld \n", time_now_us());
// printf("Waiting %ld \n", time_now_us());
#ifdef TASK_MANAGER
......@@ -3398,8 +3538,8 @@ nr_pusch_symbol_processing_noprecoding(rdata);
}
#endif
// int64_t const finish = time_now_us();
// printf("Tasks finished %ld delay %ld \n", finish , finish-start );
stop_meas(&gNB->rx_pusch_symbol_processing_stats);
return 0;
......